Start adding some ARM guest infrastructure, but as a result get
diverted into a massive renaming of the x86 guest code so as to avoid
namespace clashes.



git-svn-id: svn://svn.valgrind.org/vex/trunk@584 8f6e269a-dfd6-0310-a8e1-e2731360e62c
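The renaming scheme, for reference: x86-guest symbols gain x86g_ /
X86G_ / X86 prefixes so that the armg_ / ARMG_ / ARM names introduced
below can coexist in the same library.  For example, after this patch
the two flag-evaluation helpers are declared as

   /* guest-x86/gdefs.h */
   extern UInt x86g_calculate_eflags_all
                  ( UInt cc_op, UInt cc_dep1, UInt cc_dep2, UInt cc_ndep );

   /* guest-arm/gdefs.h */
   extern UInt armg_calculate_flags_all
                  ( UInt cc_op, UInt cc_dep1, UInt cc_dep2 );

whereas the x86 helper was previously the unprefixed
calculate_eflags_all -- exactly what a second guest architecture would
have collided with.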
diff --git a/priv/guest-arm/gdefs.h b/priv/guest-arm/gdefs.h
new file mode 100644
index 0000000..16752d7
--- /dev/null
+++ b/priv/guest-arm/gdefs.h
@@ -0,0 +1,198 @@
+
+/*---------------------------------------------------------------*/
+/*---                                                         ---*/
+/*--- This file (guest-arm/gdefs.h) is                        ---*/
+/*--- Copyright (c) 2004 OpenWorks LLP.  All rights reserved. ---*/
+/*---                                                         ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of LibVEX, a library for dynamic binary
+   instrumentation and translation.
+
+   Copyright (C) 2004 OpenWorks, LLP.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; Version 2 dated June 1991 of the
+   license.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, or liability
+   for damages.  See the GNU General Public License for more details.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+   USA.
+*/
+
+/* Only to be used within the guest-arm directory. */
+
+
+#ifndef __LIBVEX_GUEST_ARM_DEFS_H
+#define __LIBVEX_GUEST_ARM_DEFS_H
+
+
+/*---------------------------------------------------------*/
+/*--- arm to IR conversion                              ---*/
+/*---------------------------------------------------------*/
+
+extern
+IRBB* bbToIR_ARM ( UChar* armCode, 
+                   Addr64 eip, 
+                   Int*   guest_bytes_read, 
+                   Bool   (*byte_accessible)(Addr64),
+                   Bool   (*resteerOkFn)(Addr64),
+                   Bool   host_bigendian );
+
+/* Used by the optimiser to specialise calls to helpers. */
+extern
+IRExpr* guest_arm_spechelper ( Char* function_name,
+                               IRExpr** args );
+
+/* Describes to the optimiser which parts of the guest state require
+   precise memory exceptions.  This is logically part of the guest
+   state description. */
+extern 
+Bool guest_arm_state_requires_precise_mem_exns ( Int, Int );
+
+extern
+VexGuestLayout armGuest_layout;
+
+
+/*---------------------------------------------------------*/
+/*--- arm guest helpers                                 ---*/
+/*---------------------------------------------------------*/
+
+/* --- CLEAN HELPERS --- */
+
+extern UInt  armg_calculate_flags_all ( 
+                UInt cc_op, UInt cc_dep1, UInt cc_dep2 
+             );
+
+extern UInt  armg_calculate_condition ( 
+                UInt/*ARMCondcode*/ cond, 
+                UInt cc_op, 
+                UInt cc_dep1, UInt cc_dep2 
+             );
+
+
+/*---------------------------------------------------------*/
+/*--- Condition code stuff                              ---*/
+/*---------------------------------------------------------*/
+
+/* Flag masks.  Define the positions of the flag bits in the CPSR. */
+#define ARMG_CC_SHIFT_N  31
+#define ARMG_CC_SHIFT_Z  30
+#define ARMG_CC_SHIFT_C  29
+#define ARMG_CC_SHIFT_V  28
+
+#define ARMG_CC_MASK_N    (1 << ARMG_CC_SHIFT_N)
+#define ARMG_CC_MASK_Z    (1 << ARMG_CC_SHIFT_Z)
+#define ARMG_CC_MASK_V    (1 << ARMG_CC_SHIFT_V)
+#define ARMG_CC_MASK_C    (1 << ARMG_CC_SHIFT_C)
+
+/* Flag thunk descriptors.  A three-word thunk is used to record
+   details of the most recent flag-setting operation, so the flags can
+   be computed later if needed.
+
+   The three words are:
+
+      CC_OP, which describes the operation.
+
+      CC_DEP1 and CC_DEP2.  These are arguments to the operation.
+         We want Memcheck to believe that the resulting flags are
+         data-dependent on both CC_DEP1 and CC_DEP2, hence the 
+         name DEP.
+
+   When building the thunk, it is always necessary to write words into
+   CC_DEP1 and CC_DEP2, even if those args are not used given the
+   CC_OP field.  This is important because otherwise Memcheck could
+   give false positives as it does not understand the relationship
+   between the CC_OP field and CC_DEP1 and CC_DEP2, and so believes
+   that the definedness of the stored flags always depends on both
+   CC_DEP1 and CC_DEP2.
+
+   A summary of the field usages is:
+   TODO: make this right
+
+   Operation          DEP1               DEP2               NDEP
+   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+   add/sub/mul        first arg          second arg         unused
+
+   adc/sbb            first arg          (second arg)
+                                         XOR old_carry      old_carry
+
+   and/or/xor         result             zero               unused
+
+   inc/dec            result             zero               old_carry
+
+   shl/shr/sar        result             subshifted-        unused
+                                         result
+
+   rol/ror            result             zero               old_flags
+
+   copy               old_flags          zero               unused.
+
+
+   Therefore Memcheck will believe the following:
+
+   * add/sub/mul -- definedness of result flags depends on definedness
+     of both args.
+
+     etc etc
+*/
+enum {
+    ARMG_CC_OP_COPY,    /* DEP1 = current flags, DEP2 = 0 */
+                        /* just copy DEP1 to output */
+
+    ARMG_CC_OP_ADD      /* 1   DEP1 = argL, DEP2 = argR */
+};
+
+/* requires further study */
+
+
+
+/* Defines conditions which we can ask for (ARM ARM 2e page A3-6) */
+
+typedef
+   enum {
+      ARMCondEQ     = 0,  /* equal                               */
+      ARMCondNE     = 1,  /* not equal                           */
+
+      ARMCondHS     = 2,  /* >=u (higher or same)                */
+      ARMCondLO     = 3,  /* <u  (lower)                         */
+
+      ARMCondMI     = 4,  /* minus (negative)                    */
+      ARMCondPL     = 5,  /* plus (zero or +ve)                  */
+
+      ARMCondVS     = 6,  /* overflow                            */
+      ARMCondVC     = 7,  /* no overflow                         */
+
+      ARMCondHI     = 8,  /* >u   (higher)                       */
+      ARMCondLS     = 9,  /* <=u  (lower or same)                */
+
+      ARMCondGE     = 10, /* >=s (signed greater or equal)       */
+      ARMCondLT     = 11, /* <s  (signed less than)              */
+
+      ARMCondGT     = 12, /* >s  (signed greater)                */
+      ARMCondLE     = 13, /* <=s (signed less or equal)          */
+
+      ARMCondAL     = 14, /* always (unconditional)              */
+      ARMCondNV     = 15  /* never (basically undefined meaning) */
+   }
+   ARMCondcode;
+
+#endif /* ndef __LIBVEX_GUEST_ARM_DEFS_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                   guest-arm/gdefs.h ---*/
+/*---------------------------------------------------------------*/
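Aside: once armg_calculate_flags_all is filled in, evaluating an
ARMCondcode reduces to pulling one NZVC bit out of the computed flags
word and XORing it with the low bit of the condition number, since
each odd code is the negation of the even code before it.  A minimal
standalone sketch of that dispatch -- an assumption modelled on
x86g_calculate_condition later in this patch, not the eventual ARM
implementation:

   typedef unsigned int UInt;

   #define ARMG_CC_SHIFT_N  31
   #define ARMG_CC_SHIFT_Z  30
   #define ARMG_CC_SHIFT_C  29
   #define ARMG_CC_SHIFT_V  28

   /* Evaluate an ARMCondcode (0..15) given a computed NZVC word. */
   static UInt eval_cond ( UInt cond, UInt nzvc )
   {
      UInt inv = cond & 1;   /* odd codes negate the preceding even one */
      UInt n = (nzvc >> ARMG_CC_SHIFT_N) & 1;
      UInt z = (nzvc >> ARMG_CC_SHIFT_Z) & 1;
      UInt c = (nzvc >> ARMG_CC_SHIFT_C) & 1;
      UInt v = (nzvc >> ARMG_CC_SHIFT_V) & 1;
      switch (cond >> 1) {
         case 0:  return inv ^ z;                /* EQ / NE */
         case 1:  return inv ^ c;                /* HS / LO */
         case 2:  return inv ^ n;                /* MI / PL */
         case 3:  return inv ^ v;                /* VS / VC */
         case 4:  return inv ^ (c & !z);         /* HI / LS */
         case 5:  return inv ^ (n == v);         /* GE / LT */
         case 6:  return inv ^ ((n == v) & !z);  /* GT / LE */
         default: return inv ^ 1;                /* AL / NV */
      }
   }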
diff --git a/priv/guest-arm/ghelpers.c b/priv/guest-arm/ghelpers.c
new file mode 100644
index 0000000..b0324c5
--- /dev/null
+++ b/priv/guest-arm/ghelpers.c
@@ -0,0 +1,263 @@
+
+/*---------------------------------------------------------------*/
+/*---                                                         ---*/
+/*--- This file (guest-arm/ghelpers.c) is                     ---*/
+/*--- Copyright (c) 2004 OpenWorks LLP.  All rights reserved. ---*/
+/*---                                                         ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of LibVEX, a library for dynamic binary
+   instrumentation and translation.
+
+   Copyright (C) 2004 OpenWorks, LLP.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; Version 2 dated June 1991 of the
+   license.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, or liability
+   for damages.  See the GNU General Public License for more details.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+   USA.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex_guest_arm.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+
+#include "main/vex_util.h"
+#include "guest-arm/gdefs.h"
+
+
+/* This file contains helper functions for arm guest code.
+   Calls to these functions are generated by the back end.
+   These calls are of course in the host machine code and 
+   this file will be compiled to host machine code, so that
+   all makes sense.  
+
+   Only change the signatures of these helper functions very
+   carefully.  If you change the signature here, you'll have to change
+   the parameters passed to it in the IR calls constructed by
+   guest-arm/toIR.c.
+*/
+
+
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* Calculate all the 4 flags from the supplied thunk parameters. */
+UInt armg_calculate_flags_all ( UInt cc_op, 
+                                UInt cc_dep1_formal, 
+                                UInt cc_dep2_formal )
+{
+   switch (cc_op) {
+      default:
+         /* shouldn't really make these calls from generated code */
+         vex_printf("calculate_eflags_all(ARM)( %d, 0x%x, 0x%x )\n",
+                    cc_op, cc_dep1_formal, cc_dep2_formal );
+         vpanic("calculate_eflags_all(ARM)");
+   }
+}
+
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* returns 1 or 0 */
+UInt armg_calculate_condition ( UInt/*ARMCondcode*/ cond, 
+                                UInt cc_op, 
+                                UInt cc_dep1, 
+                                UInt cc_dep2 )
+{
+  //UInt nzvc = armg_calculate_flags_all(cc_op, cc_dep1, cc_dep2);
+
+   switch (cond) {
+      default:
+         /* shouldn't really make these calls from generated code */
+         vex_printf("calculate_condition(ARM)( %d, %d, 0x%x, 0x%x )\n",
+                    cond, cc_op, cc_dep1, cc_dep2 );
+         vpanic("calculate_condition(ARM)");
+   }
+}
+
+
+/* Used by the optimiser to try specialisations.  Returns an
+   equivalent expression, or NULL if none. */
+
+#if 0
+static Bool isU32 ( IRExpr* e, UInt n )
+{
+   return e->tag == Iex_Const
+          && e->Iex.Const.con->tag == Ico_U32
+          && e->Iex.Const.con->Ico.U32 == n;
+}
+#endif
+IRExpr* guest_arm_spechelper ( Char* function_name,
+                               IRExpr** args )
+{
+   return NULL;
+}
+
+
+/*----------------------------------------------*/
+/*--- The exported fns ..                    ---*/
+/*----------------------------------------------*/
+
+/* VISIBLE TO LIBVEX CLIENT */
+void LibVEX_GuestARM_put_flags ( UInt eflags_native,
+                                 /*OUT*/VexGuestARMState* vex_state )
+{
+   vassert(0); // FIXME
+#if 0
+   vex_state->guest_DFLAG
+      = (eflags_native & (1<<10)) ? 0xFFFFFFFF : 0x00000001;
+   vex_state->guest_IDFLAG
+      = (eflags_native & (1<<21)) ? 1 : 0;
+
+   /* Mask out everything except O S Z A C P. */
+   eflags_native
+      &= (CC_MASK_C | CC_MASK_P | CC_MASK_A 
+          | CC_MASK_Z | CC_MASK_S | CC_MASK_O);
+
+   vex_state->guest_CC_OP   = CC_OP_COPY;
+   vex_state->guest_CC_DEP1 = eflags_native;
+   vex_state->guest_CC_DEP2 = 0;
+   vex_state->guest_CC_NDEP = 0; /* unnecessary paranoia */
+#endif
+}
+
+
+/* VISIBLE TO LIBVEX CLIENT */
+UInt LibVEX_GuestARM_get_eflags ( /*IN*/VexGuestARMState* vex_state )
+{
+   vassert(0); // FIXME
+#if 0
+   UInt eflags = calculate_eflags_all(
+                    vex_state->guest_CC_OP,
+                    vex_state->guest_CC_DEP1,
+                    vex_state->guest_CC_DEP2,
+                    vex_state->guest_CC_NDEP
+                 );
+   UInt dflag = vex_state->guest_DFLAG;
+   vassert(dflag == 1 || dflag == 0xFFFFFFFF);
+   if (dflag == 0xFFFFFFFF)
+      eflags |= (1<<10);
+   if (vex_state->guest_IDFLAG == 1)
+      eflags |= (1<<21);
+
+   return eflags;
+#endif
+}
+
+/* VISIBLE TO LIBVEX CLIENT */
+void LibVEX_GuestARM_initialise ( /*OUT*/VexGuestARMState* vex_state )
+{
+   vex_state->guest_R0  = 0;
+   vex_state->guest_R1  = 0;
+   vex_state->guest_R2  = 0;
+   vex_state->guest_R3  = 0;
+   vex_state->guest_R4  = 0;
+   vex_state->guest_R5  = 0;
+   vex_state->guest_R6  = 0;
+   vex_state->guest_R7  = 0;
+   vex_state->guest_R8  = 0;
+   vex_state->guest_R9  = 0;
+   vex_state->guest_R10 = 0;
+   vex_state->guest_R11 = 0;
+   vex_state->guest_R12 = 0;
+   vex_state->guest_R13 = 0;
+   vex_state->guest_R14 = 0;
+   vex_state->guest_R15 = 0;
+
+   vex_state->guest_CC_OP   = ARMG_CC_OP_COPY;
+   vex_state->guest_CC_DEP1 = 0;
+   vex_state->guest_CC_DEP2 = 0;
+
+   vex_state->guest_SYSCALLNO = 0;
+}
+
+
+/*-----------------------------------------------------------*/
+/*--- Describing the arm guest state, for the benefit     ---*/
+/*--- of iropt and instrumenters.                         ---*/
+/*-----------------------------------------------------------*/
+
+/* Figure out if any part of the guest state contained in minoff
+   .. maxoff requires precise memory exceptions.  If in doubt return
+   True (but this generates significantly slower code).  
+
+   We enforce precise exns for guest R13 (SP) and R15 (PC) only.
+*/
+Bool guest_arm_state_requires_precise_mem_exns ( Int minoff, 
+                                                 Int maxoff)
+{
+   return True; // FIXME
+#if 0
+   Int esp_min = offsetof(VexGuestX86State, guest_ESP);
+   Int esp_max = esp_min + 4 - 1;
+   Int eip_min = offsetof(VexGuestX86State, guest_EIP);
+   Int eip_max = eip_min + 4 - 1;
+
+   if (maxoff < esp_min || minoff > esp_max) {
+      /* no overlap with esp */
+   } else {
+      return True;
+   }
+
+   if (maxoff < eip_min || minoff > eip_max) {
+      /* no overlap with eip */
+   } else {
+      return True;
+   }
+
+   return False;
+#endif
+}
+
+
+
+#define ALWAYSDEFD(field)                           \
+    { offsetof(VexGuestARMState, field),            \
+      (sizeof ((VexGuestARMState*)0)->field) }
+
+VexGuestLayout
+   armGuest_layout 
+      = { 
+          /* Total size of the guest state, in bytes. */
+          .total_sizeB = sizeof(VexGuestARMState),
+
+          /* Describe the stack pointer. */
+          .offset_SP = offsetof(VexGuestARMState,guest_R13),
+          .sizeof_SP = 4,
+
+          /* Describe the instruction pointer. */
+          .offset_IP = offsetof(VexGuestARMState,guest_R15),
+          .sizeof_IP = 4,
+
+          /* Describe any sections to be regarded by Memcheck as
+             'always-defined'. */
+          .n_alwaysDefd = 2,
+          /* flags thunk: OP is always defd, whereas DEP1 and DEP2
+             have to be tracked.  See detailed comment in gdefs.h on
+             meaning of thunk fields. */
+
+          .alwaysDefd 
+             = { /*  0 */ ALWAYSDEFD(guest_CC_OP),
+                 /*  1 */ ALWAYSDEFD(guest_SYSCALLNO)
+               }
+        };
+
+
+/*---------------------------------------------------------------*/
+/*--- end                                guest-arm/ghelpers.c ---*/
+/*---------------------------------------------------------------*/
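Aside: when the FIXME in guest_arm_state_requires_precise_mem_exns is
resolved, the natural ARM analogue of the #if 0 x86 logic is an
overlap check against R13 (the SP, per armGuest_layout) and R15 (the
PC).  A standalone sketch under that assumption -- the struct below is
a stand-in with only the fields used, not the real VexGuestARMState:

   #include <stddef.h>   /* offsetof */

   typedef int Int;
   typedef int Bool;
   #define True  1
   #define False 0

   typedef struct { unsigned int guest_R13, guest_R15; } GuestARMState;

   static Bool requires_precise_mem_exns ( Int minoff, Int maxoff )
   {
      Int sp_min = offsetof(GuestARMState, guest_R13);
      Int sp_max = sp_min + 4 - 1;
      Int pc_min = offsetof(GuestARMState, guest_R15);
      Int pc_max = pc_min + 4 - 1;

      if (!(maxoff < sp_min || minoff > sp_max))
         return True;   /* overlaps R13 (SP) */
      if (!(maxoff < pc_min || minoff > pc_max))
         return True;   /* overlaps R15 (PC) */
      return False;
   }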
diff --git a/priv/guest-x86/gdefs.h b/priv/guest-x86/gdefs.h
index d5c7483..77099c1 100644
--- a/priv/guest-x86/gdefs.h
+++ b/priv/guest-x86/gdefs.h
@@ -1,7 +1,7 @@
 
 /*---------------------------------------------------------------*/
 /*---                                                         ---*/
-/*--- This file (x86guest_defs.h) is                          ---*/
+/*--- This file (guest-x86/gdefs.h) is                        ---*/
 /*--- Copyright (c) 2004 OpenWorks LLP.  All rights reserved. ---*/
 /*---                                                         ---*/
 /*---------------------------------------------------------------*/
@@ -38,8 +38,8 @@
 /* Some of this stuff is taken from QEMU, which is Copyright (c) 2003
    Fabrice Bellard, and licensed under the LGPL. */
 
-#ifndef __LIBVEX_X86GUEST_DEFS_H
-#define __LIBVEX_X86GUEST_DEFS_H
+#ifndef __LIBVEX_GUEST_X86_DEFS_H
+#define __LIBVEX_GUEST_X86_DEFS_H
 
 
 /*---------------------------------------------------------*/
@@ -47,17 +47,17 @@
 /*---------------------------------------------------------*/
 
 extern
-IRBB* bbToIR_X86Instr ( UChar* x86code, 
-                        Addr64 eip, 
-                        Int*   guest_bytes_read, 
-                        Bool   (*byte_accessible)(Addr64),
-                        Bool   (*resteerOkFn)(Addr64),
-                        Bool   host_bigendian );
+IRBB* bbToIR_X86 ( UChar* x86code, 
+                   Addr64 eip, 
+                   Int*   guest_bytes_read, 
+                   Bool   (*byte_accessible)(Addr64),
+                   Bool   (*resteerOkFn)(Addr64),
+                   Bool   host_bigendian );
 
 /* Used by the optimiser to specialise calls to helpers. */
 extern
-IRExpr* x86guest_spechelper ( Char* function_name,
-                              IRExpr** args );
+IRExpr* guest_x86_spechelper ( Char* function_name,
+                               IRExpr** args );
 
 /* Describes to the optimiser which parts of the guest state require
    precise memory exceptions.  This is logically part of the guest
@@ -75,84 +75,90 @@
 
 /* --- CLEAN HELPERS --- */
 
-extern UInt  calculate_eflags_all ( UInt cc_op, 
-                                    UInt cc_dep1, UInt cc_dep2, UInt cc_ndep );
+extern UInt  x86g_calculate_eflags_all ( 
+                UInt cc_op, UInt cc_dep1, UInt cc_dep2, UInt cc_ndep 
+             );
 
-extern UInt  calculate_eflags_c   ( UInt cc_op, 
-                                    UInt cc_dep1, UInt cc_dep2, UInt cc_ndep );
+extern UInt  x86g_calculate_eflags_c ( 
+                UInt cc_op, UInt cc_dep1, UInt cc_dep2, UInt cc_ndep 
+             );
 
-extern UInt  calculate_condition  ( UInt/*Condcode*/ cond, 
-                                    UInt cc_op, 
-                                    UInt cc_dep1, UInt cc_dep2, UInt cc_ndep );
+extern UInt  x86g_calculate_condition ( 
+                UInt/*X86Condcode*/ cond, 
+                UInt cc_op, 
+                UInt cc_dep1, UInt cc_dep2, UInt cc_ndep 
+             );
 
-extern UInt  calculate_FXAM ( UInt tag, ULong dbl );
+extern UInt  x86g_calculate_FXAM ( UInt tag, ULong dbl );
 
-extern ULong calculate_RCR  ( UInt arg, UInt rot_amt, UInt eflags_in, UInt sz );
+extern ULong x86g_calculate_RCR  ( 
+                UInt arg, UInt rot_amt, UInt eflags_in, UInt sz 
+             );
 
 /* --- Clean helpers for MMX --- */
 
-extern ULong calculate_add32x2 ( ULong, ULong );
-extern ULong calculate_add16x4 ( ULong, ULong );
-extern ULong calculate_add8x8  ( ULong, ULong );
+extern ULong x86g_calculate_add32x2 ( ULong, ULong );
+extern ULong x86g_calculate_add16x4 ( ULong, ULong );
+extern ULong x86g_calculate_add8x8  ( ULong, ULong );
 
-extern ULong calculate_qadd16Sx4 ( ULong, ULong );
-extern ULong calculate_qadd8Sx8  ( ULong, ULong );
+extern ULong x86g_calculate_qadd16Sx4 ( ULong, ULong );
+extern ULong x86g_calculate_qadd8Sx8  ( ULong, ULong );
 
-extern ULong calculate_qadd16Ux4 ( ULong, ULong );
-extern ULong calculate_qadd8Ux8  ( ULong, ULong );
+extern ULong x86g_calculate_qadd16Ux4 ( ULong, ULong );
+extern ULong x86g_calculate_qadd8Ux8  ( ULong, ULong );
 
-extern ULong calculate_sub32x2 ( ULong, ULong );
-extern ULong calculate_sub16x4 ( ULong, ULong );
-extern ULong calculate_sub8x8  ( ULong, ULong );
+extern ULong x86g_calculate_sub32x2 ( ULong, ULong );
+extern ULong x86g_calculate_sub16x4 ( ULong, ULong );
+extern ULong x86g_calculate_sub8x8  ( ULong, ULong );
 
-extern ULong calculate_qsub16Sx4 ( ULong, ULong );
-extern ULong calculate_qsub8Sx8  ( ULong, ULong );
+extern ULong x86g_calculate_qsub16Sx4 ( ULong, ULong );
+extern ULong x86g_calculate_qsub8Sx8  ( ULong, ULong );
 
-extern ULong calculate_qsub16Ux4 ( ULong, ULong );
-extern ULong calculate_qsub8Ux8  ( ULong, ULong );
+extern ULong x86g_calculate_qsub16Ux4 ( ULong, ULong );
+extern ULong x86g_calculate_qsub8Ux8  ( ULong, ULong );
 
-extern ULong calculate_mulhi16x4 ( ULong, ULong );
-extern ULong calculate_mullo16x4 ( ULong, ULong );
+extern ULong x86g_calculate_mulhi16x4 ( ULong, ULong );
+extern ULong x86g_calculate_mullo16x4 ( ULong, ULong );
 
-extern ULong calculate_pmaddwd ( ULong, ULong );
+extern ULong x86g_calculate_pmaddwd ( ULong, ULong );
 
-extern ULong calculate_cmpeq32x2  ( ULong, ULong );
-extern ULong calculate_cmpeq16x4  ( ULong, ULong );
-extern ULong calculate_cmpeq8x8   ( ULong, ULong );
-extern ULong calculate_cmpge32Sx2 ( ULong, ULong );
-extern ULong calculate_cmpge16Sx4 ( ULong, ULong );
-extern ULong calculate_cmpge8Sx8  ( ULong, ULong );
+extern ULong x86g_calculate_cmpeq32x2  ( ULong, ULong );
+extern ULong x86g_calculate_cmpeq16x4  ( ULong, ULong );
+extern ULong x86g_calculate_cmpeq8x8   ( ULong, ULong );
+extern ULong x86g_calculate_cmpge32Sx2 ( ULong, ULong );
+extern ULong x86g_calculate_cmpge16Sx4 ( ULong, ULong );
+extern ULong x86g_calculate_cmpge8Sx8  ( ULong, ULong );
 
-extern ULong calculate_packssdw ( ULong, ULong );
-extern ULong calculate_packsswb ( ULong, ULong );
-extern ULong calculate_packuswb ( ULong, ULong );
+extern ULong x86g_calculate_packssdw ( ULong, ULong );
+extern ULong x86g_calculate_packsswb ( ULong, ULong );
+extern ULong x86g_calculate_packuswb ( ULong, ULong );
 
-extern ULong calculate_punpckhbw ( ULong, ULong );
-extern ULong calculate_punpcklbw ( ULong, ULong );
-extern ULong calculate_punpckhwd ( ULong, ULong );
-extern ULong calculate_punpcklwd ( ULong, ULong );
-extern ULong calculate_punpckhdq ( ULong, ULong );
-extern ULong calculate_punpckldq ( ULong, ULong );
+extern ULong x86g_calculate_punpckhbw ( ULong, ULong );
+extern ULong x86g_calculate_punpcklbw ( ULong, ULong );
+extern ULong x86g_calculate_punpckhwd ( ULong, ULong );
+extern ULong x86g_calculate_punpcklwd ( ULong, ULong );
+extern ULong x86g_calculate_punpckhdq ( ULong, ULong );
+extern ULong x86g_calculate_punpckldq ( ULong, ULong );
 
-extern ULong calculate_shl16x4 ( ULong, ULong );
-extern ULong calculate_shl32x2 ( ULong, ULong );
-extern ULong calculate_shl64x1 ( ULong, ULong );
+extern ULong x86g_calculate_shl16x4 ( ULong, ULong );
+extern ULong x86g_calculate_shl32x2 ( ULong, ULong );
+extern ULong x86g_calculate_shl64x1 ( ULong, ULong );
 
-extern ULong calculate_shr16Ux4 ( ULong, ULong );
-extern ULong calculate_shr32Ux2 ( ULong, ULong );
-extern ULong calculate_shr64Ux1 ( ULong, ULong );
+extern ULong x86g_calculate_shr16Ux4 ( ULong, ULong );
+extern ULong x86g_calculate_shr32Ux2 ( ULong, ULong );
+extern ULong x86g_calculate_shr64Ux1 ( ULong, ULong );
 
-extern ULong calculate_shr16Sx4 ( ULong, ULong );
-extern ULong calculate_shr32Sx2 ( ULong, ULong );
+extern ULong x86g_calculate_shr16Sx4 ( ULong, ULong );
+extern ULong x86g_calculate_shr32Sx2 ( ULong, ULong );
 
 
 /* --- DIRTY HELPERS --- */
 
-extern ULong loadF80le  ( UInt );
+extern ULong x86g_loadF80le  ( UInt );
 
-extern void  storeF80le ( UInt, ULong );
+extern void  x86g_storeF80le ( UInt, ULong );
 
-extern void  dirtyhelper_CPUID ( VexGuestX86State* );
+extern void  x86g_dirtyhelper_CPUID ( VexGuestX86State* );
 
 
 /*---------------------------------------------------------*/
@@ -160,25 +166,25 @@
 /*---------------------------------------------------------*/
 
 /* eflags masks */
-#define CC_SHIFT_O   11
-#define CC_SHIFT_S   7
-#define CC_SHIFT_Z   6
-#define CC_SHIFT_A   4
-#define CC_SHIFT_C   0
-#define CC_SHIFT_P   2
+#define X86G_CC_SHIFT_O   11
+#define X86G_CC_SHIFT_S   7
+#define X86G_CC_SHIFT_Z   6
+#define X86G_CC_SHIFT_A   4
+#define X86G_CC_SHIFT_C   0
+#define X86G_CC_SHIFT_P   2
 
-#define CC_MASK_O    (1 << CC_SHIFT_O)
-#define CC_MASK_S    (1 << CC_SHIFT_S)
-#define CC_MASK_Z    (1 << CC_SHIFT_Z)
-#define CC_MASK_A    (1 << CC_SHIFT_A)
-#define CC_MASK_C    (1 << CC_SHIFT_C)
-#define CC_MASK_P    (1 << CC_SHIFT_P)
+#define X86G_CC_MASK_O    (1 << X86G_CC_SHIFT_O)
+#define X86G_CC_MASK_S    (1 << X86G_CC_SHIFT_S)
+#define X86G_CC_MASK_Z    (1 << X86G_CC_SHIFT_Z)
+#define X86G_CC_MASK_A    (1 << X86G_CC_SHIFT_A)
+#define X86G_CC_MASK_C    (1 << X86G_CC_SHIFT_C)
+#define X86G_CC_MASK_P    (1 << X86G_CC_SHIFT_P)
 
 /* FPU flag masks */
-#define FC_MASK_C3   (1 << 14)
-#define FC_MASK_C2   (1 << 10)
-#define FC_MASK_C1   (1 << 9)
-#define FC_MASK_C0   (1 << 8)
+#define X86G_FC_MASK_C3   (1 << 14)
+#define X86G_FC_MASK_C2   (1 << 10)
+#define X86G_FC_MASK_C1   (1 << 9)
+#define X86G_FC_MASK_C0   (1 << 8)
 
 /* %EFLAGS thunk descriptors.  A four-word thunk is used to record
    details of the most recent flag-setting operation, so the flags can
@@ -298,96 +304,96 @@
      both results (flags and actual value).
 */
 enum {
-    CC_OP_COPY,    /* DEP1 = current flags, DEP2 = 0, NDEP = unused */
-                   /* just copy DEP1 to output */
+    X86G_CC_OP_COPY,    /* DEP1 = current flags, DEP2 = 0, NDEP = unused */
+                        /* just copy DEP1 to output */
 
-    CC_OP_ADDB,    /* 1 */
-    CC_OP_ADDW,    /* 2 DEP1 = argL, DEP2 = argR, NDEP = unused */
-    CC_OP_ADDL,    /* 3 */
+    X86G_CC_OP_ADDB,    /* 1 */
+    X86G_CC_OP_ADDW,    /* 2 DEP1 = argL, DEP2 = argR, NDEP = unused */
+    X86G_CC_OP_ADDL,    /* 3 */
 
-    CC_OP_SUBB,    /* 4 */
-    CC_OP_SUBW,    /* 5 DEP1 = argL, DEP2 = argR, NDEP = unused */
-    CC_OP_SUBL,    /* 6 */
+    X86G_CC_OP_SUBB,    /* 4 */
+    X86G_CC_OP_SUBW,    /* 5 DEP1 = argL, DEP2 = argR, NDEP = unused */
+    X86G_CC_OP_SUBL,    /* 6 */
 
-    CC_OP_ADCB,    /* 7 */
-    CC_OP_ADCW,    /* 8 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
-    CC_OP_ADCL,    /* 9 */
+    X86G_CC_OP_ADCB,    /* 7 */
+    X86G_CC_OP_ADCW,    /* 8 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
+    X86G_CC_OP_ADCL,    /* 9 */
 
-    CC_OP_SBBB,    /* 10 */
-    CC_OP_SBBW,    /* 11 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
-    CC_OP_SBBL,    /* 12 */
+    X86G_CC_OP_SBBB,    /* 10 */
+    X86G_CC_OP_SBBW,    /* 11 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
+    X86G_CC_OP_SBBL,    /* 12 */
 
-    CC_OP_LOGICB,  /* 13 */
-    CC_OP_LOGICW,  /* 14 DEP1 = result, DEP2 = 0, NDEP = unused */
-    CC_OP_LOGICL,  /* 15 */
+    X86G_CC_OP_LOGICB,  /* 13 */
+    X86G_CC_OP_LOGICW,  /* 14 DEP1 = result, DEP2 = 0, NDEP = unused */
+    X86G_CC_OP_LOGICL,  /* 15 */
 
-    CC_OP_INCB,    /* 16 */
-    CC_OP_INCW,    /* 17 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
-    CC_OP_INCL,    /* 18 */
+    X86G_CC_OP_INCB,    /* 16 */
+    X86G_CC_OP_INCW,    /* 17 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
+    X86G_CC_OP_INCL,    /* 18 */
 
-    CC_OP_DECB,    /* 19 */
-    CC_OP_DECW,    /* 20 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
-    CC_OP_DECL,    /* 21 */
+    X86G_CC_OP_DECB,    /* 19 */
+    X86G_CC_OP_DECW,    /* 20 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
+    X86G_CC_OP_DECL,    /* 21 */
 
-    CC_OP_SHLB,    /* 22 DEP1 = res, DEP2 = res', NDEP = unused */
-    CC_OP_SHLW,    /* 23 where res' is like res but shifted one bit less */
-    CC_OP_SHLL,    /* 24 */
+    X86G_CC_OP_SHLB,    /* 22 DEP1 = res, DEP2 = res', NDEP = unused */
+    X86G_CC_OP_SHLW,    /* 23 where res' is like res but shifted one bit less */
+    X86G_CC_OP_SHLL,    /* 24 */
 
-    CC_OP_SHRB,    /* 25 DEP1 = res, DEP2 = res', NDEP = unused */
-    CC_OP_SHRW,    /* 26 where res' is like res but shifted one bit less */
-    CC_OP_SHRL,    /* 27 */
+    X86G_CC_OP_SHRB,    /* 25 DEP1 = res, DEP2 = res', NDEP = unused */
+    X86G_CC_OP_SHRW,    /* 26 where res' is like res but shifted one bit less */
+    X86G_CC_OP_SHRL,    /* 27 */
 
-    CC_OP_ROLB,    /* 28 */
-    CC_OP_ROLW,    /* 29 DEP1 = res, DEP2 = 0, NDEP = old flags */
-    CC_OP_ROLL,    /* 30 */
+    X86G_CC_OP_ROLB,    /* 28 */
+    X86G_CC_OP_ROLW,    /* 29 DEP1 = res, DEP2 = 0, NDEP = old flags */
+    X86G_CC_OP_ROLL,    /* 30 */
 
-    CC_OP_RORB,    /* 31 */
-    CC_OP_RORW,    /* 32 DEP1 = res, DEP2 = 0, NDEP = old flags */
-    CC_OP_RORL,    /* 33 */
+    X86G_CC_OP_RORB,    /* 31 */
+    X86G_CC_OP_RORW,    /* 32 DEP1 = res, DEP2 = 0, NDEP = old flags */
+    X86G_CC_OP_RORL,    /* 33 */
 
-    CC_OP_UMULB,   /* 34 */
-    CC_OP_UMULW,   /* 35 DEP1 = argL, DEP2 = argR, NDEP = unused */
-    CC_OP_UMULL,   /* 36 */
+    X86G_CC_OP_UMULB,   /* 34 */
+    X86G_CC_OP_UMULW,   /* 35 DEP1 = argL, DEP2 = argR, NDEP = unused */
+    X86G_CC_OP_UMULL,   /* 36 */
 
-    CC_OP_SMULB,   /* 37 */
-    CC_OP_SMULW,   /* 38 DEP1 = argL, DEP2 = argR, NDEP = unused */
-    CC_OP_SMULL,   /* 39 */
+    X86G_CC_OP_SMULB,   /* 37 */
+    X86G_CC_OP_SMULW,   /* 38 DEP1 = argL, DEP2 = argR, NDEP = unused */
+    X86G_CC_OP_SMULL,   /* 39 */
 
-    CC_OP_NUMBER
+    X86G_CC_OP_NUMBER
 };
 
 typedef
    enum {
-      CondO      = 0,  /* overflow           */
-      CondNO     = 1,  /* no overflow        */
+      X86CondO      = 0,  /* overflow           */
+      X86CondNO     = 1,  /* no overflow        */
 
-      CondB      = 2,  /* below              */
-      CondNB     = 3,  /* not below          */
+      X86CondB      = 2,  /* below              */
+      X86CondNB     = 3,  /* not below          */
 
-      CondZ      = 4,  /* zero               */
-      CondNZ     = 5,  /* not zero           */
+      X86CondZ      = 4,  /* zero               */
+      X86CondNZ     = 5,  /* not zero           */
 
-      CondBE     = 6,  /* below or equal     */
-      CondNBE    = 7,  /* not below or equal */
+      X86CondBE     = 6,  /* below or equal     */
+      X86CondNBE    = 7,  /* not below or equal */
 
-      CondS      = 8,  /* negative           */
-      CondNS     = 9,  /* not negative       */
+      X86CondS      = 8,  /* negative           */
+      X86CondNS     = 9,  /* not negative       */
 
-      CondP      = 10, /* parity even        */
-      CondNP     = 11, /* not parity even    */
+      X86CondP      = 10, /* parity even        */
+      X86CondNP     = 11, /* not parity even    */
 
-      CondL      = 12, /* jump less          */
-      CondNL     = 13, /* not less           */
+      X86CondL      = 12, /* jump less          */
+      X86CondNL     = 13, /* not less           */
 
-      CondLE     = 14, /* less or equal      */
-      CondNLE    = 15, /* not less or equal  */
+      X86CondLE     = 14, /* less or equal      */
+      X86CondNLE    = 15, /* not less or equal  */
 
-      CondAlways = 16  /* HACK */
+      X86CondAlways = 16  /* HACK */
    }
-   Condcode;
+   X86Condcode;
 
-#endif /* ndef __LIBVEX_X86GUEST_DEFS_H */
+#endif /* ndef __LIBVEX_GUEST_X86_DEFS_H */
 
 /*---------------------------------------------------------------*/
-/*--- end                                     x86guest_defs.h ---*/
+/*--- end                                   guest-x86/gdefs.h ---*/
 /*---------------------------------------------------------------*/
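A note on the thunk scheme this header describes: it is easiest to see
with a concrete SUBL.  The comparison merely records its operands; the
carry is recovered only if something reads it, and after a subtract
carry is exactly unsigned less-than (the same fact the spechelper
exploits).  A toy standalone model -- the struct and names here are
illustrative, not the real VEX ones; the real fast path is in
x86g_calculate_eflags_c below:

   typedef unsigned int UInt;

   #define X86G_CC_MASK_C  (1 << 0)

   typedef struct { UInt op, dep1, dep2, ndep; } Thunk;
   enum { OP_SUBL = 1 };   /* stand-in for X86G_CC_OP_SUBL */

   /* 'cmp dst, src' just records the operands... */
   static void record_subl ( Thunk* t, UInt argL, UInt argR )
   {
      t->op = OP_SUBL; t->dep1 = argL; t->dep2 = argR; t->ndep = 0;
   }

   /* ...and CF is computed on demand: dst <u src. */
   static UInt carry_after_subl ( const Thunk* t )
   {
      return t->dep1 < t->dep2 ? X86G_CC_MASK_C : 0;
   }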
diff --git a/priv/guest-x86/ghelpers.c b/priv/guest-x86/ghelpers.c
index 5ea6410..af352b7 100644
--- a/priv/guest-x86/ghelpers.c
+++ b/priv/guest-x86/ghelpers.c
@@ -51,7 +51,7 @@
    Only change the signatures of these helper functions very
    carefully.  If you change the signature here, you'll have to change
    the parameters passed to it in the IR calls constructed by
-   x86toIR.c.
+   guest-x86/toIR.c.
 
    Some of this code/logic is derived from QEMU, which is copyright
    Fabrice Bellard, licensed under the LGPL.  It is used with
@@ -64,38 +64,38 @@
 
 
 static const UChar parity_table[256] = {
-    CC_MASK_P, 0, 0, CC_MASK_P, 0, CC_MASK_P, CC_MASK_P, 0,
-    0, CC_MASK_P, CC_MASK_P, 0, CC_MASK_P, 0, 0, CC_MASK_P,
-    0, CC_MASK_P, CC_MASK_P, 0, CC_MASK_P, 0, 0, CC_MASK_P,
-    CC_MASK_P, 0, 0, CC_MASK_P, 0, CC_MASK_P, CC_MASK_P, 0,
-    0, CC_MASK_P, CC_MASK_P, 0, CC_MASK_P, 0, 0, CC_MASK_P,
-    CC_MASK_P, 0, 0, CC_MASK_P, 0, CC_MASK_P, CC_MASK_P, 0,
-    CC_MASK_P, 0, 0, CC_MASK_P, 0, CC_MASK_P, CC_MASK_P, 0,
-    0, CC_MASK_P, CC_MASK_P, 0, CC_MASK_P, 0, 0, CC_MASK_P,
-    0, CC_MASK_P, CC_MASK_P, 0, CC_MASK_P, 0, 0, CC_MASK_P,
-    CC_MASK_P, 0, 0, CC_MASK_P, 0, CC_MASK_P, CC_MASK_P, 0,
-    CC_MASK_P, 0, 0, CC_MASK_P, 0, CC_MASK_P, CC_MASK_P, 0,
-    0, CC_MASK_P, CC_MASK_P, 0, CC_MASK_P, 0, 0, CC_MASK_P,
-    CC_MASK_P, 0, 0, CC_MASK_P, 0, CC_MASK_P, CC_MASK_P, 0,
-    0, CC_MASK_P, CC_MASK_P, 0, CC_MASK_P, 0, 0, CC_MASK_P,
-    0, CC_MASK_P, CC_MASK_P, 0, CC_MASK_P, 0, 0, CC_MASK_P,
-    CC_MASK_P, 0, 0, CC_MASK_P, 0, CC_MASK_P, CC_MASK_P, 0,
-    0, CC_MASK_P, CC_MASK_P, 0, CC_MASK_P, 0, 0, CC_MASK_P,
-    CC_MASK_P, 0, 0, CC_MASK_P, 0, CC_MASK_P, CC_MASK_P, 0,
-    CC_MASK_P, 0, 0, CC_MASK_P, 0, CC_MASK_P, CC_MASK_P, 0,
-    0, CC_MASK_P, CC_MASK_P, 0, CC_MASK_P, 0, 0, CC_MASK_P,
-    CC_MASK_P, 0, 0, CC_MASK_P, 0, CC_MASK_P, CC_MASK_P, 0,
-    0, CC_MASK_P, CC_MASK_P, 0, CC_MASK_P, 0, 0, CC_MASK_P,
-    0, CC_MASK_P, CC_MASK_P, 0, CC_MASK_P, 0, 0, CC_MASK_P,
-    CC_MASK_P, 0, 0, CC_MASK_P, 0, CC_MASK_P, CC_MASK_P, 0,
-    CC_MASK_P, 0, 0, CC_MASK_P, 0, CC_MASK_P, CC_MASK_P, 0,
-    0, CC_MASK_P, CC_MASK_P, 0, CC_MASK_P, 0, 0, CC_MASK_P,
-    0, CC_MASK_P, CC_MASK_P, 0, CC_MASK_P, 0, 0, CC_MASK_P,
-    CC_MASK_P, 0, 0, CC_MASK_P, 0, CC_MASK_P, CC_MASK_P, 0,
-    0, CC_MASK_P, CC_MASK_P, 0, CC_MASK_P, 0, 0, CC_MASK_P,
-    CC_MASK_P, 0, 0, CC_MASK_P, 0, CC_MASK_P, CC_MASK_P, 0,
-    CC_MASK_P, 0, 0, CC_MASK_P, 0, CC_MASK_P, CC_MASK_P, 0,
-    0, CC_MASK_P, CC_MASK_P, 0, CC_MASK_P, 0, 0, CC_MASK_P,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
 };
 
 /* n must be a constant to be efficient */
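For reference, parity_table above just precomputes PF for every byte
value -- PF is set exactly when the low 8 bits of a result contain an
even number of 1 bits.  A standalone restatement of that property:

   typedef unsigned char UChar;

   /* 1 iff 'b' has an even number of set bits, i.e. PF would be set. */
   static unsigned parity_even8 ( UChar b )
   {
      unsigned n, bits = 0;
      for (n = 0; n < 8; n++)
         bits += (b >> n) & 1;
      return (bits & 1) == 0;
   }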
@@ -142,7 +142,7 @@
      zf = ((DATA_UTYPE)res == 0) << 6;				\
      sf = lshift(res, 8 - DATA_BITS) & 0x80;			\
      of = lshift((argL ^ argR ^ -1) & (argL ^ res), 		\
-                 12 - DATA_BITS) & CC_MASK_O;			\
+                 12 - DATA_BITS) & X86G_CC_MASK_O;		\
      return cf | pf | af | zf | sf | of;			\
    }								\
 }
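The OF expression here is the standard signed-overflow identity for
addition: the top bit of (argL ^ argR ^ -1) & (argL ^ res) is set
exactly when the operands agree in sign but the result does not; the
macro then moves that bit to eflags position 11 via lshift.  A
standalone restatement for the 32-bit case:

   /* 1 iff argL + argR overflows as a signed 32-bit addition. */
   static unsigned add_overflows32 ( unsigned argL, unsigned argR )
   {
      unsigned res = argL + argR;
      return ((argL ^ argR ^ ~0u) & (argL ^ res)) >> 31;
   }
   /* e.g. add_overflows32(0x7FFFFFFF, 1) == 1,
           add_overflows32(1, 1)          == 0  */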
@@ -163,7 +163,7 @@
      zf = ((DATA_UTYPE)res == 0) << 6;				\
      sf = lshift(res, 8 - DATA_BITS) & 0x80;			\
      of = lshift((argL ^ argR) & (argL ^ res),	 		\
-                 12 - DATA_BITS) & CC_MASK_O; 			\
+                 12 - DATA_BITS) & X86G_CC_MASK_O; 		\
      return cf | pf | af | zf | sf | of;			\
    }								\
 }
@@ -175,7 +175,7 @@
    PREAMBLE(DATA_BITS);						\
    { Int cf, pf, af, zf, sf, of;				\
      Int argL, argR, oldC, res;		       			\
-     oldC = CC_NDEP & CC_MASK_C;				\
+     oldC = CC_NDEP & X86G_CC_MASK_C;				\
      argL = CC_DEP1;						\
      argR = CC_DEP2 ^ oldC;	       				\
      res  = (argL + argR) + oldC;				\
@@ -188,7 +188,7 @@
      zf = ((DATA_UTYPE)res == 0) << 6;				\
      sf = lshift(res, 8 - DATA_BITS) & 0x80;			\
      of = lshift((argL ^ argR ^ -1) & (argL ^ res), 		\
-                  12 - DATA_BITS) & CC_MASK_O;			\
+                  12 - DATA_BITS) & X86G_CC_MASK_O;		\
      return cf | pf | af | zf | sf | of;			\
    }								\
 }
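The DEP2 = argR ^ oldCarry convention used by ADC/SBB is worth a note:
storing argR XORed with the old carry (and the carry itself in NDEP)
makes Memcheck see the flags as data-dependent on the carry, and the
macro undoes the XOR before computing, as restated standalone here:

   typedef unsigned int UInt;

   /* Recompute an ADC result from the thunk fields as stored. */
   static UInt adc_result32 ( UInt cc_dep1, UInt cc_dep2, UInt cc_ndep )
   {
      UInt oldC = cc_ndep & 1;      /* CC_NDEP & X86G_CC_MASK_C */
      UInt argL = cc_dep1;
      UInt argR = cc_dep2 ^ oldC;   /* undo the XOR encoding */
      return argL + argR + oldC;
   }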
@@ -200,7 +200,7 @@
    PREAMBLE(DATA_BITS);						\
    { Int cf, pf, af, zf, sf, of;				\
      Int argL, argR, oldC, res;		       			\
-     oldC = CC_NDEP & CC_MASK_C;				\
+     oldC = CC_NDEP & X86G_CC_MASK_C;				\
      argL = CC_DEP1;						\
      argR = CC_DEP2 ^ oldC;	       				\
      res  = (argL - argR) - oldC;				\
@@ -213,7 +213,7 @@
      zf = ((DATA_UTYPE)res == 0) << 6;				\
      sf = lshift(res, 8 - DATA_BITS) & 0x80;			\
      of = lshift((argL ^ argR) & (argL ^ res), 			\
-                 12 - DATA_BITS) & CC_MASK_O;			\
+                 12 - DATA_BITS) & X86G_CC_MASK_O;		\
      return cf | pf | af | zf | sf | of;			\
    }								\
 }
@@ -244,7 +244,7 @@
      res  = CC_DEP1;						\
      argL = res - 1;						\
      argR = 1;							\
-     cf = CC_NDEP & CC_MASK_C;					\
+     cf = CC_NDEP & X86G_CC_MASK_C;				\
      pf = parity_table[(UChar)res];				\
      af = (res ^ argL ^ argR) & 0x10;				\
      zf = ((DATA_UTYPE)res == 0) << 6;				\
@@ -264,7 +264,7 @@
      res  = CC_DEP1;						\
      argL = res + 1;						\
      argR = 1;							\
-     cf = CC_NDEP & CC_MASK_C;					\
+     cf = CC_NDEP & X86G_CC_MASK_C;				\
      pf = parity_table[(UChar)res];				\
      af = (res ^ argL ^ argR) & 0x10;				\
      zf = ((DATA_UTYPE)res == 0) << 6;				\
@@ -281,13 +281,14 @@
 {								\
    PREAMBLE(DATA_BITS);						\
    { Int cf, pf, af, zf, sf, of;				\
-     cf = (CC_DEP2 >> (DATA_BITS - 1)) & CC_MASK_C;		\
+     cf = (CC_DEP2 >> (DATA_BITS - 1)) & X86G_CC_MASK_C;	\
      pf = parity_table[(UChar)CC_DEP1];				\
      af = 0; /* undefined */					\
      zf = ((DATA_UTYPE)CC_DEP1 == 0) << 6;			\
      sf = lshift(CC_DEP1, 8 - DATA_BITS) & 0x80;		\
      /* of is defined if shift count == 1 */			\
-     of = lshift(CC_DEP2 ^ CC_DEP1, 12 - DATA_BITS) & CC_MASK_O;\
+     of = lshift(CC_DEP2 ^ CC_DEP1, 12 - DATA_BITS) 		\
+          & X86G_CC_MASK_O;					\
      return cf | pf | af | zf | sf | of;			\
    }								\
 }
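The SHL thunk stores the result in DEP1 and the "subshifted" result
(shifted one bit less) in DEP2, so the carry-out is simply the top bit
of DEP2 and the shift amount never needs to be remembered.  Restated
standalone for 32 bits:

   typedef unsigned int UInt;

   /* CF after 'shl arg, amt' (amt in 1..31): the last bit shifted out
      is the top bit of arg << (amt-1), i.e. of CC_DEP2. */
   static UInt shl_carry32 ( UInt arg, UInt amt )
   {
      UInt dep2 = arg << (amt - 1);   /* the subshifted result */
      return (dep2 >> 31) & 1;        /* == (CC_DEP2 >> (32-1)) & 1 */
   }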
@@ -304,7 +305,8 @@
      zf = ((DATA_UTYPE)CC_DEP1 == 0) << 6;			\
      sf = lshift(CC_DEP1, 8 - DATA_BITS) & 0x80;		\
      /* of is defined if shift count == 1 */			\
-     of = lshift(CC_DEP2 ^ CC_DEP1, 12 - DATA_BITS) & CC_MASK_O;\
+     of = lshift(CC_DEP2 ^ CC_DEP1, 12 - DATA_BITS)		\
+          & X86G_CC_MASK_O;					\
      return cf | pf | af | zf | sf | of;			\
    }								\
 }
@@ -317,9 +319,10 @@
 {								\
    PREAMBLE(DATA_BITS);						\
    { Int fl 							\
-        = (CC_NDEP & ~(CC_MASK_O | CC_MASK_C))			\
-          | (CC_MASK_C & CC_DEP1)				\
-          | (CC_MASK_O & (lshift(CC_DEP1, 11-(DATA_BITS-1)) 	\
+        = (CC_NDEP & ~(X86G_CC_MASK_O | X86G_CC_MASK_C))	\
+          | (X86G_CC_MASK_C & CC_DEP1)				\
+          | (X86G_CC_MASK_O & (lshift(CC_DEP1,  		\
+                                      11-(DATA_BITS-1)) 	\
                      ^ lshift(CC_DEP1, 11)));			\
      return fl;							\
    }								\
@@ -333,9 +336,10 @@
 {								\
    PREAMBLE(DATA_BITS);						\
    { Int fl 							\
-        = (CC_NDEP & ~(CC_MASK_O | CC_MASK_C))			\
-          | (CC_MASK_C & (CC_DEP1 >> (DATA_BITS-1)))		\
-          | (CC_MASK_O & (lshift(CC_DEP1, 11-(DATA_BITS-1)) 	\
+        = (CC_NDEP & ~(X86G_CC_MASK_O | X86G_CC_MASK_C))	\
+          | (X86G_CC_MASK_C & (CC_DEP1 >> (DATA_BITS-1)))	\
+          | (X86G_CC_MASK_O & (lshift(CC_DEP1, 			\
+                                      11-(DATA_BITS-1)) 	\
                      ^ lshift(CC_DEP1, 11-(DATA_BITS-1)+1)));	\
      return fl;							\
    }								\
@@ -388,8 +392,8 @@
 
 #if PROFILE_EFLAGS
 
-static UInt tabc[CC_OP_NUMBER];
-static UInt tab[CC_OP_NUMBER][16];
+static UInt tabc[X86G_CC_OP_NUMBER];
+static UInt tab[X86G_CC_OP_NUMBER][16];
 static Bool initted     = False;
 static UInt n_calc_cond = 0;
 static UInt n_calc_all  = 0;
@@ -405,7 +409,7 @@
               "    S   NS    P   NP    L   NL   LE  NLE\n");
    vex_printf("     ----------------------------------------------"
               "----------------------------------------\n");
-   for (op = 0; op < CC_OP_NUMBER; op++) {
+   for (op = 0; op < X86G_CC_OP_NUMBER; op++) {
 
       ch = ' ';
       if (op > 0 && (op-1) % 3 == 0) 
@@ -437,7 +441,7 @@
 {
    Int op, co;
    initted = True;
-   for (op = 0; op < CC_OP_NUMBER; op++) {
+   for (op = 0; op < X86G_CC_OP_NUMBER; op++) {
       tabc[op] = 0;
       for (co = 0; co < 16; co++)
          tab[op][co] = 0;
@@ -448,109 +452,111 @@
 
 /* CALLED FROM GENERATED CODE: CLEAN HELPER */
 /* Calculate all the 6 flags from the supplied thunk parameters. */
-UInt calculate_eflags_all ( UInt cc_op, 
-                            UInt cc_dep1_formal, 
-                            UInt cc_dep2_formal,
-                            UInt cc_ndep_formal )
+UInt x86g_calculate_eflags_all ( UInt cc_op, 
+                                 UInt cc_dep1_formal, 
+                                 UInt cc_dep2_formal,
+                                 UInt cc_ndep_formal )
 {
 #  if PROFILE_EFLAGS
    n_calc_all++;
 #  endif
    switch (cc_op) {
-      case CC_OP_COPY:
+      case X86G_CC_OP_COPY:
          return cc_dep1_formal
-                & (CC_MASK_O | CC_MASK_S | CC_MASK_Z 
-                   | CC_MASK_A | CC_MASK_C | CC_MASK_P);
+                & (X86G_CC_MASK_O | X86G_CC_MASK_S | X86G_CC_MASK_Z 
+                   | X86G_CC_MASK_A | X86G_CC_MASK_C | X86G_CC_MASK_P);
 
-      case CC_OP_ADDB:   ACTIONS_ADD( 8,  UChar  );
-      case CC_OP_ADDW:   ACTIONS_ADD( 16, UShort );
-      case CC_OP_ADDL:   ACTIONS_ADD( 32, UInt   );
+      case X86G_CC_OP_ADDB:   ACTIONS_ADD( 8,  UChar  );
+      case X86G_CC_OP_ADDW:   ACTIONS_ADD( 16, UShort );
+      case X86G_CC_OP_ADDL:   ACTIONS_ADD( 32, UInt   );
 
-      case CC_OP_ADCB:   ACTIONS_ADC( 8,  UChar  );
-      case CC_OP_ADCW:   ACTIONS_ADC( 16, UShort );
-      case CC_OP_ADCL:   ACTIONS_ADC( 32, UInt   );
+      case X86G_CC_OP_ADCB:   ACTIONS_ADC( 8,  UChar  );
+      case X86G_CC_OP_ADCW:   ACTIONS_ADC( 16, UShort );
+      case X86G_CC_OP_ADCL:   ACTIONS_ADC( 32, UInt   );
 
-      case CC_OP_SUBB:   ACTIONS_SUB(  8, UChar  );
-      case CC_OP_SUBW:   ACTIONS_SUB( 16, UShort );
-      case CC_OP_SUBL:   ACTIONS_SUB( 32, UInt   );
+      case X86G_CC_OP_SUBB:   ACTIONS_SUB(  8, UChar  );
+      case X86G_CC_OP_SUBW:   ACTIONS_SUB( 16, UShort );
+      case X86G_CC_OP_SUBL:   ACTIONS_SUB( 32, UInt   );
 
-      case CC_OP_SBBB:   ACTIONS_SBB(  8, UChar  );
-      case CC_OP_SBBW:   ACTIONS_SBB( 16, UShort );
-      case CC_OP_SBBL:   ACTIONS_SBB( 32, UInt   );
+      case X86G_CC_OP_SBBB:   ACTIONS_SBB(  8, UChar  );
+      case X86G_CC_OP_SBBW:   ACTIONS_SBB( 16, UShort );
+      case X86G_CC_OP_SBBL:   ACTIONS_SBB( 32, UInt   );
 
-      case CC_OP_LOGICB: ACTIONS_LOGIC(  8, UChar  );
-      case CC_OP_LOGICW: ACTIONS_LOGIC( 16, UShort );
-      case CC_OP_LOGICL: ACTIONS_LOGIC( 32, UInt   );
+      case X86G_CC_OP_LOGICB: ACTIONS_LOGIC(  8, UChar  );
+      case X86G_CC_OP_LOGICW: ACTIONS_LOGIC( 16, UShort );
+      case X86G_CC_OP_LOGICL: ACTIONS_LOGIC( 32, UInt   );
 
-      case CC_OP_INCB:   ACTIONS_INC(  8, UChar  );
-      case CC_OP_INCW:   ACTIONS_INC( 16, UShort );
-      case CC_OP_INCL:   ACTIONS_INC( 32, UInt   );
+      case X86G_CC_OP_INCB:   ACTIONS_INC(  8, UChar  );
+      case X86G_CC_OP_INCW:   ACTIONS_INC( 16, UShort );
+      case X86G_CC_OP_INCL:   ACTIONS_INC( 32, UInt   );
 
-      case CC_OP_DECB:   ACTIONS_DEC(  8, UChar  );
-      case CC_OP_DECW:   ACTIONS_DEC( 16, UShort );
-      case CC_OP_DECL:   ACTIONS_DEC( 32, UInt   );
+      case X86G_CC_OP_DECB:   ACTIONS_DEC(  8, UChar  );
+      case X86G_CC_OP_DECW:   ACTIONS_DEC( 16, UShort );
+      case X86G_CC_OP_DECL:   ACTIONS_DEC( 32, UInt   );
 
-      case CC_OP_SHLB:   ACTIONS_SHL(  8, UChar  );
-      case CC_OP_SHLW:   ACTIONS_SHL( 16, UShort );
-      case CC_OP_SHLL:   ACTIONS_SHL( 32, UInt   );
+      case X86G_CC_OP_SHLB:   ACTIONS_SHL(  8, UChar  );
+      case X86G_CC_OP_SHLW:   ACTIONS_SHL( 16, UShort );
+      case X86G_CC_OP_SHLL:   ACTIONS_SHL( 32, UInt   );
 
-      case CC_OP_SHRB:   ACTIONS_SHR(  8, UChar  );
-      case CC_OP_SHRW:   ACTIONS_SHR( 16, UShort );
-      case CC_OP_SHRL:   ACTIONS_SHR( 32, UInt   );
+      case X86G_CC_OP_SHRB:   ACTIONS_SHR(  8, UChar  );
+      case X86G_CC_OP_SHRW:   ACTIONS_SHR( 16, UShort );
+      case X86G_CC_OP_SHRL:   ACTIONS_SHR( 32, UInt   );
 
-      case CC_OP_ROLB:   ACTIONS_ROL(  8, UChar  );
-      case CC_OP_ROLW:   ACTIONS_ROL( 16, UShort );
-      case CC_OP_ROLL:   ACTIONS_ROL( 32, UInt   );
+      case X86G_CC_OP_ROLB:   ACTIONS_ROL(  8, UChar  );
+      case X86G_CC_OP_ROLW:   ACTIONS_ROL( 16, UShort );
+      case X86G_CC_OP_ROLL:   ACTIONS_ROL( 32, UInt   );
 
-      case CC_OP_RORB:   ACTIONS_ROR(  8, UChar  );
-      case CC_OP_RORW:   ACTIONS_ROR( 16, UShort );
-      case CC_OP_RORL:   ACTIONS_ROR( 32, UInt   );
+      case X86G_CC_OP_RORB:   ACTIONS_ROR(  8, UChar  );
+      case X86G_CC_OP_RORW:   ACTIONS_ROR( 16, UShort );
+      case X86G_CC_OP_RORL:   ACTIONS_ROR( 32, UInt   );
 
-      case CC_OP_UMULB:  ACTIONS_UMUL(  8, UChar,  UShort );
-      case CC_OP_UMULW:  ACTIONS_UMUL( 16, UShort, UInt   );
-      case CC_OP_UMULL:  ACTIONS_UMUL( 32, UInt,   ULong  );
+      case X86G_CC_OP_UMULB:  ACTIONS_UMUL(  8, UChar,  UShort );
+      case X86G_CC_OP_UMULW:  ACTIONS_UMUL( 16, UShort, UInt   );
+      case X86G_CC_OP_UMULL:  ACTIONS_UMUL( 32, UInt,   ULong  );
 
-      case CC_OP_SMULB:  ACTIONS_SMUL(  8, Char,   Short );
-      case CC_OP_SMULW:  ACTIONS_SMUL( 16, Short,  Int   );
-      case CC_OP_SMULL:  ACTIONS_SMUL( 32, Int,    Long  );
+      case X86G_CC_OP_SMULB:  ACTIONS_SMUL(  8, Char,   Short );
+      case X86G_CC_OP_SMULW:  ACTIONS_SMUL( 16, Short,  Int   );
+      case X86G_CC_OP_SMULL:  ACTIONS_SMUL( 32, Int,    Long  );
 
       default:
          /* shouldn't really make these calls from generated code */
-         vex_printf("calculate_eflags_all( %d, 0x%x, 0x%x, 0x%x )\n",
+         vex_printf("calculate_eflags_all(X86)( %d, 0x%x, 0x%x, 0x%x )\n",
                     cc_op, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal );
-         vpanic("calculate_eflags_all");
+         vpanic("calculate_eflags_all(X86)");
    }
 }
 
 
 /* CALLED FROM GENERATED CODE: CLEAN HELPER */
 /* Calculate just the carry flag from the supplied thunk parameters. */
-UInt calculate_eflags_c ( UInt cc_op, 
-                          UInt cc_dep1, 
-                          UInt cc_dep2,
-                          UInt cc_ndep )
+UInt x86g_calculate_eflags_c ( UInt cc_op, 
+                               UInt cc_dep1, 
+                               UInt cc_dep2,
+                               UInt cc_ndep )
 {
    /* Fast-case some common ones. */
    switch (cc_op) {
-      case CC_OP_LOGICL: case CC_OP_LOGICW: case CC_OP_LOGICB:
+      case X86G_CC_OP_LOGICL: 
+      case X86G_CC_OP_LOGICW: 
+      case X86G_CC_OP_LOGICB:
          return 0;
-      case CC_OP_SUBL:
+      case X86G_CC_OP_SUBL:
          return ((UInt)cc_dep1) < ((UInt)cc_dep2)
-                   ? CC_MASK_C : 0;
+                   ? X86G_CC_MASK_C : 0;
 #if 0
-      case CC_OP_SUBB:
+      case X86G_CC_OP_SUBB:
          return ((UInt)(cc_dep1 & 0xFF)) < ((UInt)(cc_dep2 & 0xFF))
-                   ? CC_MASK_C : 0;
+                   ? X86G_CC_MASK_C : 0;
 #endif
 #if 0
-      case CC_OP_DECL:
+      case X86G_CC_OP_DECL:
          return cc_src;
-      case CC_OP_ADDL:
+      case X86G_CC_OP_ADDL:
          return ( ((UInt)cc_src + (UInt)cc_dst) < ((UInt)cc_src) ) 
-                   ? CC_MASK_C : 0;
-      case CC_OP_SUBB:
+                   ? X86G_CC_MASK_C : 0;
+      case X86G_CC_OP_SUBB:
          return ( ((UInt)(cc_src & 0xFF)) > ((UInt)(cc_dst & 0xFF)) ) 
-                   ? CC_MASK_C : 0;
+                   ? X86G_CC_MASK_C : 0;
 #endif
       default: 
          break;
@@ -563,19 +569,21 @@
 
    n_calc_c++;
 #  endif
-   return calculate_eflags_all(cc_op,cc_dep1,cc_dep2,cc_ndep) & CC_MASK_C;
+   return x86g_calculate_eflags_all(cc_op,cc_dep1,cc_dep2,cc_ndep) 
+          & X86G_CC_MASK_C;
 }
 
 
 /* CALLED FROM GENERATED CODE: CLEAN HELPER */
 /* returns 1 or 0 */
-/*static*/ UInt calculate_condition ( UInt/*Condcode*/ cond, 
-                                      UInt cc_op, 
-                                      UInt cc_dep1, 
-                                      UInt cc_dep2,
-                                      UInt cc_ndep )
+UInt x86g_calculate_condition ( UInt/*X86Condcode*/ cond, 
+                                UInt cc_op, 
+                                UInt cc_dep1, 
+                                UInt cc_dep2,
+                                UInt cc_ndep )
 {
-   UInt eflags = calculate_eflags_all(cc_op, cc_dep1, cc_dep2, cc_ndep);
+   UInt eflags = x86g_calculate_eflags_all(cc_op, cc_dep1, 
+                                           cc_dep2, cc_ndep);
    UInt of,sf,zf,cf,pf;
    UInt inv = cond & 1;
 
@@ -590,51 +598,51 @@
 #  endif
 
    switch (cond) {
-      case CondNO:
-      case CondO: /* OF == 1 */
-         of = eflags >> CC_SHIFT_O;
+      case X86CondNO:
+      case X86CondO: /* OF == 1 */
+         of = eflags >> X86G_CC_SHIFT_O;
          return 1 & (inv ^ of);
 
-      case CondNZ:
-      case CondZ: /* ZF == 1 */
-         zf = eflags >> CC_SHIFT_Z;
+      case X86CondNZ:
+      case X86CondZ: /* ZF == 1 */
+         zf = eflags >> X86G_CC_SHIFT_Z;
          return 1 & (inv ^ zf);
 
-      case CondNB:
-      case CondB: /* CF == 1 */
-         cf = eflags >> CC_SHIFT_C;
+      case X86CondNB:
+      case X86CondB: /* CF == 1 */
+         cf = eflags >> X86G_CC_SHIFT_C;
          return 1 & (inv ^ cf);
          break;
 
-      case CondNBE:
-      case CondBE: /* (CF or ZF) == 1 */
-         cf = eflags >> CC_SHIFT_C;
-         zf = eflags >> CC_SHIFT_Z;
+      case X86CondNBE:
+      case X86CondBE: /* (CF or ZF) == 1 */
+         cf = eflags >> X86G_CC_SHIFT_C;
+         zf = eflags >> X86G_CC_SHIFT_Z;
          return 1 & (inv ^ (cf | zf));
          break;
 
-      case CondNS:
-      case CondS: /* SF == 1 */
-         sf = eflags >> CC_SHIFT_S;
+      case X86CondNS:
+      case X86CondS: /* SF == 1 */
+         sf = eflags >> X86G_CC_SHIFT_S;
          return 1 & (inv ^ sf);
 
-      case CondNP:
-      case CondP: /* PF == 1 */
-         pf = eflags >> CC_SHIFT_P;
+      case X86CondNP:
+      case X86CondP: /* PF == 1 */
+         pf = eflags >> X86G_CC_SHIFT_P;
          return 1 & (inv ^ pf);
 
-      case CondNL:
-      case CondL: /* (SF xor OF) == 1 */
-         sf = eflags >> CC_SHIFT_S;
-         of = eflags >> CC_SHIFT_O;
+      case X86CondNL:
+      case X86CondL: /* (SF xor OF) == 1 */
+         sf = eflags >> X86G_CC_SHIFT_S;
+         of = eflags >> X86G_CC_SHIFT_O;
          return 1 & (inv ^ (sf ^ of));
          break;
 
-      case CondNLE:
-      case CondLE: /* ((SF xor OF) or ZF)  == 1 */
-         sf = eflags >> CC_SHIFT_S;
-         of = eflags >> CC_SHIFT_O;
-         zf = eflags >> CC_SHIFT_Z;
+      case X86CondNLE:
+      case X86CondLE: /* ((SF xor OF) or ZF)  == 1 */
+         sf = eflags >> X86G_CC_SHIFT_S;
+         of = eflags >> X86G_CC_SHIFT_O;
+         zf = eflags >> X86G_CC_SHIFT_Z;
          return 1 & (inv ^ ((sf ^ of) | zf));
          break;
 
@@ -657,8 +665,8 @@
           && e->Iex.Const.con->Ico.U32 == n;
 }
 
-IRExpr* x86guest_spechelper ( Char* function_name,
-                              IRExpr** args )
+IRExpr* guest_x86_spechelper ( Char* function_name,
+                               IRExpr** args )
 {
 #  define unop(_op,_a1) IRExpr_Unop((_op),(_a1))
 #  define binop(_op,_a1,_a2) IRExpr_Binop((_op),(_a1),(_a2))
@@ -688,16 +696,16 @@
       cc_dep2 = args[2];
       cc_ndep = args[3];
 
-      if (isU32(cc_op, CC_OP_SUBL)) {
+      if (isU32(cc_op, X86G_CC_OP_SUBL)) {
          /* C after sub denotes unsigned less than */
          return unop(Iop_1Uto32,
                      binop(Iop_CmpLT32U, cc_dep1, cc_dep2));
       }
-      if (isU32(cc_op, CC_OP_LOGICL)) {
+      if (isU32(cc_op, X86G_CC_OP_LOGICL)) {
          /* cflag after logic is zero */
          return mkU32(0);
       }
-      if (isU32(cc_op, CC_OP_DECL) || isU32(cc_op, CC_OP_INCL)) {
+      if (isU32(cc_op, X86G_CC_OP_DECL) || isU32(cc_op, X86G_CC_OP_INCL)) {
          /* If the thunk is dec or inc, the cflag is supplied as CC_NDEP. */
          return cc_ndep;
       }
@@ -724,7 +732,7 @@
 
       /*---------------- ADDL ----------------*/
 
-      if (isU32(cc_op, CC_OP_ADDL) && isU32(cond, CondZ)) {
+      if (isU32(cc_op, X86G_CC_OP_ADDL) && isU32(cond, X86CondZ)) {
          /* long add, then Z --> test (dst+src == 0) */
          return unop(Iop_1Uto32,
                      binop(Iop_CmpEQ32, 
@@ -734,34 +742,34 @@
 
       /*---------------- SUBL ----------------*/
 
-      if (isU32(cc_op, CC_OP_SUBL) && isU32(cond, CondZ)) {
+      if (isU32(cc_op, X86G_CC_OP_SUBL) && isU32(cond, X86CondZ)) {
          /* long sub/cmp, then Z --> test dst==src */
          return unop(Iop_1Uto32,
                      binop(Iop_CmpEQ32, cc_dep1, cc_dep2));
       }
 
-      if (isU32(cc_op, CC_OP_SUBL) && isU32(cond, CondL)) {
+      if (isU32(cc_op, X86G_CC_OP_SUBL) && isU32(cond, X86CondL)) {
          /* long sub/cmp, then L (signed less than) 
             --> test dst <s src */
          return unop(Iop_1Uto32,
                      binop(Iop_CmpLT32S, cc_dep1, cc_dep2));
       }
 
-      if (isU32(cc_op, CC_OP_SUBL) && isU32(cond, CondLE)) {
+      if (isU32(cc_op, X86G_CC_OP_SUBL) && isU32(cond, X86CondLE)) {
          /* long sub/cmp, then LE (signed less than or equal)
             --> test dst <=s src */
          return unop(Iop_1Uto32,
                      binop(Iop_CmpLE32S, cc_dep1, cc_dep2));
       }
 
-      if (isU32(cc_op, CC_OP_SUBL) && isU32(cond, CondBE)) {
+      if (isU32(cc_op, X86G_CC_OP_SUBL) && isU32(cond, X86CondBE)) {
          /* long sub/cmp, then BE (unsigned less than or equal)
             --> test dst <=u src */
          return unop(Iop_1Uto32,
                      binop(Iop_CmpLE32U, cc_dep1, cc_dep2));
       }
 #if 0
-      if (isU32(cc_op, CC_OP_SUBL) && isU32(cond, CondB)) {
+      if (isU32(cc_op, X86G_CC_OP_SUBL) && isU32(cond, X86CondB)) {
          /* long sub/cmp, then B (unsigned less than)
             --> test dst <u src */
          return unop(Iop_1Uto32,
@@ -770,7 +778,7 @@
 #endif
       /*---------------- SUBW ----------------*/
 
-      if (isU32(cc_op, CC_OP_SUBW) && isU32(cond, CondZ)) {
+      if (isU32(cc_op, X86G_CC_OP_SUBW) && isU32(cond, X86CondZ)) {
         /* word sub/cmp, then Z --> test dst==src */
          return unop(Iop_1Uto32,
                      binop(Iop_CmpEQ16, 
@@ -780,7 +788,7 @@
 
       /*---------------- SUBB ----------------*/
 
-      if (isU32(cc_op, CC_OP_SUBB) && isU32(cond, CondZ)) {
+      if (isU32(cc_op, X86G_CC_OP_SUBB) && isU32(cond, X86CondZ)) {
          /* byte sub/cmp, then Z --> test dst==src */
          return unop(Iop_1Uto32,
                      binop(Iop_CmpEQ8, 
@@ -788,7 +796,7 @@
                            unop(Iop_32to8,cc_dep2)));
       }
 
-      if (isU32(cc_op, CC_OP_SUBB) && isU32(cond, CondNZ)) {
+      if (isU32(cc_op, X86G_CC_OP_SUBB) && isU32(cond, X86CondNZ)) {
          /* byte sub/cmp, then NZ --> test dst!=src */
          return unop(Iop_1Uto32,
                      binop(Iop_CmpNE8, 
@@ -796,7 +804,7 @@
                            unop(Iop_32to8,cc_dep2)));
       }
 
-      if (isU32(cc_op, CC_OP_SUBB) && isU32(cond, CondNBE)) {
+      if (isU32(cc_op, X86G_CC_OP_SUBB) && isU32(cond, X86CondNBE)) {
         /* byte sub/cmp, then NBE (unsigned greater than)
             --> test src <=u dst */
          /* Note, args are opposite way round from the usual */
@@ -808,17 +816,17 @@
 
       /*---------------- LOGICL ----------------*/
 
-      if (isU32(cc_op, CC_OP_LOGICL) && isU32(cond, CondZ)) {
+      if (isU32(cc_op, X86G_CC_OP_LOGICL) && isU32(cond, X86CondZ)) {
          /* long and/or/xor, then Z --> test dst==0 */
          return unop(Iop_1Uto32,binop(Iop_CmpEQ32, cc_dep1, mkU32(0)));
       }
 
-      if (isU32(cc_op, CC_OP_LOGICL) && isU32(cond, CondS)) {
+      if (isU32(cc_op, X86G_CC_OP_LOGICL) && isU32(cond, X86CondS)) {
          /* long and/or/xor, then S --> test dst <s 0 */
          return unop(Iop_1Uto32,binop(Iop_CmpLT32S, cc_dep1, mkU32(0)));
       }
 
-      if (isU32(cc_op, CC_OP_LOGICL) && isU32(cond, CondLE)) {
+      if (isU32(cc_op, X86G_CC_OP_LOGICL) && isU32(cond, X86CondLE)) {
          /* long and/or/xor, then LE
             This is pretty subtle.  LOGIC sets SF and ZF according to the
            result and makes OF be zero.  LE computes (SF ^ OF) | ZF, but
@@ -830,7 +838,7 @@
 
       /*---------------- LOGICW ----------------*/
 
-      if (isU32(cc_op, CC_OP_LOGICW) && isU32(cond, CondZ)) {
+      if (isU32(cc_op, X86G_CC_OP_LOGICW) && isU32(cond, X86CondZ)) {
         /* word and/or/xor, then Z --> test dst==0 */
          return unop(Iop_1Uto32,
                      binop(Iop_CmpEQ32, binop(Iop_And32,cc_dep1,mkU32(0xFFFF)), 
@@ -839,7 +847,7 @@
 
       /*---------------- LOGICB ----------------*/
 
-      if (isU32(cc_op, CC_OP_LOGICB) && isU32(cond, CondZ)) {
+      if (isU32(cc_op, X86G_CC_OP_LOGICB) && isU32(cond, X86CondZ)) {
          /* byte and/or/xor, then Z --> test dst==0 */
          return unop(Iop_1Uto32,
                      binop(Iop_CmpEQ32, binop(Iop_And32,cc_dep1,mkU32(255)), 
@@ -848,19 +856,19 @@
 
       /*---------------- DECL ----------------*/
 
-      if (isU32(cc_op, CC_OP_DECL) && isU32(cond, CondZ)) {
+      if (isU32(cc_op, X86G_CC_OP_DECL) && isU32(cond, X86CondZ)) {
          /* dec L, then Z --> test dst == 0 */
          return unop(Iop_1Uto32,binop(Iop_CmpEQ32, cc_dep1, mkU32(0)));
       }
 
-      if (isU32(cc_op, CC_OP_DECL) && isU32(cond, CondS)) {
+      if (isU32(cc_op, X86G_CC_OP_DECL) && isU32(cond, X86CondS)) {
          /* dec L, then S --> compare DST <s 0 */
          return unop(Iop_1Uto32,binop(Iop_CmpLT32S, cc_dep1, mkU32(0)));
       }
 
       /*---------------- SHRL ----------------*/
 
-      if (isU32(cc_op, CC_OP_SHRL) && isU32(cond, CondZ)) {
+      if (isU32(cc_op, X86G_CC_OP_SHRL) && isU32(cond, X86CondZ)) {
          /* SHRL, then Z --> test dep1 == 0 */
          return unop(Iop_1Uto32,binop(Iop_CmpEQ32, cc_dep1, mkU32(0)));
       }
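
What these rules buy, in miniature: a known (cc_op, cond) pair collapses to a
single comparison instead of a run-time call to the full flags helper.  A
standalone sketch of the idea; the enum stand-ins below are invented for
illustration, only the Z and L values (4 and 12) follow the Intel condition
numbering:

    #include <assert.h>

    enum { OP_SUBL, OP_LOGICL };        /* invented stand-ins */
    enum { COND_Z = 4, COND_L = 12 };   /* Intel encodings of Z and L */

    /* Returns 0/1 when a rule fires, 2 when the optimiser would have
       to fall back to the full x86g_calculate_condition helper. */
    static unsigned spec_cond ( int op, int cond,
                                unsigned dep1, unsigned dep2 )
    {
       if (op == OP_SUBL && cond == COND_Z)
          return dep1 == dep2;               /* sub/cmp, then Z */
       if (op == OP_SUBL && cond == COND_L)
          return (int)dep1 < (int)dep2;      /* sub/cmp, then L */
       if (op == OP_LOGICL && cond == COND_Z)
          return dep1 == 0;                  /* and/or/xor, then Z */
       return 2;
    }

    int main ( void )
    {
       assert(spec_cond(OP_SUBL, COND_Z, 7, 7) == 1);
       assert(spec_cond(OP_SUBL, COND_L, 0xFFFFFFFFu, 0) == 1); /* -1 <s 0 */
       return 0;
    }
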
@@ -919,7 +927,7 @@
 }
 
 /* CALLED FROM GENERATED CODE: CLEAN HELPER */
-UInt calculate_FXAM ( UInt tag, ULong dbl ) 
+UInt x86g_calculate_FXAM ( UInt tag, ULong dbl ) 
 {
    Bool   mantissaIsZero;
    Int    bexp;
@@ -940,7 +948,7 @@
       return 1,0,sign,1 */
    if (tag == 0) {
       /* vex_printf("Empty\n"); */
-      return FC_MASK_C3 | 0 | sign | FC_MASK_C0;
+      return X86G_FC_MASK_C3 | 0 | sign | X86G_FC_MASK_C0;
    }
 
    bexp = (f64[7] << 4) | ((f64[6] >> 4) & 0x0F);
@@ -956,35 +964,35 @@
       Return 1,0,sign,0. */
    if (bexp == 0 && mantissaIsZero) {
       /* vex_printf("Zero\n"); */
-      return FC_MASK_C3 | 0 | sign | 0;
+      return X86G_FC_MASK_C3 | 0 | sign | 0;
    }
    
    /* If exponent is zero but mantissa isn't, it's a denormal.
       Return 1,1,sign,0. */
    if (bexp == 0 && !mantissaIsZero) {
       /* vex_printf("Denormal\n"); */
-      return FC_MASK_C3 | FC_MASK_C2 | sign | 0;
+      return X86G_FC_MASK_C3 | X86G_FC_MASK_C2 | sign | 0;
    }
 
    /* If the exponent is 7FF and the mantissa is zero, this is an infinity.
       Return 0,1,sign,1. */
    if (bexp == 0x7FF && mantissaIsZero) {
       /* vex_printf("Inf\n"); */
-      return 0 | FC_MASK_C2 | sign | FC_MASK_C0;
+      return 0 | X86G_FC_MASK_C2 | sign | X86G_FC_MASK_C0;
    }
 
    /* If the exponent is 7FF and the mantissa isn't zero, this is a NaN.
       Return 0,0,sign,1. */
    if (bexp == 0x7FF && !mantissaIsZero) {
       /* vex_printf("NaN\n"); */
-      return 0 | 0 | sign | FC_MASK_C0;
+      return 0 | 0 | sign | X86G_FC_MASK_C0;
    }
 
    /* Uh, ok, we give up.  It must be a normal finite number.
       Return 0,1,sign,0.
    */
    /* vex_printf("normal\n"); */
-   return 0 | FC_MASK_C2 | sign | 0;
+   return 0 | X86G_FC_MASK_C2 | sign | 0;
 }
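
The classification can be sanity-checked against ordinary doubles.  A sketch,
assuming IEEE-754 doubles on a little-endian host; it names the class instead
of composing the C3..C0 bits, and ignores the sign:

    #include <math.h>
    #include <stdio.h>
    #include <string.h>

    static const char* classify ( double d )
    {
       unsigned char b[8];
       int i, bexp, mantissaIsZero;
       memcpy(b, &d, 8);                     /* little-endian byte image */
       bexp = (b[7] << 4) | ((b[6] >> 4) & 0x0F);
       bexp &= 0x7FF;                        /* strip the sign bit */
       mantissaIsZero = (b[6] & 0x0F) == 0;
       for (i = 0; i < 6; i++)
          if (b[i] != 0) mantissaIsZero = 0;
       if (bexp == 0     &&  mantissaIsZero) return "zero";
       if (bexp == 0     && !mantissaIsZero) return "denormal";
       if (bexp == 0x7FF &&  mantissaIsZero) return "infinity";
       if (bexp == 0x7FF && !mantissaIsZero) return "NaN";
       return "normal";
    }

    int main ( void )
    {
       /* prints: zero normal infinity */
       printf("%s %s %s\n", classify(0.0), classify(1.0),
                            classify(HUGE_VAL));
       return 0;
    }
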
 
 
@@ -1339,7 +1347,7 @@
 
 /* CALLED FROM GENERATED CODE */
 /* DIRTY HELPER (reads guest memory) */
-ULong loadF80le ( UInt addrU )
+ULong x86g_loadF80le ( UInt addrU )
 {
    ULong f64;
    convert_f80le_to_f64le ( (UChar*)addrU, (UChar*)&f64 );
@@ -1348,7 +1356,7 @@
 
 /* CALLED FROM GENERATED CODE */
 /* DIRTY HELPER (writes guest memory) */
-void storeF80le ( UInt addrU, ULong f64 )
+void x86g_storeF80le ( UInt addrU, ULong f64 )
 {
    convert_f64le_to_f80le( (UChar*)&f64, (UChar*)addrU );
 }
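
Both follow the same dirty-helper shape: the guest address arrives as a plain
integer and is dereferenced directly, which is exactly why the call sites must
declare what memory is read or written.  A toy model of the convention
(stand-in names; a host-sized integer replaces the UInt address above):

    #include <assert.h>
    #include <string.h>

    static unsigned long long toy_load64le ( unsigned long addr )
    {
       unsigned long long v;
       memcpy(&v, (void*)addr, 8);           /* reads "guest" memory */
       return v;
    }

    static void toy_store64le ( unsigned long addr, unsigned long long v )
    {
       memcpy((void*)addr, &v, 8);           /* writes "guest" memory */
    }

    int main ( void )
    {
       unsigned char buf[8];
       toy_store64le((unsigned long)buf, 0x1122334455667788ULL);
       assert(toy_load64le((unsigned long)buf) == 0x1122334455667788ULL);
       return 0;
    }
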
@@ -1463,10 +1471,10 @@
 
    /* Mask out everything except O S Z A C P. */
    eflags_native
-      &= (CC_MASK_C | CC_MASK_P | CC_MASK_A 
-          | CC_MASK_Z | CC_MASK_S | CC_MASK_O);
+      &= (X86G_CC_MASK_C | X86G_CC_MASK_P | X86G_CC_MASK_A 
+          | X86G_CC_MASK_Z | X86G_CC_MASK_S | X86G_CC_MASK_O);
 
-   vex_state->guest_CC_OP   = CC_OP_COPY;
+   vex_state->guest_CC_OP   = X86G_CC_OP_COPY;
    vex_state->guest_CC_DEP1 = eflags_native;
    vex_state->guest_CC_DEP2 = 0;
    vex_state->guest_CC_NDEP = 0; /* unnecessary paranoia */
@@ -1476,7 +1484,7 @@
 /* VISIBLE TO LIBVEX CLIENT */
 UInt LibVEX_GuestX86_get_eflags ( /*IN*/VexGuestX86State* vex_state )
 {
-   UInt eflags = calculate_eflags_all(
+   UInt eflags = x86g_calculate_eflags_all(
                     vex_state->guest_CC_OP,
                     vex_state->guest_CC_DEP1,
                     vex_state->guest_CC_DEP2,
@@ -1506,7 +1514,7 @@
    vex_state->guest_ESI = 0;
    vex_state->guest_EDI = 0;
 
-   vex_state->guest_CC_OP   = CC_OP_COPY;
+   vex_state->guest_CC_OP   = X86G_CC_OP_COPY;
    vex_state->guest_CC_DEP1 = 0;
    vex_state->guest_CC_DEP2 = 0;
    vex_state->guest_CC_NDEP = 0;
@@ -1543,13 +1551,13 @@
    through the carry bit.  Result in low 32 bits, 
    new flags (OSZACP) in high 32 bits.
 */
-ULong calculate_RCR ( UInt arg, UInt rot_amt, UInt eflags_in, UInt sz )
+ULong x86g_calculate_RCR ( UInt arg, UInt rot_amt, UInt eflags_in, UInt sz )
 {
    UInt tempCOUNT = rot_amt & 0x1F, cf=0, of=0, tempcf;
 
    switch (sz) {
       case 4:
-         cf        = (eflags_in >> CC_SHIFT_C) & 1;
+         cf        = (eflags_in >> X86G_CC_SHIFT_C) & 1;
          of        = ((arg >> 31) ^ cf) & 1;
          while (tempCOUNT > 0) {
             tempcf = arg & 1;
@@ -1560,7 +1568,7 @@
          break;
       case 2:
          while (tempCOUNT >= 17) tempCOUNT -= 17;
-         cf        = (eflags_in >> CC_SHIFT_C) & 1;
+         cf        = (eflags_in >> X86G_CC_SHIFT_C) & 1;
          of        = ((arg >> 15) ^ cf) & 1;
          while (tempCOUNT > 0) {
             tempcf = arg & 1;
@@ -1571,7 +1579,7 @@
          break;
       case 1:
          while (tempCOUNT >= 9) tempCOUNT -= 9;
-         cf        = (eflags_in >> CC_SHIFT_C) & 1;
+         cf        = (eflags_in >> X86G_CC_SHIFT_C) & 1;
          of        = ((arg >> 7) ^ cf) & 1;
          while (tempCOUNT > 0) {
             tempcf = arg & 1;
@@ -1586,8 +1594,8 @@
 
    cf &= 1;
    of &= 1;
-   eflags_in &= ~(CC_MASK_C | CC_MASK_O);
-   eflags_in |= (cf << CC_SHIFT_C) | (of << CC_SHIFT_O);
+   eflags_in &= ~(X86G_CC_MASK_C | X86G_CC_MASK_O);
+   eflags_in |= (cf << X86G_CC_SHIFT_C) | (of << X86G_CC_SHIFT_O);
 
    return (((ULong)eflags_in) << 32) | ((ULong)arg);
 }
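
One turn of the crank for the 8-bit case, worked by hand; a sketch consistent
with the loop structure shown above for 32 bits (the old CF enters at the top
bit, the old bit 0 becomes the new CF):

    #include <assert.h>

    static unsigned rcr8_once ( unsigned arg, unsigned cf_in,
                                unsigned* cf_out )
    {
       unsigned tempcf = arg & 1;            /* bit rotated out */
       arg = ((arg >> 1) & 0x7F) | (cf_in << 7);
       *cf_out = tempcf;
       return arg & 0xFF;
    }

    int main ( void )
    {
       unsigned cf;
       assert(rcr8_once(0x01, 0, &cf) == 0x00 && cf == 1);
       assert(rcr8_once(0x00, 1, &cf) == 0x80 && cf == 0);
       return 0;
    }
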
@@ -1596,7 +1604,7 @@
 /* CALLED FROM GENERATED CODE */
 /* DIRTY HELPER (modifies guest state) */
 /* Claim to be a P54C P133 (pre-MMX Pentium) */
-void dirtyhelper_CPUID ( VexGuestX86State* st )
+void x86g_dirtyhelper_CPUID ( VexGuestX86State* st )
 {
    if (st->guest_EAX == 0) {
       st->guest_EAX = 0x1;
@@ -1889,7 +1897,7 @@
 
 /* ------------ Normal addition ------------ */
 
-ULong calculate_add32x2 ( ULong xx, ULong yy )
+ULong x86g_calculate_add32x2 ( ULong xx, ULong yy )
 {
    return mk32x2(
              sel32x2_1(xx) + sel32x2_1(yy),
@@ -1897,7 +1905,7 @@
           );
 }
 
-ULong calculate_add16x4 ( ULong xx, ULong yy )
+ULong x86g_calculate_add16x4 ( ULong xx, ULong yy )
 {
    return mk16x4(
              sel16x4_3(xx) + sel16x4_3(yy),
@@ -1907,7 +1915,7 @@
           );
 }
 
-ULong calculate_add8x8 ( ULong xx, ULong yy )
+ULong x86g_calculate_add8x8 ( ULong xx, ULong yy )
 {
    return mk8x8(
              sel8x8_7(xx) + sel8x8_7(yy),
@@ -1923,7 +1931,7 @@
 
 /* ------------ Saturating addition ------------ */
 
-ULong calculate_qadd16Sx4 ( ULong xx, ULong yy )
+ULong x86g_calculate_qadd16Sx4 ( ULong xx, ULong yy )
 {
    return mk16x4(
              qadd16S( sel16x4_3(xx), sel16x4_3(yy) ),
@@ -1933,7 +1941,7 @@
           );
 }
 
-ULong calculate_qadd8Sx8 ( ULong xx, ULong yy )
+ULong x86g_calculate_qadd8Sx8 ( ULong xx, ULong yy )
 {
    return mk8x8(
              qadd8S( sel8x8_7(xx), sel8x8_7(yy) ),
@@ -1947,7 +1955,7 @@
           );
 }
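
The saturating variants lean on scalar kernels (qadd16S, qadd8S, and friends)
defined earlier in this file and not visible in these hunks.  What such a
kernel has to do, as a standalone sketch:

    #include <assert.h>

    static short sketch_qadd16S ( short x, short y )
    {
       int r = (int)x + (int)y;              /* widen, then clamp */
       if (r >  32767) r =  32767;
       if (r < -32768) r = -32768;
       return (short)r;
    }

    int main ( void )
    {
       assert(sketch_qadd16S( 30000,  10000) ==  32767);
       assert(sketch_qadd16S(-30000, -10000) == -32768);
       return 0;
    }
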
 
-ULong calculate_qadd16Ux4 ( ULong xx, ULong yy )
+ULong x86g_calculate_qadd16Ux4 ( ULong xx, ULong yy )
 {
    return mk16x4(
              qadd16U( sel16x4_3(xx), sel16x4_3(yy) ),
@@ -1957,7 +1965,7 @@
           );
 }
 
-ULong calculate_qadd8Ux8 ( ULong xx, ULong yy )
+ULong x86g_calculate_qadd8Ux8 ( ULong xx, ULong yy )
 {
    return mk8x8(
              qadd8U( sel8x8_7(xx), sel8x8_7(yy) ),
@@ -1973,7 +1981,7 @@
 
 /* ------------ Normal subtraction ------------ */
 
-ULong calculate_sub32x2 ( ULong xx, ULong yy )
+ULong x86g_calculate_sub32x2 ( ULong xx, ULong yy )
 {
    return mk32x2(
              sel32x2_1(xx) - sel32x2_1(yy),
@@ -1981,7 +1989,7 @@
           );
 }
 
-ULong calculate_sub16x4 ( ULong xx, ULong yy )
+ULong x86g_calculate_sub16x4 ( ULong xx, ULong yy )
 {
    return mk16x4(
              sel16x4_3(xx) - sel16x4_3(yy),
@@ -1991,7 +1999,7 @@
           );
 }
 
-ULong calculate_sub8x8 ( ULong xx, ULong yy )
+ULong x86g_calculate_sub8x8 ( ULong xx, ULong yy )
 {
    return mk8x8(
              sel8x8_7(xx) - sel8x8_7(yy),
@@ -2007,7 +2015,7 @@
 
 /* ------------ Saturating subtraction ------------ */
 
-ULong calculate_qsub16Sx4 ( ULong xx, ULong yy )
+ULong x86g_calculate_qsub16Sx4 ( ULong xx, ULong yy )
 {
    return mk16x4(
              qsub16S( sel16x4_3(xx), sel16x4_3(yy) ),
@@ -2017,7 +2025,7 @@
           );
 }
 
-ULong calculate_qsub8Sx8 ( ULong xx, ULong yy )
+ULong x86g_calculate_qsub8Sx8 ( ULong xx, ULong yy )
 {
    return mk8x8(
              qsub8S( sel8x8_7(xx), sel8x8_7(yy) ),
@@ -2031,7 +2039,7 @@
           );
 }
 
-ULong calculate_qsub16Ux4 ( ULong xx, ULong yy )
+ULong x86g_calculate_qsub16Ux4 ( ULong xx, ULong yy )
 {
    return mk16x4(
              qsub16U( sel16x4_3(xx), sel16x4_3(yy) ),
@@ -2041,7 +2049,7 @@
           );
 }
 
-ULong calculate_qsub8Ux8 ( ULong xx, ULong yy )
+ULong x86g_calculate_qsub8Ux8 ( ULong xx, ULong yy )
 {
    return mk8x8(
              qsub8U( sel8x8_7(xx), sel8x8_7(yy) ),
@@ -2057,7 +2065,7 @@
 
 /* ------------ Multiplication ------------ */
 
-ULong calculate_mulhi16x4 ( ULong xx, ULong yy )
+ULong x86g_calculate_mulhi16x4 ( ULong xx, ULong yy )
 {
    return mk16x4(
              mulhi16S( sel16x4_3(xx), sel16x4_3(yy) ),
@@ -2067,7 +2075,7 @@
           );
 }
 
-ULong calculate_mullo16x4 ( ULong xx, ULong yy )
+ULong x86g_calculate_mullo16x4 ( ULong xx, ULong yy )
 {
    return mk16x4(
              mullo16S( sel16x4_3(xx), sel16x4_3(yy) ),
@@ -2077,7 +2085,7 @@
           );
 }
 
-ULong calculate_pmaddwd ( ULong xx, ULong yy )
+ULong x86g_calculate_pmaddwd ( ULong xx, ULong yy )
 {
    return
       mk32x2( 
@@ -2090,7 +2098,7 @@
 
 /* ------------ Comparison ------------ */
 
-ULong calculate_cmpeq32x2 ( ULong xx, ULong yy )
+ULong x86g_calculate_cmpeq32x2 ( ULong xx, ULong yy )
 {
    return mk32x2(
              cmpeq32( sel32x2_1(xx), sel32x2_1(yy) ),
@@ -2098,7 +2106,7 @@
           );
 }
 
-ULong calculate_cmpeq16x4 ( ULong xx, ULong yy )
+ULong x86g_calculate_cmpeq16x4 ( ULong xx, ULong yy )
 {
    return mk16x4(
              cmpeq16( sel16x4_3(xx), sel16x4_3(yy) ),
@@ -2108,7 +2116,7 @@
           );
 }
 
-ULong calculate_cmpeq8x8 ( ULong xx, ULong yy )
+ULong x86g_calculate_cmpeq8x8 ( ULong xx, ULong yy )
 {
    return mk8x8(
              cmpeq8( sel8x8_7(xx), sel8x8_7(yy) ),
@@ -2122,7 +2130,7 @@
           );
 }
 
-ULong calculate_cmpge32Sx2 ( ULong xx, ULong yy )
+ULong x86g_calculate_cmpge32Sx2 ( ULong xx, ULong yy )
 {
    return mk32x2(
              cmpge32S( sel32x2_1(xx), sel32x2_1(yy) ),
@@ -2130,7 +2138,7 @@
           );
 }
 
-ULong calculate_cmpge16Sx4 ( ULong xx, ULong yy )
+ULong x86g_calculate_cmpge16Sx4 ( ULong xx, ULong yy )
 {
    return mk16x4(
              cmpge16S( sel16x4_3(xx), sel16x4_3(yy) ),
@@ -2140,7 +2148,7 @@
           );
 }
 
-ULong calculate_cmpge8Sx8 ( ULong xx, ULong yy )
+ULong x86g_calculate_cmpge8Sx8 ( ULong xx, ULong yy )
 {
    return mk8x8(
              cmpge8S( sel8x8_7(xx), sel8x8_7(yy) ),
@@ -2156,7 +2164,7 @@
 
 /* ------------ Pack / unpack ------------ */
 
-ULong calculate_packssdw ( ULong dst, ULong src )
+ULong x86g_calculate_packssdw ( ULong dst, ULong src )
 {
    UInt d = sel32x2_1(dst);
    UInt c = sel32x2_0(dst);
@@ -2173,7 +2181,7 @@
           );
 }
 
-ULong calculate_packsswb ( ULong dst, ULong src )
+ULong x86g_calculate_packsswb ( ULong dst, ULong src )
 {
    UShort h = sel16x4_3(dst);
    UShort g = sel16x4_2(dst);
@@ -2197,7 +2205,7 @@
           );
 }
 
-ULong calculate_packuswb ( ULong dst, ULong src )
+ULong x86g_calculate_packuswb ( ULong dst, ULong src )
 {
    UShort h = sel16x4_3(dst);
    UShort g = sel16x4_2(dst);
@@ -2221,7 +2229,7 @@
           );
 }
 
-ULong calculate_punpckhbw ( ULong dst, ULong src )
+ULong x86g_calculate_punpckhbw ( ULong dst, ULong src )
 {
   return mk8x8(
             sel8x8_7(src),
@@ -2235,7 +2243,7 @@
          );
 }
 
-ULong calculate_punpcklbw ( ULong dst, ULong src )
+ULong x86g_calculate_punpcklbw ( ULong dst, ULong src )
 {
   return mk8x8(
             sel8x8_3(src),
@@ -2249,7 +2257,7 @@
          );
 }
 
-ULong calculate_punpckhwd ( ULong dst, ULong src )
+ULong x86g_calculate_punpckhwd ( ULong dst, ULong src )
 {
   return mk16x4(
             sel16x4_3(src),
@@ -2259,7 +2267,7 @@
          );
 }
 
-ULong calculate_punpcklwd ( ULong dst, ULong src )
+ULong x86g_calculate_punpcklwd ( ULong dst, ULong src )
 {
   return mk16x4(
             sel16x4_1(src),
@@ -2269,7 +2277,7 @@
          );
 }
 
-ULong calculate_punpckhdq ( ULong dst, ULong src )
+ULong x86g_calculate_punpckhdq ( ULong dst, ULong src )
 {
   return mk32x2(
             sel32x2_1(src),
@@ -2277,7 +2285,7 @@
          );
 }
 
-ULong calculate_punpckldq ( ULong dst, ULong src )
+ULong x86g_calculate_punpckldq ( ULong dst, ULong src )
 {
   return mk32x2(
             sel32x2_0(src),
@@ -2287,7 +2295,7 @@
 
 /* ------------ Shifting ------------ */
 
-ULong calculate_shl16x4 ( ULong xx, ULong yy )
+ULong x86g_calculate_shl16x4 ( ULong xx, ULong yy )
 {
    return mk16x4(
              shl16( sel16x4_3(xx), yy ),
@@ -2297,7 +2305,7 @@
           );
 }
 
-ULong calculate_shl32x2 ( ULong xx, ULong yy )
+ULong x86g_calculate_shl32x2 ( ULong xx, ULong yy )
 {
    return mk32x2(
              shl32( sel32x2_1(xx), yy ),
@@ -2306,13 +2314,13 @@
 }
 
 
-ULong calculate_shl64x1 ( ULong xx, ULong yy )
+ULong x86g_calculate_shl64x1 ( ULong xx, ULong yy )
 {
    if (yy > 63) return 0;
    return xx << yy;
 }
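
The yy > 63 guard is load-bearing: the MMX shifts define an oversize count to
give zero, whereas a C shift of a 64-bit value by 64 or more is undefined
behaviour.  A standalone check, using a local copy of the helper:

    #include <assert.h>

    static unsigned long long shl64x1 ( unsigned long long xx,
                                        unsigned long long yy )
    {
       if (yy > 63) return 0;    /* hardware semantics, not C's */
       return xx << yy;
    }

    int main ( void )
    {
       assert(shl64x1(1, 64) == 0);   /* (1ULL << 64) would be UB */
       assert(shl64x1(1,  3) == 8);
       return 0;
    }
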
 
-ULong calculate_shr16Ux4 ( ULong xx, ULong yy )
+ULong x86g_calculate_shr16Ux4 ( ULong xx, ULong yy )
 {
    return mk16x4(
              shr16U( sel16x4_3(xx), yy ),
@@ -2322,7 +2330,7 @@
           );
 }
 
-ULong calculate_shr32Ux2 ( ULong xx, ULong yy )
+ULong x86g_calculate_shr32Ux2 ( ULong xx, ULong yy )
 {
    return mk32x2(
              shr32U( sel32x2_1(xx), yy ),
@@ -2330,14 +2338,14 @@
           );
 }
 
-ULong calculate_shr64Ux1 ( ULong xx, ULong yy )
+ULong x86g_calculate_shr64Ux1 ( ULong xx, ULong yy )
 {
    if (yy > 63) return 0;
    return xx >> yy;
 }
 
 
-ULong calculate_shr16Sx4 ( ULong xx, ULong yy )
+ULong x86g_calculate_shr16Sx4 ( ULong xx, ULong yy )
 {
    return mk16x4(
              shr16S( sel16x4_3(xx), yy ),
@@ -2347,7 +2355,7 @@
           );
 }
 
-ULong calculate_shr32Sx2 ( ULong xx, ULong yy )
+ULong x86g_calculate_shr32Sx2 ( ULong xx, ULong yy )
 {
    return mk32x2(
              shr32S( sel32x2_1(xx), yy ),
diff --git a/priv/guest-x86/toIR.c b/priv/guest-x86/toIR.c
index da621c3..763de57 100644
--- a/priv/guest-x86/toIR.c
+++ b/priv/guest-x86/toIR.c
@@ -168,12 +168,12 @@
 /* Disassemble a complete basic block, starting at guest_eip_start,
    and return an IRBB holding the translated code.  The number of
    guest bytes read is written to *guest_bytes_read. */
-IRBB* bbToIR_X86Instr ( UChar* x86code, 
-                        Addr64 guest_eip_start, 
-                        Int*   guest_bytes_read, 
-                        Bool   (*byte_accessible)(Addr64),
-                        Bool   (*chase_into_ok)(Addr64),
-                        Bool   host_bigendian )
+IRBB* bbToIR_X86 ( UChar* x86code, 
+                   Addr64 guest_eip_start, 
+                   Int*   guest_bytes_read, 
+                   Bool   (*byte_accessible)(Addr64),
+                   Bool   (*chase_into_ok)(Addr64),
+                   Bool   host_bigendian )
 {
    UInt       delta;
    Int        i, n_instrs, size, first_stmt_idx;
@@ -621,7 +621,7 @@
 /* Build IR to calculate all the eflags from stored
    CC_OP/CC_DEP1/CC_DEP2/CC_NDEP.  Returns an expression ::
    Ity_I32. */
-static IRExpr* mk_calculate_eflags_all ( void )
+static IRExpr* mk_x86g_calculate_eflags_all ( void )
 {
    IRExpr** args
       = mkIRExprVec_4( IRExpr_Get(OFFB_CC_OP,   Ity_I32),
@@ -632,7 +632,7 @@
       = mkIRExprCCall(
            Ity_I32,
            0/*regparm*/, 
-           "calculate_eflags_all", &calculate_eflags_all,
+           "x86g_calculate_eflags_all", &x86g_calculate_eflags_all,
            args
         );
    /* Exclude OP and NDEP from definedness checking.  We're only
@@ -644,7 +644,7 @@
 /* Build IR to calculate some particular condition from stored
    CC_OP/CC_DEP1/CC_DEP2/CC_NDEP.  Returns an expression ::
    Ity_Bit. */
-static IRExpr* mk_calculate_condition ( Condcode cond )
+static IRExpr* mk_x86g_calculate_condition ( X86Condcode cond )
 {
    IRExpr** args
       = mkIRExprVec_5( mkU32(cond),
@@ -656,7 +656,7 @@
       = mkIRExprCCall(
            Ity_I32,
            0/*regparm*/, 
-           "calculate_condition", &calculate_condition,
+           "x86g_calculate_condition", &x86g_calculate_condition,
            args
         );
    /* Exclude the requested condition, OP and NDEP from definedness
@@ -667,7 +667,7 @@
 
 /* Build IR to calculate just the carry flag from stored
    CC_OP/CC_DEP1/CC_DEP2/CC_NDEP.  Returns an expression :: Ity_I32. */
-static IRExpr* mk_calculate_eflags_c ( void )
+static IRExpr* mk_x86g_calculate_eflags_c ( void )
 {
    IRExpr** args
       = mkIRExprVec_4( IRExpr_Get(OFFB_CC_OP,   Ity_I32),
@@ -678,7 +678,7 @@
       = mkIRExprCCall(
            Ity_I32,
            0/*regparm*/, 
-           "calculate_eflags_c", &calculate_eflags_c,
+           "x86g_calculate_eflags_c", &x86g_calculate_eflags_c,
            args
         );
    /* Exclude OP and NDEP from definedness checking.  We're only
@@ -758,8 +758,8 @@
    vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32);
 
    switch (op8) {
-      case Iop_Add8: ccOp += CC_OP_ADDB;   break;
-      case Iop_Sub8: ccOp += CC_OP_SUBB;   break;
+      case Iop_Add8: ccOp += X86G_CC_OP_ADDB;   break;
+      case Iop_Sub8: ccOp += X86G_CC_OP_SUBB;   break;
       default:       ppIROp(op8);
                      vpanic("setFlags_DEP1_DEP2(x86)");
    }
@@ -781,7 +781,7 @@
    switch (op8) {
       case Iop_Or8:
       case Iop_And8:
-      case Iop_Xor8: ccOp += CC_OP_LOGICB; break;
+      case Iop_Xor8: ccOp += X86G_CC_OP_LOGICB; break;
       default:       ppIROp(op8);
                      vpanic("setFlags_DEP1(x86)");
    }
@@ -810,8 +810,8 @@
       operation. */
    switch (op32) {
       case Iop_Shr32:
-      case Iop_Sar32: ccOp = CC_OP_SHRL - ccOp; break;
-      case Iop_Shl32: ccOp = CC_OP_SHLL - ccOp; break;
+      case Iop_Sar32: ccOp = X86G_CC_OP_SHRL - ccOp; break;
+      case Iop_Shl32: ccOp = X86G_CC_OP_SHLL - ccOp; break;
       default:        ppIROp(op32);
                       vpanic("setFlags_DEP1_DEP2_shift(x86)");
    }
@@ -838,14 +838,14 @@
 
 static void setFlags_INC_DEC ( Bool inc, IRTemp res, IRType ty )
 {
-   Int ccOp = inc ? CC_OP_INCB : CC_OP_DECB;
+   Int ccOp = inc ? X86G_CC_OP_INCB : X86G_CC_OP_DECB;
    
    ccOp += ty==Ity_I8 ? 0 : (ty==Ity_I16 ? 1 : 2);
    vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32);
 
    /* This has to come first, because calculating the C flag 
       may require reading all four thunk fields. */
-   stmt( IRStmt_Put( OFFB_CC_NDEP, mk_calculate_eflags_c()) );
+   stmt( IRStmt_Put( OFFB_CC_NDEP, mk_x86g_calculate_eflags_c()) );
    stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(ccOp)) );
    stmt( IRStmt_Put( OFFB_CC_DEP1, mkexpr(res)) );
    stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0)) );
@@ -880,34 +880,35 @@
 
 /* Condition codes, using the Intel encoding.  */
 
-static Char* name_Condcode ( Condcode cond )
+static Char* name_X86Condcode ( X86Condcode cond )
 {
    switch (cond) {
-      case CondO:      return "o";
-      case CondNO:     return "no";
-      case CondB:      return "b";
-      case CondNB:     return "nb";
-      case CondZ:      return "z";
-      case CondNZ:     return "nz";
-      case CondBE:     return "be";
-      case CondNBE:    return "nbe";
-      case CondS:      return "s";
-      case CondNS:     return "ns";
-      case CondP:      return "p";
-      case CondNP:     return "np";
-      case CondL:      return "l";
-      case CondNL:     return "nl";
-      case CondLE:     return "le";
-      case CondNLE:    return "nle";
-      case CondAlways: return "ALWAYS";
-      default: vpanic("name_Condcode");
+      case X86CondO:      return "o";
+      case X86CondNO:     return "no";
+      case X86CondB:      return "b";
+      case X86CondNB:     return "nb";
+      case X86CondZ:      return "z";
+      case X86CondNZ:     return "nz";
+      case X86CondBE:     return "be";
+      case X86CondNBE:    return "nbe";
+      case X86CondS:      return "s";
+      case X86CondNS:     return "ns";
+      case X86CondP:      return "p";
+      case X86CondNP:     return "np";
+      case X86CondL:      return "l";
+      case X86CondNL:     return "nl";
+      case X86CondLE:     return "le";
+      case X86CondNLE:    return "nle";
+      case X86CondAlways: return "ALWAYS";
+      default: vpanic("name_X86Condcode");
    }
 }
 
-static Condcode positiveIse_Condcode ( Condcode  cond,
-                                       Bool*     needInvert )
+static 
+X86Condcode positiveIse_X86Condcode ( X86Condcode  cond,
+                                      Bool*     needInvert )
 {
-   vassert(cond >= CondO && cond <= CondNLE);
+   vassert(cond >= X86CondO && cond <= X86CondNLE);
    if (cond & 1) {
       *needInvert = True;
       return cond-1;
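
The Intel encoding pairs every condition with its negation (O=0/NO=1, B=2/NB=3,
..., LE=14/NLE=15), so an odd code is the inverse of the even code below it;
positivising is just clearing bit 0 and remembering the inversion.  Standalone:

    #include <assert.h>

    static int positivise ( int cond, int* invert )
    {
       *invert = cond & 1;
       return cond & ~1;        /* same as cond-1 when cond is odd */
    }

    int main ( void )
    {
       int inv;
       assert(positivise(5, &inv) == 4 && inv == 1);  /* NZ -> Z, inverted */
       assert(positivise(4, &inv) == 4 && inv == 0);  /* Z unchanged */
       return 0;
    }
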
@@ -934,11 +935,12 @@
    IROp    xor   = mkSizedOp(ty, Iop_Xor8);
 
    vassert(sz == 1 || sz == 2 || sz == 4);
-   thunkOp = sz==4 ? CC_OP_ADCL : (sz==2 ? CC_OP_ADCW : CC_OP_ADCB);
+   thunkOp = sz==4 ? X86G_CC_OP_ADCL 
+                   : (sz==2 ? X86G_CC_OP_ADCW : X86G_CC_OP_ADCB);
 
    /* oldc = old carry flag, 0 or 1 */
    assign( oldc,  binop(Iop_And32,
-                        mk_calculate_eflags_c(),
+                        mk_x86g_calculate_eflags_c(),
                         mkU32(1)) );
 
    assign( oldcn, narrowTo(ty, mkexpr(oldc)) );
@@ -949,7 +951,8 @@
 
    stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(thunkOp) ) );
    stmt( IRStmt_Put( OFFB_CC_DEP1, mkexpr(ta1) ) );
-   stmt( IRStmt_Put( OFFB_CC_DEP2, binop(xor, mkexpr(ta2), mkexpr(oldcn)) ) );
+   stmt( IRStmt_Put( OFFB_CC_DEP2, binop(xor, mkexpr(ta2), 
+                                              mkexpr(oldcn)) ) );
    stmt( IRStmt_Put( OFFB_CC_NDEP, mkexpr(oldc) ) );
 }
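
The xor on CC_DEP2 is not decoration: since the thunk stores argR ^ oldC with
oldC in CC_NDEP, the flags helper can recover argR as DEP2 ^ NDEP and re-run
the add-with-carry.  A scalar sketch of that recovery (function name invented
for illustration):

    #include <assert.h>

    static unsigned adc_from_thunk ( unsigned dep1, unsigned dep2,
                                     unsigned ndep )
    {
       unsigned argR = dep2 ^ ndep;          /* undo the xor */
       return dep1 + argR + (ndep & 1);      /* argL + argR + oldC */
    }

    int main ( void )
    {
       unsigned argL = 10, argR = 20, oldC = 1;
       assert(adc_from_thunk(argL, argR ^ oldC, oldC) == 31);
       return 0;
    }
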
 
@@ -968,11 +971,12 @@
    IROp    xor   = mkSizedOp(ty, Iop_Xor8);
 
    vassert(sz == 1 || sz == 2 || sz == 4);
-   thunkOp = sz==4 ? CC_OP_SBBL : (sz==2 ? CC_OP_SBBW : CC_OP_SBBB);
+   thunkOp = sz==4 ? X86G_CC_OP_SBBL 
+                   : (sz==2 ? X86G_CC_OP_SBBW : X86G_CC_OP_SBBB);
 
    /* oldc = old carry flag, 0 or 1 */
    assign( oldc, binop(Iop_And32,
-                       mk_calculate_eflags_c(),
+                       mk_x86g_calculate_eflags_c(),
                        mkU32(1)) );
 
    assign( oldcn, narrowTo(ty, mkexpr(oldc)) );
@@ -983,7 +987,8 @@
 
    stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(thunkOp) ) );
    stmt( IRStmt_Put( OFFB_CC_DEP1, mkexpr(ta1) ) );
-   stmt( IRStmt_Put( OFFB_CC_DEP2, binop(xor, mkexpr(ta2), mkexpr(oldcn)) ) );
+   stmt( IRStmt_Put( OFFB_CC_DEP2, binop(xor, mkexpr(ta2), 
+                                              mkexpr(oldcn)) ) );
    stmt( IRStmt_Put( OFFB_CC_NDEP, mkexpr(oldc) ) );
 }
 
@@ -1420,18 +1425,19 @@
    irbb->jumpkind = kind;
 }
 
-static void jcc_01( Condcode cond, Addr32 d32_false, Addr32 d32_true )
+static 
+void jcc_01( X86Condcode cond, Addr32 d32_false, Addr32 d32_true )
 {
-   Bool     invert;
-   Condcode condPos;
-   condPos = positiveIse_Condcode ( cond, &invert );
+   Bool        invert;
+   X86Condcode condPos;
+   condPos = positiveIse_X86Condcode ( cond, &invert );
    if (invert) {
-      stmt( IRStmt_Exit( mk_calculate_condition(condPos),
+      stmt( IRStmt_Exit( mk_x86g_calculate_condition(condPos),
                          IRConst_U32(d32_false) ) );
       irbb->next     = mkU32(d32_true);
       irbb->jumpkind = Ijk_Boring;
    } else {
-      stmt( IRStmt_Exit( mk_calculate_condition(condPos),
+      stmt( IRStmt_Exit( mk_x86g_calculate_condition(condPos),
                          IRConst_U32(d32_true) ) );
       irbb->next     = mkU32(d32_false);
       irbb->jumpkind = Ijk_Boring;
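
Both arms emit the same shape, an exit edge guarded by the positivised
condition plus a fall-through; only the two targets are swapped.  A scalar
model of which address wins (condPos_holds is the value of the positivised
condition):

    #include <assert.h>

    static unsigned jcc_target ( int condPos_holds, int invert,
                                 unsigned d_false, unsigned d_true )
    {
       if (invert)   /* exit edge targets d_false */
          return condPos_holds ? d_false : d_true;
       else          /* exit edge targets d_true */
          return condPos_holds ? d_true : d_false;
    }

    int main ( void )
    {
       /* e.g. jnz: positivised to Z with invert set.  If Z holds,
          NZ is false, so control goes to d_false. */
       assert(jcc_target(1, 1, 0x1000, 0x2000) == 0x1000);
       assert(jcc_target(0, 1, 0x1000, 0x2000) == 0x2000);
       return 0;
    }
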
@@ -1801,8 +1807,8 @@
    /* reg := 0 */
    putIReg(size, ge_reg, mkU(ty,0));
    /* Flags: C,A,O=0, Z=1, S=0, P=1 */
-   stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(CC_OP_COPY) ));
-   stmt( IRStmt_Put( OFFB_CC_DEP1, mkU32(CC_MASK_Z|CC_MASK_P) ));
+   stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(X86G_CC_OP_COPY) ));
+   stmt( IRStmt_Put( OFFB_CC_DEP1, mkU32(X86G_CC_MASK_Z|X86G_CC_MASK_P) ));
    stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
    DIP("xor%c %s, %s\n", nameISize(size),
                          nameIReg(size,ge_reg), nameIReg(size,ge_reg) );
@@ -2412,18 +2418,18 @@
       IRExpr** args 
          = mkIRExprVec_4( widenUto32(mkexpr(dst0)), /* thing to rotate */
                           widenUto32(shift_expr),   /* rotate amount */
-                          widenUto32(mk_calculate_eflags_all()),
+                          widenUto32(mk_x86g_calculate_eflags_all()),
                           mkU32(sz) );
       assign( r64, mkIRExprCCall(
                       Ity_I64, 
                       0/*regparm*/, 
-                      "calculate_RCR", &calculate_RCR,
+                      "x86g_calculate_RCR", &x86g_calculate_RCR,
                       args
                    )
             );
       /* new eflags in hi half r64; new value in lo half r64 */
       assign( dst1, narrowTo(ty, unop(Iop_64to32, mkexpr(r64))) );
-      stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(CC_OP_COPY) ));
+      stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(X86G_CC_OP_COPY) ));
       stmt( IRStmt_Put( OFFB_CC_DEP1, unop(Iop_64HIto32, mkexpr(r64)) ));
       stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
    }
@@ -2514,7 +2520,7 @@
                    )
             )
          );
-         ccOp += CC_OP_ROLB;
+         ccOp += X86G_CC_OP_ROLB;
 
       } else { /* right */
 
@@ -2531,7 +2537,7 @@
                    )
             )
          );
-         ccOp += CC_OP_RORB;
+         ccOp += X86G_CC_OP_RORB;
 
       }
 
@@ -2539,7 +2545,7 @@
          need the resulting value for this, and the previous flags.
          Except don't set it if the rotate count is zero. */
 
-      assign(oldFlags, mk_calculate_eflags_all());
+      assign(oldFlags, mk_x86g_calculate_eflags_all());
 
       /* CC_DEP1 is the rotated value.  CC_NDEP is flags before. */
       stmt( IRStmt_Put( OFFB_CC_OP,
@@ -2719,7 +2725,7 @@
          IRTemp resHi   = newTemp(Ity_I32);
          IRTemp resLo   = newTemp(Ity_I32);
          IROp   mulOp   = syned ? Iop_MullS32 : Iop_MullU32;
-         UInt   tBaseOp = syned ? CC_OP_SMULB : CC_OP_UMULB;
+         UInt   tBaseOp = syned ? X86G_CC_OP_SMULB : X86G_CC_OP_UMULB;
          setFlags_MUL ( Ity_I32, t1, tmp, tBaseOp );
          assign( res64, binop(mulOp, mkexpr(t1), mkexpr(tmp)) );
          assign( resHi, unop(Iop_64HIto32,mkexpr(res64)));
@@ -2733,7 +2739,7 @@
          IRTemp resHi   = newTemp(Ity_I16);
          IRTemp resLo   = newTemp(Ity_I16);
          IROp   mulOp   = syned ? Iop_MullS16 : Iop_MullU16;
-         UInt   tBaseOp = syned ? CC_OP_SMULB : CC_OP_UMULB;
+         UInt   tBaseOp = syned ? X86G_CC_OP_SMULB : X86G_CC_OP_UMULB;
          setFlags_MUL ( Ity_I16, t1, tmp, tBaseOp );
          assign( res32, binop(mulOp, mkexpr(t1), mkexpr(tmp)) );
          assign( resHi, unop(Iop_32HIto16,mkexpr(res32)));
@@ -2747,7 +2753,7 @@
          IRTemp resHi   = newTemp(Ity_I8);
          IRTemp resLo   = newTemp(Ity_I8);
          IROp   mulOp   = syned ? Iop_MullS8 : Iop_MullU8;
-         UInt   tBaseOp = syned ? CC_OP_SMULB : CC_OP_UMULB;
+         UInt   tBaseOp = syned ? X86G_CC_OP_SMULB : X86G_CC_OP_UMULB;
          setFlags_MUL ( Ity_I8, t1, tmp, tBaseOp );
          assign( res16, binop(mulOp, mkexpr(t1), mkexpr(tmp)) );
          assign( resHi, unop(Iop_16HIto8,mkexpr(res16)));
@@ -3211,7 +3217,7 @@
    We assume the insn is the last one in the basic block, and so emit a jump
    to the next insn, rather than just falling through. */
 static 
-void dis_REP_op ( Condcode cond,
+void dis_REP_op ( X86Condcode cond,
                   void (*dis_OP)(Int, IRTemp),
                   Int sz, Addr32 eip, Addr32 eip_next, Char* name )
 {
@@ -3233,10 +3239,10 @@
    dis_string_op_increment(sz, t_inc);
    dis_OP (sz, t_inc);
 
-   if (cond == CondAlways) {
+   if (cond == X86CondAlways) {
       jmp_lit(Ijk_Boring,eip);
    } else {
-      stmt( IRStmt_Exit( mk_calculate_condition(cond),
+      stmt( IRStmt_Exit( mk_x86g_calculate_condition(cond),
                          IRConst_U32(eip) ) );
       jmp_lit(Ijk_Boring,eip_next);
    }
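
So each translation of a REP-prefixed string insn performs at most one
iteration and then jumps back to itself (eip) or falls out (eip_next).  A
scalar model, assuming the elided head of the function tests ECX for zero and
decrements it before the body:

    #include <assert.h>

    static unsigned rep_step ( unsigned* ecx, int cond_holds,
                               unsigned eip, unsigned eip_next )
    {
       if (*ecx == 0) return eip_next;      /* assumed: skip if ECX==0 */
       (*ecx)--;
       /* ... one MOVS/CMPS/SCAS/STOS step happens here ... */
       return cond_holds ? eip : eip_next;  /* loop back or leave */
    }

    int main ( void )
    {
       unsigned ecx = 2;
       assert(rep_step(&ecx, 1, 0x10, 0x20) == 0x10 && ecx == 1);
       assert(rep_step(&ecx, 0, 0x10, 0x20) == 0x20 && ecx == 0);
       return 0;
    }
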
@@ -3270,7 +3276,7 @@
       assign( te, loadLE(ty,mkexpr(addr)) );
    }
 
-   setFlags_MUL ( ty, te, tg, CC_OP_SMULB );
+   setFlags_MUL ( ty, te, tg, X86G_CC_OP_SMULB );
 
    assign( resLo, binop( mkSizedOp(ty, Iop_Mul8), mkexpr(te), mkexpr(tg) ) );
 
@@ -3324,7 +3330,7 @@
 
    assign( resLo, binop( mkSizedOp(ty, Iop_Mul8), mkexpr(te), mkexpr(tl) ));
 
-   setFlags_MUL ( ty, te, tl, CC_OP_SMULB );
+   setFlags_MUL ( ty, te, tl, X86G_CC_OP_SMULB );
 
    putIReg(size, gregOfRM(rm), mkexpr(resLo));
 
@@ -3506,7 +3512,7 @@
 
 static void clear_C2 ( void )
 {
-  put_C3210( binop(Iop_And32, get_C3210(), mkU32(~FC_MASK_C2)) );
+  put_C3210( binop(Iop_And32, get_C3210(), mkU32(~X86G_FC_MASK_C2)) );
 }
 
 
@@ -3603,7 +3609,7 @@
       Z,P,C,O correctly, but forces A and S to zero, whereas the Intel
       documentation implies A and S are unchanged. 
    */
-   stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(CC_OP_COPY) ));
+   stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(X86G_CC_OP_COPY) ));
    stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
    stmt( IRStmt_Put( OFFB_CC_DEP1,
                      binop( Iop_And32,
@@ -3858,7 +3864,7 @@
                put_C3210(mkIRExprCCall(
                             Ity_I32, 
                             0/*regparm*/, 
-                            "calculate_FXAM", &calculate_FXAM,
+                            "x86g_calculate_FXAM", &x86g_calculate_FXAM,
                             args
                         ));
                DIP("fxam");
@@ -4078,7 +4084,8 @@
                DIP("fcmovz %%st(%d), %%st(0)", r_src);
                put_ST_UNCHECKED(0, 
                                 IRExpr_Mux0X( 
-                                    unop(Iop_1Uto8,mk_calculate_condition(CondZ)), 
+                                    unop(Iop_1Uto8,
+                                         mk_x86g_calculate_condition(X86CondZ)), 
                                     get_ST(0), get_ST(r_src)) );
                break;
 
@@ -4145,7 +4152,8 @@
 
                IRDirty* d = unsafeIRDirty_1_N ( 
                                val, 
-                               0/*regparms*/, "loadF80le", &loadF80le, 
+                               0/*regparms*/, 
+                               "x86g_loadF80le", &x86g_loadF80le, 
                                args 
                             );
                /* declare that we're reading memory */
@@ -4170,7 +4178,7 @@
 
                IRDirty* d = unsafeIRDirty_0_N ( 
                                0/*regparms*/, 
-                               "storeF80le", &storeF80le,
+                               "x86g_storeF80le", &x86g_storeF80le,
                                args 
                             );
                /* declare we're writing memory */
@@ -4202,7 +4210,8 @@
                DIP("fcmovnz %%st(%d), %%st(0)", r_src);
                put_ST_UNCHECKED(0, 
                                 IRExpr_Mux0X( 
-                                    unop(Iop_1Uto8,mk_calculate_condition(CondNZ)), 
+                                    unop(Iop_1Uto8,
+                                         mk_x86g_calculate_condition(X86CondNZ)), 
                                     get_ST(0), get_ST(r_src)) );
                break;
 
@@ -4648,60 +4657,60 @@
    void* hAddr = NULL;
    Char* hName = NULL;
    switch (opc) {
-      case 0xFC: XXX(calculate_add8x8); break;
-      case 0xFD: XXX(calculate_add16x4); break;
-      case 0xFE: XXX(calculate_add32x2); break;
+      case 0xFC: XXX(x86g_calculate_add8x8); break;
+      case 0xFD: XXX(x86g_calculate_add16x4); break;
+      case 0xFE: XXX(x86g_calculate_add32x2); break;
 
-      case 0xEC: XXX(calculate_qadd8Sx8); break;
-      case 0xED: XXX(calculate_qadd16Sx4); break;
+      case 0xEC: XXX(x86g_calculate_qadd8Sx8); break;
+      case 0xED: XXX(x86g_calculate_qadd16Sx4); break;
 
-      case 0xDC: XXX(calculate_qadd8Ux8); break;
-      case 0xDD: XXX(calculate_qadd16Ux4); break;
+      case 0xDC: XXX(x86g_calculate_qadd8Ux8); break;
+      case 0xDD: XXX(x86g_calculate_qadd16Ux4); break;
 
-      case 0xF8: XXX(calculate_sub8x8);  break;
-      case 0xF9: XXX(calculate_sub16x4); break;
-      case 0xFA: XXX(calculate_sub32x2); break;
+      case 0xF8: XXX(x86g_calculate_sub8x8);  break;
+      case 0xF9: XXX(x86g_calculate_sub16x4); break;
+      case 0xFA: XXX(x86g_calculate_sub32x2); break;
 
-      case 0xE8: XXX(calculate_qsub8Sx8); break;
-      case 0xE9: XXX(calculate_qsub16Sx4); break;
+      case 0xE8: XXX(x86g_calculate_qsub8Sx8); break;
+      case 0xE9: XXX(x86g_calculate_qsub16Sx4); break;
 
-      case 0xD8: XXX(calculate_qsub8Ux8); break;
-      case 0xD9: XXX(calculate_qsub16Ux4); break;
+      case 0xD8: XXX(x86g_calculate_qsub8Ux8); break;
+      case 0xD9: XXX(x86g_calculate_qsub16Ux4); break;
 
-      case 0xE5: XXX(calculate_mulhi16x4); break;
-      case 0xD5: XXX(calculate_mullo16x4); break;
-      case 0xF5: XXX(calculate_pmaddwd); break;
+      case 0xE5: XXX(x86g_calculate_mulhi16x4); break;
+      case 0xD5: XXX(x86g_calculate_mullo16x4); break;
+      case 0xF5: XXX(x86g_calculate_pmaddwd); break;
 
-      case 0x74: XXX(calculate_cmpeq8x8); break;
-      case 0x75: XXX(calculate_cmpeq16x4); break;
-      case 0x76: XXX(calculate_cmpeq32x2); break;
+      case 0x74: XXX(x86g_calculate_cmpeq8x8); break;
+      case 0x75: XXX(x86g_calculate_cmpeq16x4); break;
+      case 0x76: XXX(x86g_calculate_cmpeq32x2); break;
 
-      case 0x64: XXX(calculate_cmpge8Sx8); break;
-      case 0x65: XXX(calculate_cmpge16Sx4); break;
-      case 0x66: XXX(calculate_cmpge32Sx2); break;
+      case 0x64: XXX(x86g_calculate_cmpge8Sx8); break;
+      case 0x65: XXX(x86g_calculate_cmpge16Sx4); break;
+      case 0x66: XXX(x86g_calculate_cmpge32Sx2); break;
 
-      case 0x6B: XXX(calculate_packssdw); break;
-      case 0x63: XXX(calculate_packsswb); break;
-      case 0x67: XXX(calculate_packuswb); break;
+      case 0x6B: XXX(x86g_calculate_packssdw); break;
+      case 0x63: XXX(x86g_calculate_packsswb); break;
+      case 0x67: XXX(x86g_calculate_packuswb); break;
 
-      case 0x68: XXX(calculate_punpckhbw); break;
-      case 0x69: XXX(calculate_punpckhwd); break;
-      case 0x6A: XXX(calculate_punpckhdq); break;
+      case 0x68: XXX(x86g_calculate_punpckhbw); break;
+      case 0x69: XXX(x86g_calculate_punpckhwd); break;
+      case 0x6A: XXX(x86g_calculate_punpckhdq); break;
 
-      case 0x60: XXX(calculate_punpcklbw); break;
-      case 0x61: XXX(calculate_punpcklwd); break;
-      case 0x62: XXX(calculate_punpckldq); break;
+      case 0x60: XXX(x86g_calculate_punpcklbw); break;
+      case 0x61: XXX(x86g_calculate_punpcklwd); break;
+      case 0x62: XXX(x86g_calculate_punpckldq); break;
 
-      case 0xF1: XXX(calculate_shl16x4); break;
-      case 0xF2: XXX(calculate_shl32x2); break;
-      case 0xF3: XXX(calculate_shl64x1); break;
+      case 0xF1: XXX(x86g_calculate_shl16x4); break;
+      case 0xF2: XXX(x86g_calculate_shl32x2); break;
+      case 0xF3: XXX(x86g_calculate_shl64x1); break;
 
-      case 0xD1: XXX(calculate_shr16Ux4); break;
-      case 0xD2: XXX(calculate_shr32Ux2); break;
-      case 0xD3: XXX(calculate_shr64Ux1); break;
+      case 0xD1: XXX(x86g_calculate_shr16Ux4); break;
+      case 0xD2: XXX(x86g_calculate_shr32Ux2); break;
+      case 0xD3: XXX(x86g_calculate_shr64Ux1); break;
 
-      case 0xE1: XXX(calculate_shr16Sx4); break;
-      case 0xE2: XXX(calculate_shr32Sx2); break;
+      case 0xE1: XXX(x86g_calculate_shr16Sx4); break;
+      case 0xE2: XXX(x86g_calculate_shr32Sx2); break;
 
       case 0xDB: break; /* AND */
       case 0xDF: break; /* ANDN */
@@ -5213,7 +5222,7 @@
  
    /* Side effect done; now get selected bit into Carry flag */
    /* Flags: C=selected bit, O,S,Z,A,P undefined, so are set to zero. */
-   stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(CC_OP_COPY) ));
+   stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(X86G_CC_OP_COPY) ));
    stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
    stmt( IRStmt_Put( 
             OFFB_CC_DEP1,
@@ -5284,13 +5293,13 @@
 
    /* Flags: Z is 1 iff source value is zero.  All others 
       are undefined -- we force them to zero. */
-   stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(CC_OP_COPY) ));
+   stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(X86G_CC_OP_COPY) ));
    stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
    stmt( IRStmt_Put( 
             OFFB_CC_DEP1,
             IRExpr_Mux0X( mkexpr(src8),
                           /* src==0 */
-                          mkU32(CC_MASK_Z),
+                          mkU32(X86G_CC_MASK_Z),
                           /* src!=0 */
                           mkU32(0)
                         )
@@ -5365,17 +5374,19 @@
 void codegen_SAHF ( void )
 {
    /* Set the flags to:
-      (calculate_flags_all() & CC_MASK_O)  -- retain the old O flag
-      | (%AH & (CC_MASK_S|CC_MASK_Z|CC_MASK_A|CC_MASK_P|CC_MASK_C)
+      (x86g_calculate_eflags_all() & X86G_CC_MASK_O) -- retain the old O flag
+      | (%AH & (X86G_CC_MASK_S|X86G_CC_MASK_Z|X86G_CC_MASK_A
+                |X86G_CC_MASK_P|X86G_CC_MASK_C))
    */
-   UInt   mask_SZACP = CC_MASK_S|CC_MASK_Z|CC_MASK_A|CC_MASK_C|CC_MASK_P;
+   UInt   mask_SZACP = X86G_CC_MASK_S|X86G_CC_MASK_Z|X86G_CC_MASK_A
+                       |X86G_CC_MASK_C|X86G_CC_MASK_P;
    IRTemp oldflags   = newTemp(Ity_I32);
-   assign( oldflags, mk_calculate_eflags_all() );
-   stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(CC_OP_COPY) ));
+   assign( oldflags, mk_x86g_calculate_eflags_all() );
+   stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(X86G_CC_OP_COPY) ));
    stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
    stmt( IRStmt_Put( OFFB_CC_DEP1,
          binop(Iop_Or32,
-               binop(Iop_And32, mkexpr(oldflags), mkU32(CC_MASK_O)),
+               binop(Iop_And32, mkexpr(oldflags), mkU32(X86G_CC_MASK_O)),
                binop(Iop_And32, 
                      binop(Iop_Shr32, getIReg(4, R_EAX), mkU8(8)),
                      mkU32(mask_SZACP))
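
In scalar form, using the architectural %eflags bit positions (C=0, P=2, A=4,
Z=6, S=7, O=11): keep the old O, take S,Z,A,C,P from %AH.  A standalone check:

    #include <assert.h>

    #define MASK_O      0x800u
    #define MASK_SZACP  0x0D5u   /* S|Z|A|C|P = bits 7,6,4,2,0 */

    static unsigned sahf_flags ( unsigned oldflags, unsigned eax )
    {
       unsigned ah = (eax >> 8) & 0xFF;
       return (oldflags & MASK_O) | (ah & MASK_SZACP);
    }

    int main ( void )
    {
       /* AH = 0x40 supplies Z; the old O (bit 11) survives. */
       assert(sahf_flags(0x800, 0x4000) == 0x840);
       return 0;
    }
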
@@ -5442,7 +5453,7 @@
    assign( acc, getIReg(size, R_EAX) );
    //assign( res, binop( mkSizedOp(ty,Iop_Sub8), mkexpr(acc), mkexpr(dest) ));
    setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty);
-   assign( cond8, unop(Iop_1Uto8, mk_calculate_condition(CondZ)) );
+   assign( cond8, unop(Iop_1Uto8, mk_x86g_calculate_condition(X86CondZ)) );
    assign( dest2, IRExpr_Mux0X(mkexpr(cond8), mkexpr(dest), mkexpr(src)) );
    assign( acc2,  IRExpr_Mux0X(mkexpr(cond8), mkexpr(dest), mkexpr(acc)) );
    putIReg(size, R_EAX, mkexpr(acc2));
@@ -5545,7 +5556,7 @@
 static
 UInt dis_cmov_E_G ( UChar       sorb,
                     Int         sz, 
-                    Condcode    cond,
+                    X86Condcode cond,
                     UInt        delta0 )
 {
    UChar rm  = getIByte(delta0);
@@ -5561,12 +5572,13 @@
       assign( tmpd, getIReg(sz, gregOfRM(rm)) );
 
       putIReg(sz, gregOfRM(rm),
-                  IRExpr_Mux0X( unop(Iop_1Uto8,mk_calculate_condition(cond)),
+                  IRExpr_Mux0X( unop(Iop_1Uto8,
+                                     mk_x86g_calculate_condition(cond)),
                                 mkexpr(tmpd),
                                 mkexpr(tmps) )
              );
       DIP("cmov%c%s %s,%s\n", nameISize(sz), 
-                              name_Condcode(cond),
+                              name_X86Condcode(cond),
                               nameIReg(sz,eregOfRM(rm)),
                               nameIReg(sz,gregOfRM(rm)));
       return 1+delta0;
@@ -5579,13 +5591,14 @@
       assign( tmpd, getIReg(sz, gregOfRM(rm)) );
 
       putIReg(sz, gregOfRM(rm),
-                  IRExpr_Mux0X( unop(Iop_1Uto8,mk_calculate_condition(cond)),
+                  IRExpr_Mux0X( unop(Iop_1Uto8,
+                                     mk_x86g_calculate_condition(cond)),
                                 mkexpr(tmpd),
                                 mkexpr(tmps) )
              );
 
       DIP("cmov%c%s %s,%s\n", nameISize(sz), 
-                              name_Condcode(cond),
+                              name_X86Condcode(cond),
                               dis_buf,
                               nameIReg(sz,gregOfRM(rm)));
       return len+delta0;
@@ -8036,9 +8049,9 @@
    case 0x7F: /* JGb/JNLEb (jump greater) */
       d32 = (((Addr32)guest_eip_bbstart)+delta+1) + getSDisp8(delta); 
       delta++;
-      jcc_01((Condcode)(opc - 0x70), (Addr32)(guest_eip_bbstart+delta), d32);
+      jcc_01((X86Condcode)(opc - 0x70), (Addr32)(guest_eip_bbstart+delta), d32);
       whatNext = Dis_StopHere;
-      DIP("j%s-8 0x%x\n", name_Condcode(opc - 0x70), d32);
+      DIP("j%s-8 0x%x\n", name_X86Condcode(opc - 0x70), d32);
       break;
 
    case 0xE3: /* JECXZ or perhaps JCXZ, depending on OSO ?  Intel
@@ -8421,14 +8434,15 @@
       assign(t1, widenUto32(loadLE(szToITy(sz),mkexpr(t2))));
       putIReg(4, R_ESP, binop(Iop_Add32, mkexpr(t2), mkU32(sz)));
       /* t1 is the flag word.  Mask out everything except OSZACP and 
-         set the flags thunk to CC_OP_COPY. */
-      stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(CC_OP_COPY) ));
+         set the flags thunk to X86G_CC_OP_COPY. */
+      stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(X86G_CC_OP_COPY) ));
       stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
       stmt( IRStmt_Put( OFFB_CC_DEP1, 
                         binop(Iop_And32,
                               mkexpr(t1), 
-                              mkU32( CC_MASK_C | CC_MASK_P | CC_MASK_A 
-                                     | CC_MASK_Z | CC_MASK_S| CC_MASK_O )
+                              mkU32( X86G_CC_MASK_C | X86G_CC_MASK_P 
+                                     | X86G_CC_MASK_A | X86G_CC_MASK_Z 
+                              mkU32( X86G_CC_MASK_C | X86G_CC_MASK_P 
+                                     | X86G_CC_MASK_A | X86G_CC_MASK_Z 
+                                     | X86G_CC_MASK_S | X86G_CC_MASK_O )
                              )
                        )
           );
@@ -8597,7 +8611,7 @@
       putIReg(4, R_ESP, mkexpr(t1) );
 
       t2 = newTemp(Ity_I32);
-      assign( t2, mk_calculate_eflags_all() );
+      assign( t2, mk_x86g_calculate_eflags_all() );
 
       /* Patch in the D flag.  This can simply be the inversion
         of bit 10 of the guest state word at OFFB_DFLAG. */
@@ -8769,8 +8783,8 @@
 //-- 
       case 0xAE: sz = 1;   /* REPNE SCAS<sz> */
       case 0xAF:
-         dis_REP_op ( CondNZ, dis_SCAS, sz, eip_orig,
-                              guest_eip_bbstart+delta, "repne scas" );
+         dis_REP_op ( X86CondNZ, dis_SCAS, sz, eip_orig,
+                                 guest_eip_bbstart+delta, "repne scas" );
          break;
 
       default:
@@ -8792,20 +8806,20 @@
       switch (abyte) {
       case 0xA4: sz = 1;   /* REP MOVS<sz> */
       case 0xA5:
-         dis_REP_op ( CondAlways, dis_MOVS, sz, eip_orig, 
-                                  guest_eip_bbstart+delta, "rep movs" );
+         dis_REP_op ( X86CondAlways, dis_MOVS, sz, eip_orig, 
+                                     guest_eip_bbstart+delta, "rep movs" );
          break;
 
       case 0xA6: sz = 1;   /* REPE CMP<sz> */
       case 0xA7:
-         dis_REP_op ( CondZ, dis_CMPS, sz, eip_orig, 
-                             guest_eip_bbstart+delta, "repe cmps" );
+         dis_REP_op ( X86CondZ, dis_CMPS, sz, eip_orig, 
+                                guest_eip_bbstart+delta, "repe cmps" );
          break;
 
       case 0xAA: sz = 1;   /* REP STOS<sz> */
       case 0xAB:
-         dis_REP_op ( CondAlways, dis_STOS, sz, eip_orig, 
-                                  guest_eip_bbstart+delta, "rep stos" );
+         dis_REP_op ( X86CondAlways, dis_STOS, sz, eip_orig, 
+                                     guest_eip_bbstart+delta, "rep stos" );
          break;
 //-- 
 //--       case 0xAE: sz = 1;   /* REPE SCAS<sz> */
@@ -9160,7 +9174,7 @@
       case 0x4D: /* CMOVGEb/CMOVNLb (cmov greater or equal) */
       case 0x4E: /* CMOVLEb/CMOVNGb (cmov less or equal) */
       case 0x4F: /* CMOVGb/CMOVNLEb (cmov greater) */
-         delta = dis_cmov_E_G(sorb, sz, (Condcode)(opc - 0x40), delta);
+         delta = dis_cmov_E_G(sorb, sz, (X86Condcode)(opc - 0x40), delta);
          break;
 
       /* =-=-=-=-=-=-=-=-=- CMPXCHG -=-=-=-=-=-=-=-=-=-= */
@@ -9184,7 +9198,8 @@
          */
          IRDirty* d = unsafeIRDirty_0_N ( 
                          0/*regparms*/, 
-                         "dirtyhelper_CPUID", &dirtyhelper_CPUID, 
+                         "x86g_dirtyhelper_CPUID", 
+                         &x86g_dirtyhelper_CPUID, 
                          mkIRExprVec_0()
                       );
          /* declare guest state effects */
@@ -9307,9 +9322,11 @@
       case 0x8F: /* JGb/JNLEb (jump greater) */
          d32 = (((Addr32)guest_eip_bbstart)+delta+4) + getUDisp32(delta); 
          delta += 4;
-         jcc_01((Condcode)(opc - 0x80), (Addr32)(guest_eip_bbstart+delta), d32);
+         jcc_01( (X86Condcode)(opc - 0x80), 
+                 (Addr32)(guest_eip_bbstart+delta), 
+                 d32 );
          whatNext = Dis_StopHere;
-         DIP("j%s-32 0x%x\n", name_Condcode(opc - 0x80), d32);
+         DIP("j%s-32 0x%x\n", name_X86Condcode(opc - 0x80), d32);
          break;
 
 
@@ -9361,18 +9378,18 @@
      case 0x9E: /* set-LEb/set-NGb (set less or equal) */
      case 0x9F: /* set-Gb/set-NLEb (set greater) */
          t1 = newTemp(Ity_I8);
-         assign( t1, unop(Iop_1Uto8,mk_calculate_condition(opc-0x90)) );
+         assign( t1, unop(Iop_1Uto8,mk_x86g_calculate_condition(opc-0x90)) );
          modrm = getIByte(delta);
          if (epartIsReg(modrm)) {
             delta++;
             putIReg(1, eregOfRM(modrm), mkexpr(t1));
-            DIP("set%s %s\n", name_Condcode(opc-0x90), 
+            DIP("set%s %s\n", name_X86Condcode(opc-0x90), 
                               nameIReg(1,eregOfRM(modrm)));
          } else {
            addr = disAMode ( &alen, sorb, delta, dis_buf );
            delta += alen;
            storeLE( mkexpr(addr), mkexpr(t1) );
-           DIP("set%s %s\n", name_Condcode(opc-0x90), dis_buf);
+           DIP("set%s %s\n", name_X86Condcode(opc-0x90), dis_buf);
          }
          break;
 
diff --git a/priv/main/vex_main.c b/priv/main/vex_main.c
index a127d34..9b94970 100644
--- a/priv/main/vex_main.c
+++ b/priv/main/vex_main.c
@@ -35,14 +35,18 @@
 
 #include "libvex.h"
 #include "libvex_guest_x86.h"
+#include "libvex_guest_arm.h"
 
 #include "main/vex_globals.h"
 #include "main/vex_util.h"
 #include "host-generic/h_generic_regs.h"
-#include "host-x86/hdefs.h"
-#include "guest-x86/gdefs.h"
 #include "ir/iropt.h"
 
+#include "host-x86/hdefs.h"
+
+#include "guest-x86/gdefs.h"
+#include "guest-arm/gdefs.h"
+
 
 /* This file contains the top level interface to the library. */
 
@@ -216,9 +220,12 @@
    vassert(vex_initdone);
    LibVEX_ClearTemporary(False);
 
+
    /* First off, check that the guest and host insn sets
       are supported. */
+
    switch (iset_host) {
+
       case InsnSetX86:
          getAllocableRegs_X86 ( &n_available_real_regs,
                                 &available_real_regs );
@@ -234,23 +241,37 @@
 	 host_is_bigendian = False;
          host_word_type    = Ity_I32;
          break;
+
       default:
          vpanic("LibVEX_Translate: unsupported target insn set");
    }
 
+
    switch (iset_guest) {
+
       case InsnSetX86:
          preciseMemExnsFn = guest_x86_state_requires_precise_mem_exns;
-         bbToIR           = bbToIR_X86Instr;
-         specHelper       = x86guest_spechelper;
+         bbToIR           = bbToIR_X86;
+         specHelper       = guest_x86_spechelper;
          guest_sizeB      = sizeof(VexGuestX86State);
          guest_word_type  = Ity_I32;
          guest_layout     = &x86guest_layout;
          break;
+
+      case InsnSetARM:
+         preciseMemExnsFn = guest_arm_state_requires_precise_mem_exns;
+         bbToIR           = NULL; /*bbToIR_ARM;*/
+         specHelper       = guest_arm_spechelper;
+         guest_sizeB      = sizeof(VexGuestARMState);
+         guest_word_type  = Ity_I32;
+         guest_layout     = &armGuest_layout;
+         break;
+
       default:
          vpanic("LibVEX_Translate: unsupported guest insn set");
    }
 
+
    if (vex_traceflags & VEX_TRACE_FE)
       vex_printf("\n------------------------" 
                    " Front end "
diff --git a/pub/libvex_guest_arm.h b/pub/libvex_guest_arm.h
index 49ce571..4ab7463 100644
--- a/pub/libvex_guest_arm.h
+++ b/pub/libvex_guest_arm.h
@@ -38,10 +38,13 @@
 
 #include "libvex_basictypes.h"
 
+
 /*---------------------------------------------------------------*/
 /*--- Vex's representation of the ARM CPU state.              ---*/
 /*---------------------------------------------------------------*/
 
+/* Is R13 traditionally used as the stack pointer? */
+
 typedef
    struct {
       UInt  guest_R0;
@@ -58,63 +61,29 @@
       UInt  guest_R11;
       UInt  guest_R12;
       UInt  guest_R13;
-      UInt  guest_R14;
 
+      /* aka the link register */
+      UInt  guest_R14; 
+
+      /* Program counter. */
       UInt  guest_R15;
 
-      UInt  guest_PSW;
-
+      /* System call number copied in here from swi insn literal
+         field. */
       UInt  guest_SYSCALLNO;
 
+      /* 3-word thunk used to calculate N(sign) Z(zero) C(carry,
+         unsigned overflow) and V(signed overflow) flags. */
+      UInt  guest_CC_OP;
+      UInt  guest_CC_DEP1;
+      UInt  guest_CC_DEP2;
+
       /* Padding to make it have an 8-aligned size */
       /* UInt   padding; */
    }
    VexGuestARMState;
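
No ARM flag helpers exist yet in this change, but by analogy with the x86
thunk, evaluating N and Z for a subtract-style CC_OP could look like the
following purely illustrative sketch (all names invented):

    #include <assert.h>

    /* Hypothetical: N and Z for a thunk recording (dep1 - dep2). */
    static unsigned arm_nz_after_sub ( unsigned dep1, unsigned dep2 )
    {
       unsigned res = dep1 - dep2;
       unsigned n = (res >> 31) & 1;   /* sign bit of the result */
       unsigned z = (res == 0);        /* result is zero */
       return (n << 1) | z;
    }

    int main ( void )
    {
       assert(arm_nz_after_sub(5, 5) == 1);   /* Z set */
       assert(arm_nz_after_sub(0, 1) == 2);   /* N set */
       return 0;
    }
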
 
 
-
-/*---------------------------------------------------------------*/
-/*--- Utility functions for arm guest stuff.                  ---*/
-/*---------------------------------------------------------------*/
-
-#if 0
-/* ALL THE FOLLOWING ARE VISIBLE TO LIBRARY CLIENT */
-
-/* Initialise all guest x86 state.  The FPU is put in default mode. */
-extern
-void LibVEX_GuestX86_initialise ( /*OUT*/VexGuestX86State* vex_state );
-
-
-/* Convert a saved x87 FPU image (as created by fsave) and write it
-   into the supplied VexGuestX86State structure.  The non-FP parts of
-   said structure are left unchanged.  
-*/
-extern 
-void LibVEX_GuestX86_put_x87 ( /*IN*/UChar* x87_state, 
-                               /*OUT*/VexGuestX86State* vex_state );
-
-/* Extract from the supplied VexGuestX86State structure, an x87 FPU
-   image. */
-extern 
-void LibVEX_GuestX86_get_x87 ( /*IN*/VexGuestX86State* vex_state, 
-                               /*OUT*/UChar* x87_state );
-
-
-/* Given a 32-bit word containing native x86 %eflags values, set the
-   eflag-related fields in the supplied VexGuestX86State accordingly.
-   All other fields are left unchanged.  */
-
-extern
-void LibVEX_GuestX86_put_eflags ( UInt eflags_native,
-                                  /*OUT*/VexGuestX86State* vex_state );
-
-/* Extract from the supplied VexGuestX86State structure the
-   corresponding native %eflags value. */
-
-extern 
-UInt LibVEX_GuestX86_get_eflags ( /*IN*/VexGuestX86State* vex_state );
-#endif /* 0 */
-
 #endif /* ndef __LIBVEX_PUB_GUEST_ARM_H */
 
 /*---------------------------------------------------------------*/