arch/tile: finish enabling support for TILE-Gx 64-bit chip

This support was partially present in the existing code (look for
"__tilegx__" ifdefs) but with this change you can build a working
kernel using the TILE-Gx toolchain and ARCH=tilegx.

Most of these files are new, generally adding a foo_64.c file
where previously there was just a foo_32.c file.

The ARCH=tilegx directive redirects to arch/tile, not arch/tilegx,
using the existing SRCARCH mechanism in the top-level Makefile.
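
For reference, the top-level Makefile redirection is just a SRCARCH
override, roughly of this shape:

  ifeq ($(ARCH),tilegx)
        SRCARCH := tile
  endif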

Changes to existing files:

- <asm/bitops.h> and <asm/bitops_32.h> changed to factor the
  include of <asm-generic/bitops/non-atomic.h> in the common header.

- <asm/compat.h> and arch/tile/kernel/compat.c changed to remove
  the "const" markers I had put on compat_sys_execve() to match
  some recent similar changes to the non-compat execve; it turns
  out the compat version was never "upgraded" to use const.

- <asm/opcode-tile_64.h> and <asm/opcode_constants_64.h> had
  accidentally been added with the 32-bit contents; they now have
  the proper 64-bit contents.

Finally, I had to hack the existing hacky drivers/input/input-compat.h
to add yet another "#ifdef" for INPUT_COMPAT_TEST (same as x86_64).
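
The new case follows the pattern of the existing architecture tests
in that header; as a sketch (not the literal hunk), it is along the
lines of:

  #elif defined(CONFIG_TILE)
  # define INPUT_COMPAT_TEST is_compat_task()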

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Dmitry Torokhov <dmitry.torokhov@gmail.com> [drivers/input]
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
new file mode 100644
index 0000000..3217052
--- /dev/null
+++ b/arch/tile/include/asm/atomic_64.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ * Do not include directly; use <asm/atomic.h>.
+ */
+
+#ifndef _ASM_TILE_ATOMIC_64_H
+#define _ASM_TILE_ATOMIC_64_H
+
+#ifndef __ASSEMBLY__
+
+#include <arch/spr_def.h>
+
+/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */
+
+#define atomic_set(v, i) ((v)->counter = (i))
+
+/*
+ * The smp_mb() calls throughout exist because Linux requires a full
+ * memory barrier before and after any routine that both updates
+ * memory and returns a value.
+ */
+
+static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
+{
+	int val;
+	__insn_mtspr(SPR_CMPEXCH_VALUE, o);
+	smp_mb();  /* barrier for proper semantics */
+	val = __insn_cmpexch4((void *)&v->counter, n);
+	smp_mb();  /* barrier for proper semantics */
+	return val;
+}
+
+static inline int atomic_xchg(atomic_t *v, int n)
+{
+	int val;
+	smp_mb();  /* barrier for proper semantics */
+	val = __insn_exch4((void *)&v->counter, n);
+	smp_mb();  /* barrier for proper semantics */
+	return val;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+	__insn_fetchadd4((void *)&v->counter, i);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	int val;
+	smp_mb();  /* barrier for proper semantics */
+	val = __insn_fetchadd4((void *)&v->counter, i) + i;
+	barrier();  /* the "+ i" above will wait on memory */
+	return val;
+}
+
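+/*
+ * There is no "add unless equal" instruction, so atomic_add_unless()
+ * is an open-coded cmpxchg loop; each failed cmpxchg returns the
+ * latest counter value, so the loop converges.
+ */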
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int guess, oldval = v->counter;
+	do {
+		if (oldval == u)
+			break;
+		guess = oldval;
+		oldval = atomic_cmpxchg(v, guess, guess + a);
+	} while (guess != oldval);
+	return oldval != u;
+}
+
+/* Now the true 64-bit operations. */
+
+#define ATOMIC64_INIT(i)	{ (i) }
+
+#define atomic64_read(v)		((v)->counter)
+#define atomic64_set(v, i)		((v)->counter = (i))
+
+static inline long atomic64_cmpxchg(atomic64_t *v, long o, long n)
+{
+	long val;
+	smp_mb();  /* barrier for proper semantics */
+	__insn_mtspr(SPR_CMPEXCH_VALUE, o);
+	val = __insn_cmpexch((void *)&v->counter, n);
+	smp_mb();  /* barrier for proper semantics */
+	return val;
+}
+
+static inline long atomic64_xchg(atomic64_t *v, long n)
+{
+	long val;
+	smp_mb();  /* barrier for proper semantics */
+	val = __insn_exch((void *)&v->counter, n);
+	smp_mb();  /* barrier for proper semantics */
+	return val;
+}
+
+static inline void atomic64_add(long i, atomic64_t *v)
+{
+	__insn_fetchadd((void *)&v->counter, i);
+}
+
+static inline long atomic64_add_return(long i, atomic64_t *v)
+{
+	long val;
+	smp_mb();  /* barrier for proper semantics */
+	val = __insn_fetchadd((void *)&v->counter, i) + i;
+	barrier();  /* the "+ i" above will wait on memory */
+	return val;
+}
+
+static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+	long guess, oldval = v->counter;
+	do {
+		if (oldval == u)
+			break;
+		guess = oldval;
+		oldval = atomic64_cmpxchg(v, guess, guess + a);
+	} while (guess != oldval);
+	return oldval != u;
+}
+
+#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
+#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
+#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
+#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
+#define atomic64_inc(v)			atomic64_add(1, (v))
+#define atomic64_dec(v)			atomic64_sub(1, (v))
+
+#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
+#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
+#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
+#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)
+
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
+
+/* Atomic dec and inc don't implement barrier, so provide them if needed. */
+#define smp_mb__before_atomic_dec()	smp_mb()
+#define smp_mb__after_atomic_dec()	smp_mb()
+#define smp_mb__before_atomic_inc()	smp_mb()
+#define smp_mb__after_atomic_inc()	smp_mb()
+
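+/*
+ * xchg() and cmpxchg() dispatch on operand size: 4-byte quantities
+ * use the 32-bit atomic routines above, 8-byte quantities the
+ * atomic_long_t routines, and any other size fails at link time via
+ * the __*_called_with_bad_pointer() stubs.
+ */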
+#define xchg(ptr, x)							\
+	((typeof(*(ptr)))						\
+	 ((sizeof(*(ptr)) == sizeof(atomic_t)) ?			\
+	  atomic_xchg((atomic_t *)(ptr), (long)(x)) :			\
+	  (sizeof(*(ptr)) == sizeof(atomic_long_t)) ?			\
+	  atomic_long_xchg((atomic_long_t *)(ptr), (long)(x)) :		\
+	  __xchg_called_with_bad_pointer()))
+
+#define cmpxchg(ptr, o, n)						\
+	((typeof(*(ptr)))						\
+	 ((sizeof(*(ptr)) == sizeof(atomic_t)) ?			\
+	  atomic_cmpxchg((atomic_t *)(ptr), (long)(o), (long)(n)) :	\
+	  (sizeof(*(ptr)) == sizeof(atomic_long_t)) ?			\
+	  atomic_long_cmpxchg((atomic_long_t *)(ptr), (long)(o), (long)(n)) : \
+	  __cmpxchg_called_with_bad_pointer()))
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_TILE_ATOMIC_64_H */
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
index 132e6bb..16f1fa5 100644
--- a/arch/tile/include/asm/bitops.h
+++ b/arch/tile/include/asm/bitops.h
@@ -122,6 +122,7 @@
 #include <asm-generic/bitops/lock.h>
 #include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/non-atomic.h>
 #include <asm-generic/bitops/le.h>
 
 #endif /* _ASM_TILE_BITOPS_H */
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h
index 2638be5..d31ab90 100644
--- a/arch/tile/include/asm/bitops_32.h
+++ b/arch/tile/include/asm/bitops_32.h
@@ -126,7 +126,6 @@
 #define smp_mb__before_clear_bit()	smp_mb()
 #define smp_mb__after_clear_bit()	do {} while (0)
 
-#include <asm-generic/bitops/non-atomic.h>
 #include <asm-generic/bitops/ext2-atomic.h>
 
 #endif /* _ASM_TILE_BITOPS_32_H */
diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
new file mode 100644
index 0000000..99615e8
--- /dev/null
+++ b/arch/tile/include/asm/bitops_64.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#ifndef _ASM_TILE_BITOPS_64_H
+#define _ASM_TILE_BITOPS_64_H
+
+#include <linux/compiler.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+/* See <asm/bitops.h> for API comments. */
+
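+/*
+ * The plain set/clear/change routines have no memory-barrier
+ * semantics in Linux, so the raw fetch* atomics suffice here;
+ * ordering is supplied by the smp_mb__*() hooks below and by the
+ * explicit fences in the test_and_*() routines.
+ */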
+static inline void set_bit(unsigned nr, volatile unsigned long *addr)
+{
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask);
+}
+
+static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
+{
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);
+}
+
+#define smp_mb__before_clear_bit()	smp_mb()
+#define smp_mb__after_clear_bit()	smp_mb()
+
+
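+/*
+ * There is no atomic fetch-xor instruction, so change_bit() is
+ * implemented as a cmpxchg loop on the containing word.
+ */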
+static inline void change_bit(unsigned nr, volatile unsigned long *addr)
+{
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	long guess, oldval;
+	addr += nr / BITS_PER_LONG;
+	oldval = *addr;
+	do {
+		guess = oldval;
+		oldval = atomic64_cmpxchg((atomic64_t *)addr,
+					  guess, guess ^ mask);
+	} while (guess != oldval);
+}
+
+
+/*
+ * The test_and_xxx_bit() routines require a memory fence before we
+ * start the operation, and after the operation completes.  We use
+ * smp_mb() before, and rely on the "!= 0" comparison, plus a compiler
+ * barrier(), to block until the atomic op is complete.
+ */
+
+static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
+{
+	int val;
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	smp_mb();  /* barrier for proper semantics */
+	val = (__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask)
+	       & mask) != 0;
+	barrier();
+	return val;
+}
+
+
+static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
+{
+	int val;
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	smp_mb();  /* barrier for proper semantics */
+	val = (__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask)
+	       & mask) != 0;
+	barrier();
+	return val;
+}
+
+
+static inline int test_and_change_bit(unsigned nr,
+				      volatile unsigned long *addr)
+{
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	long guess, oldval;
+	addr += nr / BITS_PER_LONG;
+	oldval = *addr;
+	do {
+		guess = oldval;
+		oldval = atomic64_cmpxchg((atomic64_t *)addr,
+					  guess, guess ^ mask);
+	} while (guess != oldval);
+	return (oldval & mask) != 0;
+}
+
+#define ext2_set_bit_atomic(lock, nr, addr)			\
+	test_and_set_bit((nr), (unsigned long *)(addr))
+#define ext2_clear_bit_atomic(lock, nr, addr)			\
+	test_and_clear_bit((nr), (unsigned long *)(addr))
+
+#endif /* _ASM_TILE_BITOPS_64_H */
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index c3ae570..bf95f55 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -215,8 +215,8 @@
 struct compat_siginfo;
 struct compat_sigaltstack;
 long compat_sys_execve(const char __user *path,
-		       const compat_uptr_t __user *argv,
-		       const compat_uptr_t __user *envp, struct pt_regs *);
+		       compat_uptr_t __user *argv,
+		       compat_uptr_t __user *envp, struct pt_regs *);
 long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
 			     struct compat_sigaction __user *oact,
 			     size_t sigsetsize);
diff --git a/arch/tile/include/asm/opcode-tile_64.h b/arch/tile/include/asm/opcode-tile_64.h
index 03df7b1..c063346 100644
--- a/arch/tile/include/asm/opcode-tile_64.h
+++ b/arch/tile/include/asm/opcode-tile_64.h
@@ -5,863 +5,711 @@
 #ifndef opcode_tile_h
 #define opcode_tile_h
 
-typedef unsigned long long tile_bundle_bits;
+typedef unsigned long long tilegx_bundle_bits;
 
 
 enum
 {
-  TILE_MAX_OPERANDS = 5 /* mm */
+  TILEGX_MAX_OPERANDS = 4 /* bfexts */
 };
 
 typedef enum
 {
-  TILE_OPC_BPT,
-  TILE_OPC_INFO,
-  TILE_OPC_INFOL,
-  TILE_OPC_J,
-  TILE_OPC_JAL,
-  TILE_OPC_MOVE,
-  TILE_OPC_MOVE_SN,
-  TILE_OPC_MOVEI,
-  TILE_OPC_MOVEI_SN,
-  TILE_OPC_MOVELI,
-  TILE_OPC_MOVELI_SN,
-  TILE_OPC_MOVELIS,
-  TILE_OPC_PREFETCH,
-  TILE_OPC_RAISE,
-  TILE_OPC_ADD,
-  TILE_OPC_ADD_SN,
-  TILE_OPC_ADDB,
-  TILE_OPC_ADDB_SN,
-  TILE_OPC_ADDBS_U,
-  TILE_OPC_ADDBS_U_SN,
-  TILE_OPC_ADDH,
-  TILE_OPC_ADDH_SN,
-  TILE_OPC_ADDHS,
-  TILE_OPC_ADDHS_SN,
-  TILE_OPC_ADDI,
-  TILE_OPC_ADDI_SN,
-  TILE_OPC_ADDIB,
-  TILE_OPC_ADDIB_SN,
-  TILE_OPC_ADDIH,
-  TILE_OPC_ADDIH_SN,
-  TILE_OPC_ADDLI,
-  TILE_OPC_ADDLI_SN,
-  TILE_OPC_ADDLIS,
-  TILE_OPC_ADDS,
-  TILE_OPC_ADDS_SN,
-  TILE_OPC_ADIFFB_U,
-  TILE_OPC_ADIFFB_U_SN,
-  TILE_OPC_ADIFFH,
-  TILE_OPC_ADIFFH_SN,
-  TILE_OPC_AND,
-  TILE_OPC_AND_SN,
-  TILE_OPC_ANDI,
-  TILE_OPC_ANDI_SN,
-  TILE_OPC_AULI,
-  TILE_OPC_AVGB_U,
-  TILE_OPC_AVGB_U_SN,
-  TILE_OPC_AVGH,
-  TILE_OPC_AVGH_SN,
-  TILE_OPC_BBNS,
-  TILE_OPC_BBNS_SN,
-  TILE_OPC_BBNST,
-  TILE_OPC_BBNST_SN,
-  TILE_OPC_BBS,
-  TILE_OPC_BBS_SN,
-  TILE_OPC_BBST,
-  TILE_OPC_BBST_SN,
-  TILE_OPC_BGEZ,
-  TILE_OPC_BGEZ_SN,
-  TILE_OPC_BGEZT,
-  TILE_OPC_BGEZT_SN,
-  TILE_OPC_BGZ,
-  TILE_OPC_BGZ_SN,
-  TILE_OPC_BGZT,
-  TILE_OPC_BGZT_SN,
-  TILE_OPC_BITX,
-  TILE_OPC_BITX_SN,
-  TILE_OPC_BLEZ,
-  TILE_OPC_BLEZ_SN,
-  TILE_OPC_BLEZT,
-  TILE_OPC_BLEZT_SN,
-  TILE_OPC_BLZ,
-  TILE_OPC_BLZ_SN,
-  TILE_OPC_BLZT,
-  TILE_OPC_BLZT_SN,
-  TILE_OPC_BNZ,
-  TILE_OPC_BNZ_SN,
-  TILE_OPC_BNZT,
-  TILE_OPC_BNZT_SN,
-  TILE_OPC_BYTEX,
-  TILE_OPC_BYTEX_SN,
-  TILE_OPC_BZ,
-  TILE_OPC_BZ_SN,
-  TILE_OPC_BZT,
-  TILE_OPC_BZT_SN,
-  TILE_OPC_CLZ,
-  TILE_OPC_CLZ_SN,
-  TILE_OPC_CRC32_32,
-  TILE_OPC_CRC32_32_SN,
-  TILE_OPC_CRC32_8,
-  TILE_OPC_CRC32_8_SN,
-  TILE_OPC_CTZ,
-  TILE_OPC_CTZ_SN,
-  TILE_OPC_DRAIN,
-  TILE_OPC_DTLBPR,
-  TILE_OPC_DWORD_ALIGN,
-  TILE_OPC_DWORD_ALIGN_SN,
-  TILE_OPC_FINV,
-  TILE_OPC_FLUSH,
-  TILE_OPC_FNOP,
-  TILE_OPC_ICOH,
-  TILE_OPC_ILL,
-  TILE_OPC_INTHB,
-  TILE_OPC_INTHB_SN,
-  TILE_OPC_INTHH,
-  TILE_OPC_INTHH_SN,
-  TILE_OPC_INTLB,
-  TILE_OPC_INTLB_SN,
-  TILE_OPC_INTLH,
-  TILE_OPC_INTLH_SN,
-  TILE_OPC_INV,
-  TILE_OPC_IRET,
-  TILE_OPC_JALB,
-  TILE_OPC_JALF,
-  TILE_OPC_JALR,
-  TILE_OPC_JALRP,
-  TILE_OPC_JB,
-  TILE_OPC_JF,
-  TILE_OPC_JR,
-  TILE_OPC_JRP,
-  TILE_OPC_LB,
-  TILE_OPC_LB_SN,
-  TILE_OPC_LB_U,
-  TILE_OPC_LB_U_SN,
-  TILE_OPC_LBADD,
-  TILE_OPC_LBADD_SN,
-  TILE_OPC_LBADD_U,
-  TILE_OPC_LBADD_U_SN,
-  TILE_OPC_LH,
-  TILE_OPC_LH_SN,
-  TILE_OPC_LH_U,
-  TILE_OPC_LH_U_SN,
-  TILE_OPC_LHADD,
-  TILE_OPC_LHADD_SN,
-  TILE_OPC_LHADD_U,
-  TILE_OPC_LHADD_U_SN,
-  TILE_OPC_LNK,
-  TILE_OPC_LNK_SN,
-  TILE_OPC_LW,
-  TILE_OPC_LW_SN,
-  TILE_OPC_LW_NA,
-  TILE_OPC_LW_NA_SN,
-  TILE_OPC_LWADD,
-  TILE_OPC_LWADD_SN,
-  TILE_OPC_LWADD_NA,
-  TILE_OPC_LWADD_NA_SN,
-  TILE_OPC_MAXB_U,
-  TILE_OPC_MAXB_U_SN,
-  TILE_OPC_MAXH,
-  TILE_OPC_MAXH_SN,
-  TILE_OPC_MAXIB_U,
-  TILE_OPC_MAXIB_U_SN,
-  TILE_OPC_MAXIH,
-  TILE_OPC_MAXIH_SN,
-  TILE_OPC_MF,
-  TILE_OPC_MFSPR,
-  TILE_OPC_MINB_U,
-  TILE_OPC_MINB_U_SN,
-  TILE_OPC_MINH,
-  TILE_OPC_MINH_SN,
-  TILE_OPC_MINIB_U,
-  TILE_OPC_MINIB_U_SN,
-  TILE_OPC_MINIH,
-  TILE_OPC_MINIH_SN,
-  TILE_OPC_MM,
-  TILE_OPC_MNZ,
-  TILE_OPC_MNZ_SN,
-  TILE_OPC_MNZB,
-  TILE_OPC_MNZB_SN,
-  TILE_OPC_MNZH,
-  TILE_OPC_MNZH_SN,
-  TILE_OPC_MTSPR,
-  TILE_OPC_MULHH_SS,
-  TILE_OPC_MULHH_SS_SN,
-  TILE_OPC_MULHH_SU,
-  TILE_OPC_MULHH_SU_SN,
-  TILE_OPC_MULHH_UU,
-  TILE_OPC_MULHH_UU_SN,
-  TILE_OPC_MULHHA_SS,
-  TILE_OPC_MULHHA_SS_SN,
-  TILE_OPC_MULHHA_SU,
-  TILE_OPC_MULHHA_SU_SN,
-  TILE_OPC_MULHHA_UU,
-  TILE_OPC_MULHHA_UU_SN,
-  TILE_OPC_MULHHSA_UU,
-  TILE_OPC_MULHHSA_UU_SN,
-  TILE_OPC_MULHL_SS,
-  TILE_OPC_MULHL_SS_SN,
-  TILE_OPC_MULHL_SU,
-  TILE_OPC_MULHL_SU_SN,
-  TILE_OPC_MULHL_US,
-  TILE_OPC_MULHL_US_SN,
-  TILE_OPC_MULHL_UU,
-  TILE_OPC_MULHL_UU_SN,
-  TILE_OPC_MULHLA_SS,
-  TILE_OPC_MULHLA_SS_SN,
-  TILE_OPC_MULHLA_SU,
-  TILE_OPC_MULHLA_SU_SN,
-  TILE_OPC_MULHLA_US,
-  TILE_OPC_MULHLA_US_SN,
-  TILE_OPC_MULHLA_UU,
-  TILE_OPC_MULHLA_UU_SN,
-  TILE_OPC_MULHLSA_UU,
-  TILE_OPC_MULHLSA_UU_SN,
-  TILE_OPC_MULLL_SS,
-  TILE_OPC_MULLL_SS_SN,
-  TILE_OPC_MULLL_SU,
-  TILE_OPC_MULLL_SU_SN,
-  TILE_OPC_MULLL_UU,
-  TILE_OPC_MULLL_UU_SN,
-  TILE_OPC_MULLLA_SS,
-  TILE_OPC_MULLLA_SS_SN,
-  TILE_OPC_MULLLA_SU,
-  TILE_OPC_MULLLA_SU_SN,
-  TILE_OPC_MULLLA_UU,
-  TILE_OPC_MULLLA_UU_SN,
-  TILE_OPC_MULLLSA_UU,
-  TILE_OPC_MULLLSA_UU_SN,
-  TILE_OPC_MVNZ,
-  TILE_OPC_MVNZ_SN,
-  TILE_OPC_MVZ,
-  TILE_OPC_MVZ_SN,
-  TILE_OPC_MZ,
-  TILE_OPC_MZ_SN,
-  TILE_OPC_MZB,
-  TILE_OPC_MZB_SN,
-  TILE_OPC_MZH,
-  TILE_OPC_MZH_SN,
-  TILE_OPC_NAP,
-  TILE_OPC_NOP,
-  TILE_OPC_NOR,
-  TILE_OPC_NOR_SN,
-  TILE_OPC_OR,
-  TILE_OPC_OR_SN,
-  TILE_OPC_ORI,
-  TILE_OPC_ORI_SN,
-  TILE_OPC_PACKBS_U,
-  TILE_OPC_PACKBS_U_SN,
-  TILE_OPC_PACKHB,
-  TILE_OPC_PACKHB_SN,
-  TILE_OPC_PACKHS,
-  TILE_OPC_PACKHS_SN,
-  TILE_OPC_PACKLB,
-  TILE_OPC_PACKLB_SN,
-  TILE_OPC_PCNT,
-  TILE_OPC_PCNT_SN,
-  TILE_OPC_RL,
-  TILE_OPC_RL_SN,
-  TILE_OPC_RLI,
-  TILE_OPC_RLI_SN,
-  TILE_OPC_S1A,
-  TILE_OPC_S1A_SN,
-  TILE_OPC_S2A,
-  TILE_OPC_S2A_SN,
-  TILE_OPC_S3A,
-  TILE_OPC_S3A_SN,
-  TILE_OPC_SADAB_U,
-  TILE_OPC_SADAB_U_SN,
-  TILE_OPC_SADAH,
-  TILE_OPC_SADAH_SN,
-  TILE_OPC_SADAH_U,
-  TILE_OPC_SADAH_U_SN,
-  TILE_OPC_SADB_U,
-  TILE_OPC_SADB_U_SN,
-  TILE_OPC_SADH,
-  TILE_OPC_SADH_SN,
-  TILE_OPC_SADH_U,
-  TILE_OPC_SADH_U_SN,
-  TILE_OPC_SB,
-  TILE_OPC_SBADD,
-  TILE_OPC_SEQ,
-  TILE_OPC_SEQ_SN,
-  TILE_OPC_SEQB,
-  TILE_OPC_SEQB_SN,
-  TILE_OPC_SEQH,
-  TILE_OPC_SEQH_SN,
-  TILE_OPC_SEQI,
-  TILE_OPC_SEQI_SN,
-  TILE_OPC_SEQIB,
-  TILE_OPC_SEQIB_SN,
-  TILE_OPC_SEQIH,
-  TILE_OPC_SEQIH_SN,
-  TILE_OPC_SH,
-  TILE_OPC_SHADD,
-  TILE_OPC_SHL,
-  TILE_OPC_SHL_SN,
-  TILE_OPC_SHLB,
-  TILE_OPC_SHLB_SN,
-  TILE_OPC_SHLH,
-  TILE_OPC_SHLH_SN,
-  TILE_OPC_SHLI,
-  TILE_OPC_SHLI_SN,
-  TILE_OPC_SHLIB,
-  TILE_OPC_SHLIB_SN,
-  TILE_OPC_SHLIH,
-  TILE_OPC_SHLIH_SN,
-  TILE_OPC_SHR,
-  TILE_OPC_SHR_SN,
-  TILE_OPC_SHRB,
-  TILE_OPC_SHRB_SN,
-  TILE_OPC_SHRH,
-  TILE_OPC_SHRH_SN,
-  TILE_OPC_SHRI,
-  TILE_OPC_SHRI_SN,
-  TILE_OPC_SHRIB,
-  TILE_OPC_SHRIB_SN,
-  TILE_OPC_SHRIH,
-  TILE_OPC_SHRIH_SN,
-  TILE_OPC_SLT,
-  TILE_OPC_SLT_SN,
-  TILE_OPC_SLT_U,
-  TILE_OPC_SLT_U_SN,
-  TILE_OPC_SLTB,
-  TILE_OPC_SLTB_SN,
-  TILE_OPC_SLTB_U,
-  TILE_OPC_SLTB_U_SN,
-  TILE_OPC_SLTE,
-  TILE_OPC_SLTE_SN,
-  TILE_OPC_SLTE_U,
-  TILE_OPC_SLTE_U_SN,
-  TILE_OPC_SLTEB,
-  TILE_OPC_SLTEB_SN,
-  TILE_OPC_SLTEB_U,
-  TILE_OPC_SLTEB_U_SN,
-  TILE_OPC_SLTEH,
-  TILE_OPC_SLTEH_SN,
-  TILE_OPC_SLTEH_U,
-  TILE_OPC_SLTEH_U_SN,
-  TILE_OPC_SLTH,
-  TILE_OPC_SLTH_SN,
-  TILE_OPC_SLTH_U,
-  TILE_OPC_SLTH_U_SN,
-  TILE_OPC_SLTI,
-  TILE_OPC_SLTI_SN,
-  TILE_OPC_SLTI_U,
-  TILE_OPC_SLTI_U_SN,
-  TILE_OPC_SLTIB,
-  TILE_OPC_SLTIB_SN,
-  TILE_OPC_SLTIB_U,
-  TILE_OPC_SLTIB_U_SN,
-  TILE_OPC_SLTIH,
-  TILE_OPC_SLTIH_SN,
-  TILE_OPC_SLTIH_U,
-  TILE_OPC_SLTIH_U_SN,
-  TILE_OPC_SNE,
-  TILE_OPC_SNE_SN,
-  TILE_OPC_SNEB,
-  TILE_OPC_SNEB_SN,
-  TILE_OPC_SNEH,
-  TILE_OPC_SNEH_SN,
-  TILE_OPC_SRA,
-  TILE_OPC_SRA_SN,
-  TILE_OPC_SRAB,
-  TILE_OPC_SRAB_SN,
-  TILE_OPC_SRAH,
-  TILE_OPC_SRAH_SN,
-  TILE_OPC_SRAI,
-  TILE_OPC_SRAI_SN,
-  TILE_OPC_SRAIB,
-  TILE_OPC_SRAIB_SN,
-  TILE_OPC_SRAIH,
-  TILE_OPC_SRAIH_SN,
-  TILE_OPC_SUB,
-  TILE_OPC_SUB_SN,
-  TILE_OPC_SUBB,
-  TILE_OPC_SUBB_SN,
-  TILE_OPC_SUBBS_U,
-  TILE_OPC_SUBBS_U_SN,
-  TILE_OPC_SUBH,
-  TILE_OPC_SUBH_SN,
-  TILE_OPC_SUBHS,
-  TILE_OPC_SUBHS_SN,
-  TILE_OPC_SUBS,
-  TILE_OPC_SUBS_SN,
-  TILE_OPC_SW,
-  TILE_OPC_SWADD,
-  TILE_OPC_SWINT0,
-  TILE_OPC_SWINT1,
-  TILE_OPC_SWINT2,
-  TILE_OPC_SWINT3,
-  TILE_OPC_TBLIDXB0,
-  TILE_OPC_TBLIDXB0_SN,
-  TILE_OPC_TBLIDXB1,
-  TILE_OPC_TBLIDXB1_SN,
-  TILE_OPC_TBLIDXB2,
-  TILE_OPC_TBLIDXB2_SN,
-  TILE_OPC_TBLIDXB3,
-  TILE_OPC_TBLIDXB3_SN,
-  TILE_OPC_TNS,
-  TILE_OPC_TNS_SN,
-  TILE_OPC_WH64,
-  TILE_OPC_XOR,
-  TILE_OPC_XOR_SN,
-  TILE_OPC_XORI,
-  TILE_OPC_XORI_SN,
-  TILE_OPC_NONE
-} tile_mnemonic;
+  TILEGX_OPC_BPT,
+  TILEGX_OPC_INFO,
+  TILEGX_OPC_INFOL,
+  TILEGX_OPC_MOVE,
+  TILEGX_OPC_MOVEI,
+  TILEGX_OPC_MOVELI,
+  TILEGX_OPC_PREFETCH,
+  TILEGX_OPC_PREFETCH_ADD_L1,
+  TILEGX_OPC_PREFETCH_ADD_L1_FAULT,
+  TILEGX_OPC_PREFETCH_ADD_L2,
+  TILEGX_OPC_PREFETCH_ADD_L2_FAULT,
+  TILEGX_OPC_PREFETCH_ADD_L3,
+  TILEGX_OPC_PREFETCH_ADD_L3_FAULT,
+  TILEGX_OPC_PREFETCH_L1,
+  TILEGX_OPC_PREFETCH_L1_FAULT,
+  TILEGX_OPC_PREFETCH_L2,
+  TILEGX_OPC_PREFETCH_L2_FAULT,
+  TILEGX_OPC_PREFETCH_L3,
+  TILEGX_OPC_PREFETCH_L3_FAULT,
+  TILEGX_OPC_RAISE,
+  TILEGX_OPC_ADD,
+  TILEGX_OPC_ADDI,
+  TILEGX_OPC_ADDLI,
+  TILEGX_OPC_ADDX,
+  TILEGX_OPC_ADDXI,
+  TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXSC,
+  TILEGX_OPC_AND,
+  TILEGX_OPC_ANDI,
+  TILEGX_OPC_BEQZ,
+  TILEGX_OPC_BEQZT,
+  TILEGX_OPC_BFEXTS,
+  TILEGX_OPC_BFEXTU,
+  TILEGX_OPC_BFINS,
+  TILEGX_OPC_BGEZ,
+  TILEGX_OPC_BGEZT,
+  TILEGX_OPC_BGTZ,
+  TILEGX_OPC_BGTZT,
+  TILEGX_OPC_BLBC,
+  TILEGX_OPC_BLBCT,
+  TILEGX_OPC_BLBS,
+  TILEGX_OPC_BLBST,
+  TILEGX_OPC_BLEZ,
+  TILEGX_OPC_BLEZT,
+  TILEGX_OPC_BLTZ,
+  TILEGX_OPC_BLTZT,
+  TILEGX_OPC_BNEZ,
+  TILEGX_OPC_BNEZT,
+  TILEGX_OPC_CLZ,
+  TILEGX_OPC_CMOVEQZ,
+  TILEGX_OPC_CMOVNEZ,
+  TILEGX_OPC_CMPEQ,
+  TILEGX_OPC_CMPEQI,
+  TILEGX_OPC_CMPEXCH,
+  TILEGX_OPC_CMPEXCH4,
+  TILEGX_OPC_CMPLES,
+  TILEGX_OPC_CMPLEU,
+  TILEGX_OPC_CMPLTS,
+  TILEGX_OPC_CMPLTSI,
+  TILEGX_OPC_CMPLTU,
+  TILEGX_OPC_CMPLTUI,
+  TILEGX_OPC_CMPNE,
+  TILEGX_OPC_CMUL,
+  TILEGX_OPC_CMULA,
+  TILEGX_OPC_CMULAF,
+  TILEGX_OPC_CMULF,
+  TILEGX_OPC_CMULFR,
+  TILEGX_OPC_CMULH,
+  TILEGX_OPC_CMULHR,
+  TILEGX_OPC_CRC32_32,
+  TILEGX_OPC_CRC32_8,
+  TILEGX_OPC_CTZ,
+  TILEGX_OPC_DBLALIGN,
+  TILEGX_OPC_DBLALIGN2,
+  TILEGX_OPC_DBLALIGN4,
+  TILEGX_OPC_DBLALIGN6,
+  TILEGX_OPC_DRAIN,
+  TILEGX_OPC_DTLBPR,
+  TILEGX_OPC_EXCH,
+  TILEGX_OPC_EXCH4,
+  TILEGX_OPC_FDOUBLE_ADD_FLAGS,
+  TILEGX_OPC_FDOUBLE_ADDSUB,
+  TILEGX_OPC_FDOUBLE_MUL_FLAGS,
+  TILEGX_OPC_FDOUBLE_PACK1,
+  TILEGX_OPC_FDOUBLE_PACK2,
+  TILEGX_OPC_FDOUBLE_SUB_FLAGS,
+  TILEGX_OPC_FDOUBLE_UNPACK_MAX,
+  TILEGX_OPC_FDOUBLE_UNPACK_MIN,
+  TILEGX_OPC_FETCHADD,
+  TILEGX_OPC_FETCHADD4,
+  TILEGX_OPC_FETCHADDGEZ,
+  TILEGX_OPC_FETCHADDGEZ4,
+  TILEGX_OPC_FETCHAND,
+  TILEGX_OPC_FETCHAND4,
+  TILEGX_OPC_FETCHOR,
+  TILEGX_OPC_FETCHOR4,
+  TILEGX_OPC_FINV,
+  TILEGX_OPC_FLUSH,
+  TILEGX_OPC_FLUSHWB,
+  TILEGX_OPC_FNOP,
+  TILEGX_OPC_FSINGLE_ADD1,
+  TILEGX_OPC_FSINGLE_ADDSUB2,
+  TILEGX_OPC_FSINGLE_MUL1,
+  TILEGX_OPC_FSINGLE_MUL2,
+  TILEGX_OPC_FSINGLE_PACK1,
+  TILEGX_OPC_FSINGLE_PACK2,
+  TILEGX_OPC_FSINGLE_SUB1,
+  TILEGX_OPC_ICOH,
+  TILEGX_OPC_ILL,
+  TILEGX_OPC_INV,
+  TILEGX_OPC_IRET,
+  TILEGX_OPC_J,
+  TILEGX_OPC_JAL,
+  TILEGX_OPC_JALR,
+  TILEGX_OPC_JALRP,
+  TILEGX_OPC_JR,
+  TILEGX_OPC_JRP,
+  TILEGX_OPC_LD,
+  TILEGX_OPC_LD1S,
+  TILEGX_OPC_LD1S_ADD,
+  TILEGX_OPC_LD1U,
+  TILEGX_OPC_LD1U_ADD,
+  TILEGX_OPC_LD2S,
+  TILEGX_OPC_LD2S_ADD,
+  TILEGX_OPC_LD2U,
+  TILEGX_OPC_LD2U_ADD,
+  TILEGX_OPC_LD4S,
+  TILEGX_OPC_LD4S_ADD,
+  TILEGX_OPC_LD4U,
+  TILEGX_OPC_LD4U_ADD,
+  TILEGX_OPC_LD_ADD,
+  TILEGX_OPC_LDNA,
+  TILEGX_OPC_LDNA_ADD,
+  TILEGX_OPC_LDNT,
+  TILEGX_OPC_LDNT1S,
+  TILEGX_OPC_LDNT1S_ADD,
+  TILEGX_OPC_LDNT1U,
+  TILEGX_OPC_LDNT1U_ADD,
+  TILEGX_OPC_LDNT2S,
+  TILEGX_OPC_LDNT2S_ADD,
+  TILEGX_OPC_LDNT2U,
+  TILEGX_OPC_LDNT2U_ADD,
+  TILEGX_OPC_LDNT4S,
+  TILEGX_OPC_LDNT4S_ADD,
+  TILEGX_OPC_LDNT4U,
+  TILEGX_OPC_LDNT4U_ADD,
+  TILEGX_OPC_LDNT_ADD,
+  TILEGX_OPC_LNK,
+  TILEGX_OPC_MF,
+  TILEGX_OPC_MFSPR,
+  TILEGX_OPC_MM,
+  TILEGX_OPC_MNZ,
+  TILEGX_OPC_MTSPR,
+  TILEGX_OPC_MUL_HS_HS,
+  TILEGX_OPC_MUL_HS_HU,
+  TILEGX_OPC_MUL_HS_LS,
+  TILEGX_OPC_MUL_HS_LU,
+  TILEGX_OPC_MUL_HU_HU,
+  TILEGX_OPC_MUL_HU_LS,
+  TILEGX_OPC_MUL_HU_LU,
+  TILEGX_OPC_MUL_LS_LS,
+  TILEGX_OPC_MUL_LS_LU,
+  TILEGX_OPC_MUL_LU_LU,
+  TILEGX_OPC_MULA_HS_HS,
+  TILEGX_OPC_MULA_HS_HU,
+  TILEGX_OPC_MULA_HS_LS,
+  TILEGX_OPC_MULA_HS_LU,
+  TILEGX_OPC_MULA_HU_HU,
+  TILEGX_OPC_MULA_HU_LS,
+  TILEGX_OPC_MULA_HU_LU,
+  TILEGX_OPC_MULA_LS_LS,
+  TILEGX_OPC_MULA_LS_LU,
+  TILEGX_OPC_MULA_LU_LU,
+  TILEGX_OPC_MULAX,
+  TILEGX_OPC_MULX,
+  TILEGX_OPC_MZ,
+  TILEGX_OPC_NAP,
+  TILEGX_OPC_NOP,
+  TILEGX_OPC_NOR,
+  TILEGX_OPC_OR,
+  TILEGX_OPC_ORI,
+  TILEGX_OPC_PCNT,
+  TILEGX_OPC_REVBITS,
+  TILEGX_OPC_REVBYTES,
+  TILEGX_OPC_ROTL,
+  TILEGX_OPC_ROTLI,
+  TILEGX_OPC_SHL,
+  TILEGX_OPC_SHL16INSLI,
+  TILEGX_OPC_SHL1ADD,
+  TILEGX_OPC_SHL1ADDX,
+  TILEGX_OPC_SHL2ADD,
+  TILEGX_OPC_SHL2ADDX,
+  TILEGX_OPC_SHL3ADD,
+  TILEGX_OPC_SHL3ADDX,
+  TILEGX_OPC_SHLI,
+  TILEGX_OPC_SHLX,
+  TILEGX_OPC_SHLXI,
+  TILEGX_OPC_SHRS,
+  TILEGX_OPC_SHRSI,
+  TILEGX_OPC_SHRU,
+  TILEGX_OPC_SHRUI,
+  TILEGX_OPC_SHRUX,
+  TILEGX_OPC_SHRUXI,
+  TILEGX_OPC_SHUFFLEBYTES,
+  TILEGX_OPC_ST,
+  TILEGX_OPC_ST1,
+  TILEGX_OPC_ST1_ADD,
+  TILEGX_OPC_ST2,
+  TILEGX_OPC_ST2_ADD,
+  TILEGX_OPC_ST4,
+  TILEGX_OPC_ST4_ADD,
+  TILEGX_OPC_ST_ADD,
+  TILEGX_OPC_STNT,
+  TILEGX_OPC_STNT1,
+  TILEGX_OPC_STNT1_ADD,
+  TILEGX_OPC_STNT2,
+  TILEGX_OPC_STNT2_ADD,
+  TILEGX_OPC_STNT4,
+  TILEGX_OPC_STNT4_ADD,
+  TILEGX_OPC_STNT_ADD,
+  TILEGX_OPC_SUB,
+  TILEGX_OPC_SUBX,
+  TILEGX_OPC_SUBXSC,
+  TILEGX_OPC_SWINT0,
+  TILEGX_OPC_SWINT1,
+  TILEGX_OPC_SWINT2,
+  TILEGX_OPC_SWINT3,
+  TILEGX_OPC_TBLIDXB0,
+  TILEGX_OPC_TBLIDXB1,
+  TILEGX_OPC_TBLIDXB2,
+  TILEGX_OPC_TBLIDXB3,
+  TILEGX_OPC_V1ADD,
+  TILEGX_OPC_V1ADDI,
+  TILEGX_OPC_V1ADDUC,
+  TILEGX_OPC_V1ADIFFU,
+  TILEGX_OPC_V1AVGU,
+  TILEGX_OPC_V1CMPEQ,
+  TILEGX_OPC_V1CMPEQI,
+  TILEGX_OPC_V1CMPLES,
+  TILEGX_OPC_V1CMPLEU,
+  TILEGX_OPC_V1CMPLTS,
+  TILEGX_OPC_V1CMPLTSI,
+  TILEGX_OPC_V1CMPLTU,
+  TILEGX_OPC_V1CMPLTUI,
+  TILEGX_OPC_V1CMPNE,
+  TILEGX_OPC_V1DDOTPU,
+  TILEGX_OPC_V1DDOTPUA,
+  TILEGX_OPC_V1DDOTPUS,
+  TILEGX_OPC_V1DDOTPUSA,
+  TILEGX_OPC_V1DOTP,
+  TILEGX_OPC_V1DOTPA,
+  TILEGX_OPC_V1DOTPU,
+  TILEGX_OPC_V1DOTPUA,
+  TILEGX_OPC_V1DOTPUS,
+  TILEGX_OPC_V1DOTPUSA,
+  TILEGX_OPC_V1INT_H,
+  TILEGX_OPC_V1INT_L,
+  TILEGX_OPC_V1MAXU,
+  TILEGX_OPC_V1MAXUI,
+  TILEGX_OPC_V1MINU,
+  TILEGX_OPC_V1MINUI,
+  TILEGX_OPC_V1MNZ,
+  TILEGX_OPC_V1MULTU,
+  TILEGX_OPC_V1MULU,
+  TILEGX_OPC_V1MULUS,
+  TILEGX_OPC_V1MZ,
+  TILEGX_OPC_V1SADAU,
+  TILEGX_OPC_V1SADU,
+  TILEGX_OPC_V1SHL,
+  TILEGX_OPC_V1SHLI,
+  TILEGX_OPC_V1SHRS,
+  TILEGX_OPC_V1SHRSI,
+  TILEGX_OPC_V1SHRU,
+  TILEGX_OPC_V1SHRUI,
+  TILEGX_OPC_V1SUB,
+  TILEGX_OPC_V1SUBUC,
+  TILEGX_OPC_V2ADD,
+  TILEGX_OPC_V2ADDI,
+  TILEGX_OPC_V2ADDSC,
+  TILEGX_OPC_V2ADIFFS,
+  TILEGX_OPC_V2AVGS,
+  TILEGX_OPC_V2CMPEQ,
+  TILEGX_OPC_V2CMPEQI,
+  TILEGX_OPC_V2CMPLES,
+  TILEGX_OPC_V2CMPLEU,
+  TILEGX_OPC_V2CMPLTS,
+  TILEGX_OPC_V2CMPLTSI,
+  TILEGX_OPC_V2CMPLTU,
+  TILEGX_OPC_V2CMPLTUI,
+  TILEGX_OPC_V2CMPNE,
+  TILEGX_OPC_V2DOTP,
+  TILEGX_OPC_V2DOTPA,
+  TILEGX_OPC_V2INT_H,
+  TILEGX_OPC_V2INT_L,
+  TILEGX_OPC_V2MAXS,
+  TILEGX_OPC_V2MAXSI,
+  TILEGX_OPC_V2MINS,
+  TILEGX_OPC_V2MINSI,
+  TILEGX_OPC_V2MNZ,
+  TILEGX_OPC_V2MULFSC,
+  TILEGX_OPC_V2MULS,
+  TILEGX_OPC_V2MULTS,
+  TILEGX_OPC_V2MZ,
+  TILEGX_OPC_V2PACKH,
+  TILEGX_OPC_V2PACKL,
+  TILEGX_OPC_V2PACKUC,
+  TILEGX_OPC_V2SADAS,
+  TILEGX_OPC_V2SADAU,
+  TILEGX_OPC_V2SADS,
+  TILEGX_OPC_V2SADU,
+  TILEGX_OPC_V2SHL,
+  TILEGX_OPC_V2SHLI,
+  TILEGX_OPC_V2SHLSC,
+  TILEGX_OPC_V2SHRS,
+  TILEGX_OPC_V2SHRSI,
+  TILEGX_OPC_V2SHRU,
+  TILEGX_OPC_V2SHRUI,
+  TILEGX_OPC_V2SUB,
+  TILEGX_OPC_V2SUBSC,
+  TILEGX_OPC_V4ADD,
+  TILEGX_OPC_V4ADDSC,
+  TILEGX_OPC_V4INT_H,
+  TILEGX_OPC_V4INT_L,
+  TILEGX_OPC_V4PACKSC,
+  TILEGX_OPC_V4SHL,
+  TILEGX_OPC_V4SHLSC,
+  TILEGX_OPC_V4SHRS,
+  TILEGX_OPC_V4SHRU,
+  TILEGX_OPC_V4SUB,
+  TILEGX_OPC_V4SUBSC,
+  TILEGX_OPC_WH64,
+  TILEGX_OPC_XOR,
+  TILEGX_OPC_XORI,
+  TILEGX_OPC_NONE
+} tilegx_mnemonic;
 
 /* 64-bit pattern for a { bpt ; nop } bundle. */
-#define TILE_BPT_BUNDLE 0x400b3cae70166000ULL
+#define TILEGX_BPT_BUNDLE 0x286a44ae51485000ULL
 
 
-#define TILE_ELF_MACHINE_CODE EM_TILEPRO
+#define TILE_ELF_MACHINE_CODE EM_TILE64
 
-#define TILE_ELF_NAME "elf32-tilepro"
+#define TILE_ELF_NAME "elf32-tile64"
 
 
 static __inline unsigned int
-get_BrOff_SN(tile_bundle_bits num)
+get_BFEnd_X0(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((n >> 0)) & 0x3ff);
+  return (((n >> 12)) & 0x3f);
 }
 
 static __inline unsigned int
-get_BrOff_X1(tile_bundle_bits n)
+get_BFOpcodeExtension_X0(tilegx_bundle_bits num)
 {
-  return (((unsigned int)(n >> 43)) & 0x00007fff) |
-         (((unsigned int)(n >> 20)) & 0x00018000);
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 24)) & 0xf);
 }
 
 static __inline unsigned int
-get_BrType_X1(tile_bundle_bits n)
+get_BFStart_X0(tilegx_bundle_bits num)
 {
-  return (((unsigned int)(n >> 31)) & 0xf);
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 18)) & 0x3f);
 }
 
 static __inline unsigned int
-get_Dest_Imm8_X1(tile_bundle_bits n)
+get_BrOff_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 31)) & 0x0000003f) |
+         (((unsigned int)(n >> 37)) & 0x0001ffc0);
+}
+
+static __inline unsigned int
+get_BrType_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 54)) & 0x1f);
+}
+
+static __inline unsigned int
+get_Dest_Imm8_X1(tilegx_bundle_bits n)
 {
   return (((unsigned int)(n >> 31)) & 0x0000003f) |
          (((unsigned int)(n >> 43)) & 0x000000c0);
 }
 
 static __inline unsigned int
-get_Dest_SN(tile_bundle_bits num)
-{
-  const unsigned int n = (unsigned int)num;
-  return (((n >> 2)) & 0x3);
-}
-
-static __inline unsigned int
-get_Dest_X0(tile_bundle_bits num)
+get_Dest_X0(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
   return (((n >> 0)) & 0x3f);
 }
 
 static __inline unsigned int
-get_Dest_X1(tile_bundle_bits n)
+get_Dest_X1(tilegx_bundle_bits n)
 {
   return (((unsigned int)(n >> 31)) & 0x3f);
 }
 
 static __inline unsigned int
-get_Dest_Y0(tile_bundle_bits num)
+get_Dest_Y0(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
   return (((n >> 0)) & 0x3f);
 }
 
 static __inline unsigned int
-get_Dest_Y1(tile_bundle_bits n)
+get_Dest_Y1(tilegx_bundle_bits n)
 {
   return (((unsigned int)(n >> 31)) & 0x3f);
 }
 
 static __inline unsigned int
-get_Imm16_X0(tile_bundle_bits num)
+get_Imm16_X0(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
   return (((n >> 12)) & 0xffff);
 }
 
 static __inline unsigned int
-get_Imm16_X1(tile_bundle_bits n)
+get_Imm16_X1(tilegx_bundle_bits n)
 {
   return (((unsigned int)(n >> 43)) & 0xffff);
 }
 
 static __inline unsigned int
-get_Imm8_SN(tile_bundle_bits num)
+get_Imm8OpcodeExtension_X0(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((n >> 0)) & 0xff);
+  return (((n >> 20)) & 0xff);
 }
 
 static __inline unsigned int
-get_Imm8_X0(tile_bundle_bits num)
+get_Imm8OpcodeExtension_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 51)) & 0xff);
+}
+
+static __inline unsigned int
+get_Imm8_X0(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
   return (((n >> 12)) & 0xff);
 }
 
 static __inline unsigned int
-get_Imm8_X1(tile_bundle_bits n)
+get_Imm8_X1(tilegx_bundle_bits n)
 {
   return (((unsigned int)(n >> 43)) & 0xff);
 }
 
 static __inline unsigned int
-get_Imm8_Y0(tile_bundle_bits num)
+get_Imm8_Y0(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
   return (((n >> 12)) & 0xff);
 }
 
 static __inline unsigned int
-get_Imm8_Y1(tile_bundle_bits n)
+get_Imm8_Y1(tilegx_bundle_bits n)
 {
   return (((unsigned int)(n >> 43)) & 0xff);
 }
 
 static __inline unsigned int
-get_ImmOpcodeExtension_X0(tile_bundle_bits num)
+get_JumpOff_X1(tilegx_bundle_bits n)
 {
-  const unsigned int n = (unsigned int)num;
-  return (((n >> 20)) & 0x7f);
+  return (((unsigned int)(n >> 31)) & 0x7ffffff);
 }
 
 static __inline unsigned int
-get_ImmOpcodeExtension_X1(tile_bundle_bits n)
+get_JumpOpcodeExtension_X1(tilegx_bundle_bits n)
 {
-  return (((unsigned int)(n >> 51)) & 0x7f);
+  return (((unsigned int)(n >> 58)) & 0x1);
 }
 
 static __inline unsigned int
-get_ImmRROpcodeExtension_SN(tile_bundle_bits num)
+get_MF_Imm14_X1(tilegx_bundle_bits n)
 {
-  const unsigned int n = (unsigned int)num;
-  return (((n >> 8)) & 0x3);
+  return (((unsigned int)(n >> 37)) & 0x3fff);
 }
 
 static __inline unsigned int
-get_JOffLong_X1(tile_bundle_bits n)
-{
-  return (((unsigned int)(n >> 43)) & 0x00007fff) |
-         (((unsigned int)(n >> 20)) & 0x00018000) |
-         (((unsigned int)(n >> 14)) & 0x001e0000) |
-         (((unsigned int)(n >> 16)) & 0x07e00000) |
-         (((unsigned int)(n >> 31)) & 0x18000000);
-}
-
-static __inline unsigned int
-get_JOff_X1(tile_bundle_bits n)
-{
-  return (((unsigned int)(n >> 43)) & 0x00007fff) |
-         (((unsigned int)(n >> 20)) & 0x00018000) |
-         (((unsigned int)(n >> 14)) & 0x001e0000) |
-         (((unsigned int)(n >> 16)) & 0x07e00000) |
-         (((unsigned int)(n >> 31)) & 0x08000000);
-}
-
-static __inline unsigned int
-get_MF_Imm15_X1(tile_bundle_bits n)
-{
-  return (((unsigned int)(n >> 37)) & 0x00003fff) |
-         (((unsigned int)(n >> 44)) & 0x00004000);
-}
-
-static __inline unsigned int
-get_MMEnd_X0(tile_bundle_bits num)
-{
-  const unsigned int n = (unsigned int)num;
-  return (((n >> 18)) & 0x1f);
-}
-
-static __inline unsigned int
-get_MMEnd_X1(tile_bundle_bits n)
-{
-  return (((unsigned int)(n >> 49)) & 0x1f);
-}
-
-static __inline unsigned int
-get_MMStart_X0(tile_bundle_bits num)
-{
-  const unsigned int n = (unsigned int)num;
-  return (((n >> 23)) & 0x1f);
-}
-
-static __inline unsigned int
-get_MMStart_X1(tile_bundle_bits n)
-{
-  return (((unsigned int)(n >> 54)) & 0x1f);
-}
-
-static __inline unsigned int
-get_MT_Imm15_X1(tile_bundle_bits n)
+get_MT_Imm14_X1(tilegx_bundle_bits n)
 {
   return (((unsigned int)(n >> 31)) & 0x0000003f) |
-         (((unsigned int)(n >> 37)) & 0x00003fc0) |
-         (((unsigned int)(n >> 44)) & 0x00004000);
+         (((unsigned int)(n >> 37)) & 0x00003fc0);
 }
 
 static __inline unsigned int
-get_Mode(tile_bundle_bits n)
+get_Mode(tilegx_bundle_bits n)
 {
-  return (((unsigned int)(n >> 63)) & 0x1);
+  return (((unsigned int)(n >> 62)) & 0x3);
 }
 
 static __inline unsigned int
-get_NoRegOpcodeExtension_SN(tile_bundle_bits num)
-{
-  const unsigned int n = (unsigned int)num;
-  return (((n >> 0)) & 0xf);
-}
-
-static __inline unsigned int
-get_Opcode_SN(tile_bundle_bits num)
-{
-  const unsigned int n = (unsigned int)num;
-  return (((n >> 10)) & 0x3f);
-}
-
-static __inline unsigned int
-get_Opcode_X0(tile_bundle_bits num)
+get_Opcode_X0(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
   return (((n >> 28)) & 0x7);
 }
 
 static __inline unsigned int
-get_Opcode_X1(tile_bundle_bits n)
+get_Opcode_X1(tilegx_bundle_bits n)
 {
-  return (((unsigned int)(n >> 59)) & 0xf);
+  return (((unsigned int)(n >> 59)) & 0x7);
 }
 
 static __inline unsigned int
-get_Opcode_Y0(tile_bundle_bits num)
+get_Opcode_Y0(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
   return (((n >> 27)) & 0xf);
 }
 
 static __inline unsigned int
-get_Opcode_Y1(tile_bundle_bits n)
+get_Opcode_Y1(tilegx_bundle_bits n)
 {
-  return (((unsigned int)(n >> 59)) & 0xf);
+  return (((unsigned int)(n >> 58)) & 0xf);
 }
 
 static __inline unsigned int
-get_Opcode_Y2(tile_bundle_bits n)
+get_Opcode_Y2(tilegx_bundle_bits n)
 {
-  return (((unsigned int)(n >> 56)) & 0x7);
+  return (((n >> 26)) & 0x00000001) |
+         (((unsigned int)(n >> 56)) & 0x00000002);
 }
 
 static __inline unsigned int
-get_RROpcodeExtension_SN(tile_bundle_bits num)
+get_RRROpcodeExtension_X0(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((n >> 4)) & 0xf);
+  return (((n >> 18)) & 0x3ff);
 }
 
 static __inline unsigned int
-get_RRROpcodeExtension_X0(tile_bundle_bits num)
+get_RRROpcodeExtension_X1(tilegx_bundle_bits n)
 {
-  const unsigned int n = (unsigned int)num;
-  return (((n >> 18)) & 0x1ff);
+  return (((unsigned int)(n >> 49)) & 0x3ff);
 }
 
 static __inline unsigned int
-get_RRROpcodeExtension_X1(tile_bundle_bits n)
-{
-  return (((unsigned int)(n >> 49)) & 0x1ff);
-}
-
-static __inline unsigned int
-get_RRROpcodeExtension_Y0(tile_bundle_bits num)
+get_RRROpcodeExtension_Y0(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
   return (((n >> 18)) & 0x3);
 }
 
 static __inline unsigned int
-get_RRROpcodeExtension_Y1(tile_bundle_bits n)
+get_RRROpcodeExtension_Y1(tilegx_bundle_bits n)
 {
   return (((unsigned int)(n >> 49)) & 0x3);
 }
 
 static __inline unsigned int
-get_RouteOpcodeExtension_SN(tile_bundle_bits num)
+get_ShAmt_X0(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((n >> 0)) & 0x3ff);
+  return (((n >> 12)) & 0x3f);
 }
 
 static __inline unsigned int
-get_S_X0(tile_bundle_bits num)
+get_ShAmt_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 43)) & 0x3f);
+}
+
+static __inline unsigned int
+get_ShAmt_Y0(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((n >> 27)) & 0x1);
+  return (((n >> 12)) & 0x3f);
 }
 
 static __inline unsigned int
-get_S_X1(tile_bundle_bits n)
+get_ShAmt_Y1(tilegx_bundle_bits n)
 {
-  return (((unsigned int)(n >> 58)) & 0x1);
+  return (((unsigned int)(n >> 43)) & 0x3f);
 }
 
 static __inline unsigned int
-get_ShAmt_X0(tile_bundle_bits num)
+get_ShiftOpcodeExtension_X0(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((n >> 12)) & 0x1f);
+  return (((n >> 18)) & 0x3ff);
 }
 
 static __inline unsigned int
-get_ShAmt_X1(tile_bundle_bits n)
+get_ShiftOpcodeExtension_X1(tilegx_bundle_bits n)
 {
-  return (((unsigned int)(n >> 43)) & 0x1f);
+  return (((unsigned int)(n >> 49)) & 0x3ff);
 }
 
 static __inline unsigned int
-get_ShAmt_Y0(tile_bundle_bits num)
+get_ShiftOpcodeExtension_Y0(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((n >> 12)) & 0x1f);
+  return (((n >> 18)) & 0x3);
 }
 
 static __inline unsigned int
-get_ShAmt_Y1(tile_bundle_bits n)
+get_ShiftOpcodeExtension_Y1(tilegx_bundle_bits n)
 {
-  return (((unsigned int)(n >> 43)) & 0x1f);
+  return (((unsigned int)(n >> 49)) & 0x3);
 }
 
 static __inline unsigned int
-get_SrcA_X0(tile_bundle_bits num)
+get_SrcA_X0(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
   return (((n >> 6)) & 0x3f);
 }
 
 static __inline unsigned int
-get_SrcA_X1(tile_bundle_bits n)
+get_SrcA_X1(tilegx_bundle_bits n)
 {
   return (((unsigned int)(n >> 37)) & 0x3f);
 }
 
 static __inline unsigned int
-get_SrcA_Y0(tile_bundle_bits num)
+get_SrcA_Y0(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
   return (((n >> 6)) & 0x3f);
 }
 
 static __inline unsigned int
-get_SrcA_Y1(tile_bundle_bits n)
+get_SrcA_Y1(tilegx_bundle_bits n)
 {
   return (((unsigned int)(n >> 37)) & 0x3f);
 }
 
 static __inline unsigned int
-get_SrcA_Y2(tile_bundle_bits n)
-{
-  return (((n >> 26)) & 0x00000001) |
-         (((unsigned int)(n >> 50)) & 0x0000003e);
-}
-
-static __inline unsigned int
-get_SrcBDest_Y2(tile_bundle_bits num)
+get_SrcA_Y2(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
   return (((n >> 20)) & 0x3f);
 }
 
 static __inline unsigned int
-get_SrcB_X0(tile_bundle_bits num)
+get_SrcBDest_Y2(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 51)) & 0x3f);
+}
+
+static __inline unsigned int
+get_SrcB_X0(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
   return (((n >> 12)) & 0x3f);
 }
 
 static __inline unsigned int
-get_SrcB_X1(tile_bundle_bits n)
+get_SrcB_X1(tilegx_bundle_bits n)
 {
   return (((unsigned int)(n >> 43)) & 0x3f);
 }
 
 static __inline unsigned int
-get_SrcB_Y0(tile_bundle_bits num)
+get_SrcB_Y0(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
   return (((n >> 12)) & 0x3f);
 }
 
 static __inline unsigned int
-get_SrcB_Y1(tile_bundle_bits n)
+get_SrcB_Y1(tilegx_bundle_bits n)
 {
   return (((unsigned int)(n >> 43)) & 0x3f);
 }
 
 static __inline unsigned int
-get_Src_SN(tile_bundle_bits num)
+get_UnaryOpcodeExtension_X0(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((n >> 0)) & 0x3);
+  return (((n >> 12)) & 0x3f);
 }
 
 static __inline unsigned int
-get_UnOpcodeExtension_X0(tile_bundle_bits num)
+get_UnaryOpcodeExtension_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 43)) & 0x3f);
+}
+
+static __inline unsigned int
+get_UnaryOpcodeExtension_Y0(tilegx_bundle_bits num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((n >> 12)) & 0x1f);
+  return (((n >> 12)) & 0x3f);
 }
 
 static __inline unsigned int
-get_UnOpcodeExtension_X1(tile_bundle_bits n)
+get_UnaryOpcodeExtension_Y1(tilegx_bundle_bits n)
 {
-  return (((unsigned int)(n >> 43)) & 0x1f);
-}
-
-static __inline unsigned int
-get_UnOpcodeExtension_Y0(tile_bundle_bits num)
-{
-  const unsigned int n = (unsigned int)num;
-  return (((n >> 12)) & 0x1f);
-}
-
-static __inline unsigned int
-get_UnOpcodeExtension_Y1(tile_bundle_bits n)
-{
-  return (((unsigned int)(n >> 43)) & 0x1f);
-}
-
-static __inline unsigned int
-get_UnShOpcodeExtension_X0(tile_bundle_bits num)
-{
-  const unsigned int n = (unsigned int)num;
-  return (((n >> 17)) & 0x3ff);
-}
-
-static __inline unsigned int
-get_UnShOpcodeExtension_X1(tile_bundle_bits n)
-{
-  return (((unsigned int)(n >> 48)) & 0x3ff);
-}
-
-static __inline unsigned int
-get_UnShOpcodeExtension_Y0(tile_bundle_bits num)
-{
-  const unsigned int n = (unsigned int)num;
-  return (((n >> 17)) & 0x7);
-}
-
-static __inline unsigned int
-get_UnShOpcodeExtension_Y1(tile_bundle_bits n)
-{
-  return (((unsigned int)(n >> 48)) & 0x7);
+  return (((unsigned int)(n >> 43)) & 0x3f);
 }
 
 
@@ -874,546 +722,441 @@
 
 
 
-static __inline tile_bundle_bits
-create_BrOff_SN(int num)
+static __inline tilegx_bundle_bits
+create_BFEnd_X0(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return ((n & 0x3ff) << 0);
+  return ((n & 0x3f) << 12);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
+create_BFOpcodeExtension_X0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0xf) << 24);
+}
+
+static __inline tilegx_bundle_bits
+create_BFStart_X0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x3f) << 18);
+}
+
+static __inline tilegx_bundle_bits
 create_BrOff_X1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x00007fff)) << 43) |
-         (((tile_bundle_bits)(n & 0x00018000)) << 20);
+  return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
+         (((tilegx_bundle_bits)(n & 0x0001ffc0)) << 37);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_BrType_X1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0xf)) << 31);
+  return (((tilegx_bundle_bits)(n & 0x1f)) << 54);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_Dest_Imm8_X1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x0000003f)) << 31) |
-         (((tile_bundle_bits)(n & 0x000000c0)) << 43);
+  return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
+         (((tilegx_bundle_bits)(n & 0x000000c0)) << 43);
 }
 
-static __inline tile_bundle_bits
-create_Dest_SN(int num)
-{
-  const unsigned int n = (unsigned int)num;
-  return ((n & 0x3) << 2);
-}
-
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_Dest_X0(int num)
 {
   const unsigned int n = (unsigned int)num;
   return ((n & 0x3f) << 0);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_Dest_X1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x3f)) << 31);
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 31);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_Dest_Y0(int num)
 {
   const unsigned int n = (unsigned int)num;
   return ((n & 0x3f) << 0);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_Dest_Y1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x3f)) << 31);
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 31);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_Imm16_X0(int num)
 {
   const unsigned int n = (unsigned int)num;
   return ((n & 0xffff) << 12);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_Imm16_X1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0xffff)) << 43);
+  return (((tilegx_bundle_bits)(n & 0xffff)) << 43);
 }
 
-static __inline tile_bundle_bits
-create_Imm8_SN(int num)
+static __inline tilegx_bundle_bits
+create_Imm8OpcodeExtension_X0(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return ((n & 0xff) << 0);
+  return ((n & 0xff) << 20);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
+create_Imm8OpcodeExtension_X1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0xff)) << 51);
+}
+
+static __inline tilegx_bundle_bits
 create_Imm8_X0(int num)
 {
   const unsigned int n = (unsigned int)num;
   return ((n & 0xff) << 12);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_Imm8_X1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0xff)) << 43);
+  return (((tilegx_bundle_bits)(n & 0xff)) << 43);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_Imm8_Y0(int num)
 {
   const unsigned int n = (unsigned int)num;
   return ((n & 0xff) << 12);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_Imm8_Y1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0xff)) << 43);
+  return (((tilegx_bundle_bits)(n & 0xff)) << 43);
 }
 
-static __inline tile_bundle_bits
-create_ImmOpcodeExtension_X0(int num)
+static __inline tilegx_bundle_bits
+create_JumpOff_X1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return ((n & 0x7f) << 20);
+  return (((tilegx_bundle_bits)(n & 0x7ffffff)) << 31);
 }
 
-static __inline tile_bundle_bits
-create_ImmOpcodeExtension_X1(int num)
+static __inline tilegx_bundle_bits
+create_JumpOpcodeExtension_X1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x7f)) << 51);
+  return (((tilegx_bundle_bits)(n & 0x1)) << 58);
 }
 
-static __inline tile_bundle_bits
-create_ImmRROpcodeExtension_SN(int num)
+static __inline tilegx_bundle_bits
+create_MF_Imm14_X1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return ((n & 0x3) << 8);
+  return (((tilegx_bundle_bits)(n & 0x3fff)) << 37);
 }
 
-static __inline tile_bundle_bits
-create_JOffLong_X1(int num)
+static __inline tilegx_bundle_bits
+create_MT_Imm14_X1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x00007fff)) << 43) |
-         (((tile_bundle_bits)(n & 0x00018000)) << 20) |
-         (((tile_bundle_bits)(n & 0x001e0000)) << 14) |
-         (((tile_bundle_bits)(n & 0x07e00000)) << 16) |
-         (((tile_bundle_bits)(n & 0x18000000)) << 31);
+  return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
+         (((tilegx_bundle_bits)(n & 0x00003fc0)) << 37);
 }
 
-static __inline tile_bundle_bits
-create_JOff_X1(int num)
-{
-  const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x00007fff)) << 43) |
-         (((tile_bundle_bits)(n & 0x00018000)) << 20) |
-         (((tile_bundle_bits)(n & 0x001e0000)) << 14) |
-         (((tile_bundle_bits)(n & 0x07e00000)) << 16) |
-         (((tile_bundle_bits)(n & 0x08000000)) << 31);
-}
-
-static __inline tile_bundle_bits
-create_MF_Imm15_X1(int num)
-{
-  const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x00003fff)) << 37) |
-         (((tile_bundle_bits)(n & 0x00004000)) << 44);
-}
-
-static __inline tile_bundle_bits
-create_MMEnd_X0(int num)
-{
-  const unsigned int n = (unsigned int)num;
-  return ((n & 0x1f) << 18);
-}
-
-static __inline tile_bundle_bits
-create_MMEnd_X1(int num)
-{
-  const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x1f)) << 49);
-}
-
-static __inline tile_bundle_bits
-create_MMStart_X0(int num)
-{
-  const unsigned int n = (unsigned int)num;
-  return ((n & 0x1f) << 23);
-}
-
-static __inline tile_bundle_bits
-create_MMStart_X1(int num)
-{
-  const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x1f)) << 54);
-}
-
-static __inline tile_bundle_bits
-create_MT_Imm15_X1(int num)
-{
-  const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x0000003f)) << 31) |
-         (((tile_bundle_bits)(n & 0x00003fc0)) << 37) |
-         (((tile_bundle_bits)(n & 0x00004000)) << 44);
-}
-
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_Mode(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x1)) << 63);
+  return (((tilegx_bundle_bits)(n & 0x3)) << 62);
 }
 
-static __inline tile_bundle_bits
-create_NoRegOpcodeExtension_SN(int num)
-{
-  const unsigned int n = (unsigned int)num;
-  return ((n & 0xf) << 0);
-}
-
-static __inline tile_bundle_bits
-create_Opcode_SN(int num)
-{
-  const unsigned int n = (unsigned int)num;
-  return ((n & 0x3f) << 10);
-}
-
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_Opcode_X0(int num)
 {
   const unsigned int n = (unsigned int)num;
   return ((n & 0x7) << 28);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_Opcode_X1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0xf)) << 59);
+  return (((tilegx_bundle_bits)(n & 0x7)) << 59);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_Opcode_Y0(int num)
 {
   const unsigned int n = (unsigned int)num;
   return ((n & 0xf) << 27);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_Opcode_Y1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0xf)) << 59);
+  return (((tilegx_bundle_bits)(n & 0xf)) << 58);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_Opcode_Y2(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x7)) << 56);
+  return ((n & 0x00000001) << 26) |
+         (((tilegx_bundle_bits)(n & 0x00000002)) << 56);
 }
 
-static __inline tile_bundle_bits
-create_RROpcodeExtension_SN(int num)
-{
-  const unsigned int n = (unsigned int)num;
-  return ((n & 0xf) << 4);
-}
-
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_RRROpcodeExtension_X0(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return ((n & 0x1ff) << 18);
+  return ((n & 0x3ff) << 18);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_RRROpcodeExtension_X1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x1ff)) << 49);
+  return (((tilegx_bundle_bits)(n & 0x3ff)) << 49);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_RRROpcodeExtension_Y0(int num)
 {
   const unsigned int n = (unsigned int)num;
   return ((n & 0x3) << 18);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_RRROpcodeExtension_Y1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x3)) << 49);
+  return (((tilegx_bundle_bits)(n & 0x3)) << 49);
 }
 
-static __inline tile_bundle_bits
-create_RouteOpcodeExtension_SN(int num)
-{
-  const unsigned int n = (unsigned int)num;
-  return ((n & 0x3ff) << 0);
-}
-
-static __inline tile_bundle_bits
-create_S_X0(int num)
-{
-  const unsigned int n = (unsigned int)num;
-  return ((n & 0x1) << 27);
-}
-
-static __inline tile_bundle_bits
-create_S_X1(int num)
-{
-  const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x1)) << 58);
-}
-
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_ShAmt_X0(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return ((n & 0x1f) << 12);
+  return ((n & 0x3f) << 12);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_ShAmt_X1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x1f)) << 43);
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_ShAmt_Y0(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return ((n & 0x1f) << 12);
+  return ((n & 0x3f) << 12);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_ShAmt_Y1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x1f)) << 43);
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
+create_ShiftOpcodeExtension_X0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x3ff) << 18);
+}
+
+static __inline tilegx_bundle_bits
+create_ShiftOpcodeExtension_X1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x3ff)) << 49);
+}
+
+static __inline tilegx_bundle_bits
+create_ShiftOpcodeExtension_Y0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x3) << 18);
+}
+
+static __inline tilegx_bundle_bits
+create_ShiftOpcodeExtension_Y1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x3)) << 49);
+}
+
+static __inline tilegx_bundle_bits
 create_SrcA_X0(int num)
 {
   const unsigned int n = (unsigned int)num;
   return ((n & 0x3f) << 6);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_SrcA_X1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x3f)) << 37);
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 37);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_SrcA_Y0(int num)
 {
   const unsigned int n = (unsigned int)num;
   return ((n & 0x3f) << 6);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_SrcA_Y1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x3f)) << 37);
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 37);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_SrcA_Y2(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return ((n & 0x00000001) << 26) |
-         (((tile_bundle_bits)(n & 0x0000003e)) << 50);
-}
-
-static __inline tile_bundle_bits
-create_SrcBDest_Y2(int num)
-{
-  const unsigned int n = (unsigned int)num;
   return ((n & 0x3f) << 20);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
+create_SrcBDest_Y2(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 51);
+}
+
+static __inline tilegx_bundle_bits
 create_SrcB_X0(int num)
 {
   const unsigned int n = (unsigned int)num;
   return ((n & 0x3f) << 12);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_SrcB_X1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x3f)) << 43);
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_SrcB_Y0(int num)
 {
   const unsigned int n = (unsigned int)num;
   return ((n & 0x3f) << 12);
 }
 
-static __inline tile_bundle_bits
+static __inline tilegx_bundle_bits
 create_SrcB_Y1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x3f)) << 43);
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
 }
 
-static __inline tile_bundle_bits
-create_Src_SN(int num)
+static __inline tilegx_bundle_bits
+create_UnaryOpcodeExtension_X0(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return ((n & 0x3) << 0);
+  return ((n & 0x3f) << 12);
 }
 
-static __inline tile_bundle_bits
-create_UnOpcodeExtension_X0(int num)
+static __inline tilegx_bundle_bits
+create_UnaryOpcodeExtension_X1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return ((n & 0x1f) << 12);
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
 }
 
-static __inline tile_bundle_bits
-create_UnOpcodeExtension_X1(int num)
+static __inline tilegx_bundle_bits
+create_UnaryOpcodeExtension_Y0(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x1f)) << 43);
+  return ((n & 0x3f) << 12);
 }
 
-static __inline tile_bundle_bits
-create_UnOpcodeExtension_Y0(int num)
+static __inline tilegx_bundle_bits
+create_UnaryOpcodeExtension_Y1(int num)
 {
   const unsigned int n = (unsigned int)num;
-  return ((n & 0x1f) << 12);
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
 }
 
-static __inline tile_bundle_bits
-create_UnOpcodeExtension_Y1(int num)
-{
-  const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x1f)) << 43);
-}
-
-static __inline tile_bundle_bits
-create_UnShOpcodeExtension_X0(int num)
-{
-  const unsigned int n = (unsigned int)num;
-  return ((n & 0x3ff) << 17);
-}
-
-static __inline tile_bundle_bits
-create_UnShOpcodeExtension_X1(int num)
-{
-  const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x3ff)) << 48);
-}
-
-static __inline tile_bundle_bits
-create_UnShOpcodeExtension_Y0(int num)
-{
-  const unsigned int n = (unsigned int)num;
-  return ((n & 0x7) << 17);
-}
-
-static __inline tile_bundle_bits
-create_UnShOpcodeExtension_Y1(int num)
-{
-  const unsigned int n = (unsigned int)num;
-  return (((tile_bundle_bits)(n & 0x7)) << 48);
-}
-
-
 
 typedef enum
 {
-  TILE_PIPELINE_X0,
-  TILE_PIPELINE_X1,
-  TILE_PIPELINE_Y0,
-  TILE_PIPELINE_Y1,
-  TILE_PIPELINE_Y2,
-} tile_pipeline;
+  TILEGX_PIPELINE_X0,
+  TILEGX_PIPELINE_X1,
+  TILEGX_PIPELINE_Y0,
+  TILEGX_PIPELINE_Y1,
+  TILEGX_PIPELINE_Y2,
+} tilegx_pipeline;
 
-#define tile_is_x_pipeline(p) ((int)(p) <= (int)TILE_PIPELINE_X1)
+#define tilegx_is_x_pipeline(p) ((int)(p) <= (int)TILEGX_PIPELINE_X1)
 
 typedef enum
 {
-  TILE_OP_TYPE_REGISTER,
-  TILE_OP_TYPE_IMMEDIATE,
-  TILE_OP_TYPE_ADDRESS,
-  TILE_OP_TYPE_SPR
-} tile_operand_type;
+  TILEGX_OP_TYPE_REGISTER,
+  TILEGX_OP_TYPE_IMMEDIATE,
+  TILEGX_OP_TYPE_ADDRESS,
+  TILEGX_OP_TYPE_SPR
+} tilegx_operand_type;
 
-/* This is the bit that determines if a bundle is in the Y encoding. */
-#define TILE_BUNDLE_Y_ENCODING_MASK ((tile_bundle_bits)1 << 63)
+/* These are the bits that determine if a bundle is in the X encoding. */
+#define TILEGX_BUNDLE_MODE_MASK ((tilegx_bundle_bits)3 << 62)
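/*
 * Editor's sketch, not part of the patch: given the mask above, a bundle
 * should be in the X encoding exactly when both mode bits are clear (the
 * helper name below is hypothetical).
 */
static __inline int
tilegx_bundle_is_x_encoded(tilegx_bundle_bits bits)
{
  return (bits & TILEGX_BUNDLE_MODE_MASK) == 0;
}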
 
 enum
 {
   /* Maximum number of instructions in a bundle (2 for X, 3 for Y). */
-  TILE_MAX_INSTRUCTIONS_PER_BUNDLE = 3,
+  TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE = 3,
 
   /* How many different pipeline encodings are there? X0, X1, Y0, Y1, Y2. */
-  TILE_NUM_PIPELINE_ENCODINGS = 5,
+  TILEGX_NUM_PIPELINE_ENCODINGS = 5,
 
-  /* Log base 2 of TILE_BUNDLE_SIZE_IN_BYTES. */
-  TILE_LOG2_BUNDLE_SIZE_IN_BYTES = 3,
+  /* Log base 2 of TILEGX_BUNDLE_SIZE_IN_BYTES. */
+  TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES = 3,
 
   /* Instructions take this many bytes. */
-  TILE_BUNDLE_SIZE_IN_BYTES = 1 << TILE_LOG2_BUNDLE_SIZE_IN_BYTES,
+  TILEGX_BUNDLE_SIZE_IN_BYTES = 1 << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES,
 
-  /* Log base 2 of TILE_BUNDLE_ALIGNMENT_IN_BYTES. */
-  TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3,
+  /* Log base 2 of TILEGX_BUNDLE_ALIGNMENT_IN_BYTES. */
+  TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3,
 
   /* Bundles should be aligned modulo this number of bytes. */
-  TILE_BUNDLE_ALIGNMENT_IN_BYTES =
-    (1 << TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES),
-
-  /* Log base 2 of TILE_SN_INSTRUCTION_SIZE_IN_BYTES. */
-  TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES = 1,
-
-  /* Static network instructions take this many bytes. */
-  TILE_SN_INSTRUCTION_SIZE_IN_BYTES =
-    (1 << TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES),
+  TILEGX_BUNDLE_ALIGNMENT_IN_BYTES =
+    (1 << TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES),
 
   /* Number of registers (some are magic, such as network I/O). */
-  TILE_NUM_REGISTERS = 64,
-
-  /* Number of static network registers. */
-  TILE_NUM_SN_REGISTERS = 4
+  TILEGX_NUM_REGISTERS = 64,
 };
 
 
-struct tile_operand
+struct tilegx_operand
 {
   /* Is this operand a register, immediate or address? */
-  tile_operand_type type;
+  tilegx_operand_type type;
 
   /* The default relocation type for this operand.  */
   signed int default_reloc : 16;
@@ -1437,27 +1180,27 @@
   unsigned int rightshift : 2;
 
   /* Return the bits for this operand to be ORed into an existing bundle. */
-  tile_bundle_bits (*insert) (int op);
+  tilegx_bundle_bits (*insert) (int op);
 
   /* Extract this operand and return it. */
-  unsigned int (*extract) (tile_bundle_bits bundle);
+  unsigned int (*extract) (tilegx_bundle_bits bundle);
 };
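/*
 * Editor's sketch, not part of the patch: the insert/extract callbacks
 * are the whole encode/decode story for one operand.  set_operand() is a
 * hypothetical helper; it assumes the operand's field bits are currently
 * zero in the bundle, since insert() only ORs bits in.
 */
static __inline tilegx_bundle_bits
set_operand(tilegx_bundle_bits bundle, const struct tilegx_operand *op,
            int val)
{
  return bundle | op->insert(val);
}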
 
 
-extern const struct tile_operand tile_operands[];
+extern const struct tilegx_operand tilegx_operands[];
 
 /* One finite-state machine per pipe for rapid instruction decoding. */
 extern const unsigned short * const
-tile_bundle_decoder_fsms[TILE_NUM_PIPELINE_ENCODINGS];
+tilegx_bundle_decoder_fsms[TILEGX_NUM_PIPELINE_ENCODINGS];
 
 
-struct tile_opcode
+struct tilegx_opcode
 {
   /* The opcode mnemonic, e.g. "add" */
   const char *name;
 
   /* The enum value for this mnemonic. */
-  tile_mnemonic mnemonic;
+  tilegx_mnemonic mnemonic;
 
   /* A bit mask of which of the five pipes this instruction
      is compatible with:
@@ -1478,36 +1221,28 @@
   unsigned char can_bundle;
 
   /* The description of the operands. Each of these is an
-   * index into the tile_operands[] table. */
-  unsigned char operands[TILE_NUM_PIPELINE_ENCODINGS][TILE_MAX_OPERANDS];
+   * index into the tilegx_operands[] table. */
+  unsigned char operands[TILEGX_NUM_PIPELINE_ENCODINGS][TILEGX_MAX_OPERANDS];
 
 };
 
-extern const struct tile_opcode tile_opcodes[];
-
+extern const struct tilegx_opcode tilegx_opcodes[];
 
 /* Used for non-textual disassembly into structs. */
-struct tile_decoded_instruction
+struct tilegx_decoded_instruction
 {
-  const struct tile_opcode *opcode;
-  const struct tile_operand *operands[TILE_MAX_OPERANDS];
-  int operand_values[TILE_MAX_OPERANDS];
+  const struct tilegx_opcode *opcode;
+  const struct tilegx_operand *operands[TILEGX_MAX_OPERANDS];
+  long long operand_values[TILEGX_MAX_OPERANDS];
 };
 
 
 /* Disassemble a bundle into a struct for machine processing. */
-extern int parse_insn_tile(tile_bundle_bits bits,
-                           unsigned int pc,
-                           struct tile_decoded_instruction
-                           decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]);
-
-
-/* Given a set of bundle bits and a specific pipe, returns which
- * instruction the bundle contains in that pipe.
- */
-extern const struct tile_opcode *
-find_opcode(tile_bundle_bits bits, tile_pipeline pipe);
+extern int parse_insn_tilegx(tilegx_bundle_bits bits,
+                             unsigned long long pc,
+                             struct tilegx_decoded_instruction
+                             decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE]);
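/*
 * Editor's sketch of typical use, not part of the patch: decode one
 * bundle and walk the instructions it contained.  This assumes, as with
 * the 32-bit decoder, that the return value is the number of
 * instructions decoded; dump_bundle() itself is hypothetical.
 */
static void
dump_bundle(tilegx_bundle_bits bits, unsigned long long pc)
{
  struct tilegx_decoded_instruction
    decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE];
  int i, num = parse_insn_tilegx(bits, pc, decoded);
  for (i = 0; i < num; i++)
  {
    /* e.g. decoded[i].opcode->name, decoded[i].operand_values[0], ... */
  }
}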
 
 
 
-#endif /* opcode_tile_h */
+#endif /* opcode_tilegx_h */
diff --git a/arch/tile/include/asm/opcode_constants_64.h b/arch/tile/include/asm/opcode_constants_64.h
index 227d033..7101928 100644
--- a/arch/tile/include/asm/opcode_constants_64.h
+++ b/arch/tile/include/asm/opcode_constants_64.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
  *
  *   This program is free software; you can redistribute it and/or
  *   modify it under the terms of the GNU General Public License
@@ -19,462 +19,591 @@
 #define _TILE_OPCODE_CONSTANTS_H
 enum
 {
-  ADDBS_U_SPECIAL_0_OPCODE_X0 = 98,
-  ADDBS_U_SPECIAL_0_OPCODE_X1 = 68,
-  ADDB_SPECIAL_0_OPCODE_X0 = 1,
-  ADDB_SPECIAL_0_OPCODE_X1 = 1,
-  ADDHS_SPECIAL_0_OPCODE_X0 = 99,
-  ADDHS_SPECIAL_0_OPCODE_X1 = 69,
-  ADDH_SPECIAL_0_OPCODE_X0 = 2,
-  ADDH_SPECIAL_0_OPCODE_X1 = 2,
-  ADDIB_IMM_0_OPCODE_X0 = 1,
-  ADDIB_IMM_0_OPCODE_X1 = 1,
-  ADDIH_IMM_0_OPCODE_X0 = 2,
-  ADDIH_IMM_0_OPCODE_X1 = 2,
-  ADDI_IMM_0_OPCODE_X0 = 3,
-  ADDI_IMM_0_OPCODE_X1 = 3,
-  ADDI_IMM_1_OPCODE_SN = 1,
-  ADDI_OPCODE_Y0 = 9,
-  ADDI_OPCODE_Y1 = 7,
-  ADDLIS_OPCODE_X0 = 1,
-  ADDLIS_OPCODE_X1 = 2,
-  ADDLI_OPCODE_X0 = 2,
-  ADDLI_OPCODE_X1 = 3,
-  ADDS_SPECIAL_0_OPCODE_X0 = 96,
-  ADDS_SPECIAL_0_OPCODE_X1 = 66,
-  ADD_SPECIAL_0_OPCODE_X0 = 3,
-  ADD_SPECIAL_0_OPCODE_X1 = 3,
-  ADD_SPECIAL_0_OPCODE_Y0 = 0,
-  ADD_SPECIAL_0_OPCODE_Y1 = 0,
-  ADIFFB_U_SPECIAL_0_OPCODE_X0 = 4,
-  ADIFFH_SPECIAL_0_OPCODE_X0 = 5,
-  ANDI_IMM_0_OPCODE_X0 = 1,
-  ANDI_IMM_0_OPCODE_X1 = 4,
-  ANDI_OPCODE_Y0 = 10,
-  ANDI_OPCODE_Y1 = 8,
-  AND_SPECIAL_0_OPCODE_X0 = 6,
-  AND_SPECIAL_0_OPCODE_X1 = 4,
-  AND_SPECIAL_2_OPCODE_Y0 = 0,
-  AND_SPECIAL_2_OPCODE_Y1 = 0,
-  AULI_OPCODE_X0 = 3,
-  AULI_OPCODE_X1 = 4,
-  AVGB_U_SPECIAL_0_OPCODE_X0 = 7,
-  AVGH_SPECIAL_0_OPCODE_X0 = 8,
-  BBNST_BRANCH_OPCODE_X1 = 15,
-  BBNS_BRANCH_OPCODE_X1 = 14,
-  BBNS_OPCODE_SN = 63,
-  BBST_BRANCH_OPCODE_X1 = 13,
-  BBS_BRANCH_OPCODE_X1 = 12,
-  BBS_OPCODE_SN = 62,
-  BGEZT_BRANCH_OPCODE_X1 = 7,
-  BGEZ_BRANCH_OPCODE_X1 = 6,
-  BGEZ_OPCODE_SN = 61,
-  BGZT_BRANCH_OPCODE_X1 = 5,
-  BGZ_BRANCH_OPCODE_X1 = 4,
-  BGZ_OPCODE_SN = 58,
-  BITX_UN_0_SHUN_0_OPCODE_X0 = 1,
-  BITX_UN_0_SHUN_0_OPCODE_Y0 = 1,
-  BLEZT_BRANCH_OPCODE_X1 = 11,
-  BLEZ_BRANCH_OPCODE_X1 = 10,
-  BLEZ_OPCODE_SN = 59,
-  BLZT_BRANCH_OPCODE_X1 = 9,
-  BLZ_BRANCH_OPCODE_X1 = 8,
-  BLZ_OPCODE_SN = 60,
-  BNZT_BRANCH_OPCODE_X1 = 3,
-  BNZ_BRANCH_OPCODE_X1 = 2,
-  BNZ_OPCODE_SN = 57,
-  BPT_NOREG_RR_IMM_0_OPCODE_SN = 1,
-  BRANCH_OPCODE_X1 = 5,
-  BYTEX_UN_0_SHUN_0_OPCODE_X0 = 2,
-  BYTEX_UN_0_SHUN_0_OPCODE_Y0 = 2,
-  BZT_BRANCH_OPCODE_X1 = 1,
-  BZ_BRANCH_OPCODE_X1 = 0,
-  BZ_OPCODE_SN = 56,
-  CLZ_UN_0_SHUN_0_OPCODE_X0 = 3,
-  CLZ_UN_0_SHUN_0_OPCODE_Y0 = 3,
-  CRC32_32_SPECIAL_0_OPCODE_X0 = 9,
-  CRC32_8_SPECIAL_0_OPCODE_X0 = 10,
-  CTZ_UN_0_SHUN_0_OPCODE_X0 = 4,
-  CTZ_UN_0_SHUN_0_OPCODE_Y0 = 4,
-  DRAIN_UN_0_SHUN_0_OPCODE_X1 = 1,
-  DTLBPR_UN_0_SHUN_0_OPCODE_X1 = 2,
-  DWORD_ALIGN_SPECIAL_0_OPCODE_X0 = 95,
-  FINV_UN_0_SHUN_0_OPCODE_X1 = 3,
-  FLUSH_UN_0_SHUN_0_OPCODE_X1 = 4,
-  FNOP_NOREG_RR_IMM_0_OPCODE_SN = 3,
-  FNOP_UN_0_SHUN_0_OPCODE_X0 = 5,
-  FNOP_UN_0_SHUN_0_OPCODE_X1 = 5,
-  FNOP_UN_0_SHUN_0_OPCODE_Y0 = 5,
-  FNOP_UN_0_SHUN_0_OPCODE_Y1 = 1,
-  HALT_NOREG_RR_IMM_0_OPCODE_SN = 0,
-  ICOH_UN_0_SHUN_0_OPCODE_X1 = 6,
-  ILL_UN_0_SHUN_0_OPCODE_X1 = 7,
-  ILL_UN_0_SHUN_0_OPCODE_Y1 = 2,
-  IMM_0_OPCODE_SN = 0,
-  IMM_0_OPCODE_X0 = 4,
-  IMM_0_OPCODE_X1 = 6,
-  IMM_1_OPCODE_SN = 1,
-  IMM_OPCODE_0_X0 = 5,
-  INTHB_SPECIAL_0_OPCODE_X0 = 11,
-  INTHB_SPECIAL_0_OPCODE_X1 = 5,
-  INTHH_SPECIAL_0_OPCODE_X0 = 12,
-  INTHH_SPECIAL_0_OPCODE_X1 = 6,
-  INTLB_SPECIAL_0_OPCODE_X0 = 13,
-  INTLB_SPECIAL_0_OPCODE_X1 = 7,
-  INTLH_SPECIAL_0_OPCODE_X0 = 14,
-  INTLH_SPECIAL_0_OPCODE_X1 = 8,
-  INV_UN_0_SHUN_0_OPCODE_X1 = 8,
-  IRET_UN_0_SHUN_0_OPCODE_X1 = 9,
-  JALB_OPCODE_X1 = 13,
-  JALF_OPCODE_X1 = 12,
-  JALRP_SPECIAL_0_OPCODE_X1 = 9,
-  JALRR_IMM_1_OPCODE_SN = 3,
-  JALR_RR_IMM_0_OPCODE_SN = 5,
-  JALR_SPECIAL_0_OPCODE_X1 = 10,
-  JB_OPCODE_X1 = 11,
-  JF_OPCODE_X1 = 10,
-  JRP_SPECIAL_0_OPCODE_X1 = 11,
-  JRR_IMM_1_OPCODE_SN = 2,
-  JR_RR_IMM_0_OPCODE_SN = 4,
-  JR_SPECIAL_0_OPCODE_X1 = 12,
-  LBADD_IMM_0_OPCODE_X1 = 22,
-  LBADD_U_IMM_0_OPCODE_X1 = 23,
-  LB_OPCODE_Y2 = 0,
-  LB_UN_0_SHUN_0_OPCODE_X1 = 10,
-  LB_U_OPCODE_Y2 = 1,
-  LB_U_UN_0_SHUN_0_OPCODE_X1 = 11,
-  LHADD_IMM_0_OPCODE_X1 = 24,
-  LHADD_U_IMM_0_OPCODE_X1 = 25,
-  LH_OPCODE_Y2 = 2,
-  LH_UN_0_SHUN_0_OPCODE_X1 = 12,
-  LH_U_OPCODE_Y2 = 3,
-  LH_U_UN_0_SHUN_0_OPCODE_X1 = 13,
-  LNK_SPECIAL_0_OPCODE_X1 = 13,
-  LWADD_IMM_0_OPCODE_X1 = 26,
-  LWADD_NA_IMM_0_OPCODE_X1 = 27,
-  LW_NA_UN_0_SHUN_0_OPCODE_X1 = 24,
-  LW_OPCODE_Y2 = 4,
-  LW_UN_0_SHUN_0_OPCODE_X1 = 14,
-  MAXB_U_SPECIAL_0_OPCODE_X0 = 15,
-  MAXB_U_SPECIAL_0_OPCODE_X1 = 14,
-  MAXH_SPECIAL_0_OPCODE_X0 = 16,
-  MAXH_SPECIAL_0_OPCODE_X1 = 15,
-  MAXIB_U_IMM_0_OPCODE_X0 = 4,
-  MAXIB_U_IMM_0_OPCODE_X1 = 5,
-  MAXIH_IMM_0_OPCODE_X0 = 5,
-  MAXIH_IMM_0_OPCODE_X1 = 6,
-  MFSPR_IMM_0_OPCODE_X1 = 7,
-  MF_UN_0_SHUN_0_OPCODE_X1 = 15,
-  MINB_U_SPECIAL_0_OPCODE_X0 = 17,
-  MINB_U_SPECIAL_0_OPCODE_X1 = 16,
-  MINH_SPECIAL_0_OPCODE_X0 = 18,
-  MINH_SPECIAL_0_OPCODE_X1 = 17,
-  MINIB_U_IMM_0_OPCODE_X0 = 6,
-  MINIB_U_IMM_0_OPCODE_X1 = 8,
-  MINIH_IMM_0_OPCODE_X0 = 7,
-  MINIH_IMM_0_OPCODE_X1 = 9,
-  MM_OPCODE_X0 = 6,
-  MM_OPCODE_X1 = 7,
-  MNZB_SPECIAL_0_OPCODE_X0 = 19,
-  MNZB_SPECIAL_0_OPCODE_X1 = 18,
-  MNZH_SPECIAL_0_OPCODE_X0 = 20,
-  MNZH_SPECIAL_0_OPCODE_X1 = 19,
-  MNZ_SPECIAL_0_OPCODE_X0 = 21,
-  MNZ_SPECIAL_0_OPCODE_X1 = 20,
-  MNZ_SPECIAL_1_OPCODE_Y0 = 0,
-  MNZ_SPECIAL_1_OPCODE_Y1 = 1,
-  MOVEI_IMM_1_OPCODE_SN = 0,
-  MOVE_RR_IMM_0_OPCODE_SN = 8,
-  MTSPR_IMM_0_OPCODE_X1 = 10,
-  MULHHA_SS_SPECIAL_0_OPCODE_X0 = 22,
-  MULHHA_SS_SPECIAL_7_OPCODE_Y0 = 0,
-  MULHHA_SU_SPECIAL_0_OPCODE_X0 = 23,
-  MULHHA_UU_SPECIAL_0_OPCODE_X0 = 24,
-  MULHHA_UU_SPECIAL_7_OPCODE_Y0 = 1,
-  MULHHSA_UU_SPECIAL_0_OPCODE_X0 = 25,
-  MULHH_SS_SPECIAL_0_OPCODE_X0 = 26,
-  MULHH_SS_SPECIAL_6_OPCODE_Y0 = 0,
-  MULHH_SU_SPECIAL_0_OPCODE_X0 = 27,
-  MULHH_UU_SPECIAL_0_OPCODE_X0 = 28,
-  MULHH_UU_SPECIAL_6_OPCODE_Y0 = 1,
-  MULHLA_SS_SPECIAL_0_OPCODE_X0 = 29,
-  MULHLA_SU_SPECIAL_0_OPCODE_X0 = 30,
-  MULHLA_US_SPECIAL_0_OPCODE_X0 = 31,
-  MULHLA_UU_SPECIAL_0_OPCODE_X0 = 32,
-  MULHLSA_UU_SPECIAL_0_OPCODE_X0 = 33,
-  MULHLSA_UU_SPECIAL_5_OPCODE_Y0 = 0,
-  MULHL_SS_SPECIAL_0_OPCODE_X0 = 34,
-  MULHL_SU_SPECIAL_0_OPCODE_X0 = 35,
-  MULHL_US_SPECIAL_0_OPCODE_X0 = 36,
-  MULHL_UU_SPECIAL_0_OPCODE_X0 = 37,
-  MULLLA_SS_SPECIAL_0_OPCODE_X0 = 38,
-  MULLLA_SS_SPECIAL_7_OPCODE_Y0 = 2,
-  MULLLA_SU_SPECIAL_0_OPCODE_X0 = 39,
-  MULLLA_UU_SPECIAL_0_OPCODE_X0 = 40,
-  MULLLA_UU_SPECIAL_7_OPCODE_Y0 = 3,
-  MULLLSA_UU_SPECIAL_0_OPCODE_X0 = 41,
-  MULLL_SS_SPECIAL_0_OPCODE_X0 = 42,
-  MULLL_SS_SPECIAL_6_OPCODE_Y0 = 2,
-  MULLL_SU_SPECIAL_0_OPCODE_X0 = 43,
-  MULLL_UU_SPECIAL_0_OPCODE_X0 = 44,
-  MULLL_UU_SPECIAL_6_OPCODE_Y0 = 3,
-  MVNZ_SPECIAL_0_OPCODE_X0 = 45,
-  MVNZ_SPECIAL_1_OPCODE_Y0 = 1,
-  MVZ_SPECIAL_0_OPCODE_X0 = 46,
-  MVZ_SPECIAL_1_OPCODE_Y0 = 2,
-  MZB_SPECIAL_0_OPCODE_X0 = 47,
-  MZB_SPECIAL_0_OPCODE_X1 = 21,
-  MZH_SPECIAL_0_OPCODE_X0 = 48,
-  MZH_SPECIAL_0_OPCODE_X1 = 22,
-  MZ_SPECIAL_0_OPCODE_X0 = 49,
-  MZ_SPECIAL_0_OPCODE_X1 = 23,
-  MZ_SPECIAL_1_OPCODE_Y0 = 3,
-  MZ_SPECIAL_1_OPCODE_Y1 = 2,
-  NAP_UN_0_SHUN_0_OPCODE_X1 = 16,
-  NOP_NOREG_RR_IMM_0_OPCODE_SN = 2,
-  NOP_UN_0_SHUN_0_OPCODE_X0 = 6,
-  NOP_UN_0_SHUN_0_OPCODE_X1 = 17,
-  NOP_UN_0_SHUN_0_OPCODE_Y0 = 6,
-  NOP_UN_0_SHUN_0_OPCODE_Y1 = 3,
-  NOREG_RR_IMM_0_OPCODE_SN = 0,
-  NOR_SPECIAL_0_OPCODE_X0 = 50,
-  NOR_SPECIAL_0_OPCODE_X1 = 24,
-  NOR_SPECIAL_2_OPCODE_Y0 = 1,
-  NOR_SPECIAL_2_OPCODE_Y1 = 1,
-  ORI_IMM_0_OPCODE_X0 = 8,
-  ORI_IMM_0_OPCODE_X1 = 11,
-  ORI_OPCODE_Y0 = 11,
-  ORI_OPCODE_Y1 = 9,
-  OR_SPECIAL_0_OPCODE_X0 = 51,
-  OR_SPECIAL_0_OPCODE_X1 = 25,
-  OR_SPECIAL_2_OPCODE_Y0 = 2,
-  OR_SPECIAL_2_OPCODE_Y1 = 2,
-  PACKBS_U_SPECIAL_0_OPCODE_X0 = 103,
-  PACKBS_U_SPECIAL_0_OPCODE_X1 = 73,
-  PACKHB_SPECIAL_0_OPCODE_X0 = 52,
-  PACKHB_SPECIAL_0_OPCODE_X1 = 26,
-  PACKHS_SPECIAL_0_OPCODE_X0 = 102,
-  PACKHS_SPECIAL_0_OPCODE_X1 = 72,
-  PACKLB_SPECIAL_0_OPCODE_X0 = 53,
-  PACKLB_SPECIAL_0_OPCODE_X1 = 27,
-  PCNT_UN_0_SHUN_0_OPCODE_X0 = 7,
-  PCNT_UN_0_SHUN_0_OPCODE_Y0 = 7,
-  RLI_SHUN_0_OPCODE_X0 = 1,
-  RLI_SHUN_0_OPCODE_X1 = 1,
-  RLI_SHUN_0_OPCODE_Y0 = 1,
-  RLI_SHUN_0_OPCODE_Y1 = 1,
-  RL_SPECIAL_0_OPCODE_X0 = 54,
-  RL_SPECIAL_0_OPCODE_X1 = 28,
-  RL_SPECIAL_3_OPCODE_Y0 = 0,
-  RL_SPECIAL_3_OPCODE_Y1 = 0,
-  RR_IMM_0_OPCODE_SN = 0,
-  S1A_SPECIAL_0_OPCODE_X0 = 55,
-  S1A_SPECIAL_0_OPCODE_X1 = 29,
-  S1A_SPECIAL_0_OPCODE_Y0 = 1,
-  S1A_SPECIAL_0_OPCODE_Y1 = 1,
-  S2A_SPECIAL_0_OPCODE_X0 = 56,
-  S2A_SPECIAL_0_OPCODE_X1 = 30,
-  S2A_SPECIAL_0_OPCODE_Y0 = 2,
-  S2A_SPECIAL_0_OPCODE_Y1 = 2,
-  S3A_SPECIAL_0_OPCODE_X0 = 57,
-  S3A_SPECIAL_0_OPCODE_X1 = 31,
-  S3A_SPECIAL_5_OPCODE_Y0 = 1,
-  S3A_SPECIAL_5_OPCODE_Y1 = 1,
-  SADAB_U_SPECIAL_0_OPCODE_X0 = 58,
-  SADAH_SPECIAL_0_OPCODE_X0 = 59,
-  SADAH_U_SPECIAL_0_OPCODE_X0 = 60,
-  SADB_U_SPECIAL_0_OPCODE_X0 = 61,
-  SADH_SPECIAL_0_OPCODE_X0 = 62,
-  SADH_U_SPECIAL_0_OPCODE_X0 = 63,
-  SBADD_IMM_0_OPCODE_X1 = 28,
-  SB_OPCODE_Y2 = 5,
-  SB_SPECIAL_0_OPCODE_X1 = 32,
-  SEQB_SPECIAL_0_OPCODE_X0 = 64,
-  SEQB_SPECIAL_0_OPCODE_X1 = 33,
-  SEQH_SPECIAL_0_OPCODE_X0 = 65,
-  SEQH_SPECIAL_0_OPCODE_X1 = 34,
-  SEQIB_IMM_0_OPCODE_X0 = 9,
-  SEQIB_IMM_0_OPCODE_X1 = 12,
-  SEQIH_IMM_0_OPCODE_X0 = 10,
-  SEQIH_IMM_0_OPCODE_X1 = 13,
-  SEQI_IMM_0_OPCODE_X0 = 11,
-  SEQI_IMM_0_OPCODE_X1 = 14,
-  SEQI_OPCODE_Y0 = 12,
-  SEQI_OPCODE_Y1 = 10,
-  SEQ_SPECIAL_0_OPCODE_X0 = 66,
-  SEQ_SPECIAL_0_OPCODE_X1 = 35,
-  SEQ_SPECIAL_5_OPCODE_Y0 = 2,
-  SEQ_SPECIAL_5_OPCODE_Y1 = 2,
-  SHADD_IMM_0_OPCODE_X1 = 29,
-  SHL8II_IMM_0_OPCODE_SN = 3,
-  SHLB_SPECIAL_0_OPCODE_X0 = 67,
-  SHLB_SPECIAL_0_OPCODE_X1 = 36,
-  SHLH_SPECIAL_0_OPCODE_X0 = 68,
-  SHLH_SPECIAL_0_OPCODE_X1 = 37,
-  SHLIB_SHUN_0_OPCODE_X0 = 2,
-  SHLIB_SHUN_0_OPCODE_X1 = 2,
-  SHLIH_SHUN_0_OPCODE_X0 = 3,
-  SHLIH_SHUN_0_OPCODE_X1 = 3,
-  SHLI_SHUN_0_OPCODE_X0 = 4,
-  SHLI_SHUN_0_OPCODE_X1 = 4,
-  SHLI_SHUN_0_OPCODE_Y0 = 2,
-  SHLI_SHUN_0_OPCODE_Y1 = 2,
-  SHL_SPECIAL_0_OPCODE_X0 = 69,
-  SHL_SPECIAL_0_OPCODE_X1 = 38,
-  SHL_SPECIAL_3_OPCODE_Y0 = 1,
-  SHL_SPECIAL_3_OPCODE_Y1 = 1,
-  SHR1_RR_IMM_0_OPCODE_SN = 9,
-  SHRB_SPECIAL_0_OPCODE_X0 = 70,
-  SHRB_SPECIAL_0_OPCODE_X1 = 39,
-  SHRH_SPECIAL_0_OPCODE_X0 = 71,
-  SHRH_SPECIAL_0_OPCODE_X1 = 40,
-  SHRIB_SHUN_0_OPCODE_X0 = 5,
-  SHRIB_SHUN_0_OPCODE_X1 = 5,
-  SHRIH_SHUN_0_OPCODE_X0 = 6,
-  SHRIH_SHUN_0_OPCODE_X1 = 6,
-  SHRI_SHUN_0_OPCODE_X0 = 7,
-  SHRI_SHUN_0_OPCODE_X1 = 7,
-  SHRI_SHUN_0_OPCODE_Y0 = 3,
-  SHRI_SHUN_0_OPCODE_Y1 = 3,
-  SHR_SPECIAL_0_OPCODE_X0 = 72,
-  SHR_SPECIAL_0_OPCODE_X1 = 41,
-  SHR_SPECIAL_3_OPCODE_Y0 = 2,
-  SHR_SPECIAL_3_OPCODE_Y1 = 2,
-  SHUN_0_OPCODE_X0 = 7,
-  SHUN_0_OPCODE_X1 = 8,
-  SHUN_0_OPCODE_Y0 = 13,
-  SHUN_0_OPCODE_Y1 = 11,
-  SH_OPCODE_Y2 = 6,
-  SH_SPECIAL_0_OPCODE_X1 = 42,
-  SLTB_SPECIAL_0_OPCODE_X0 = 73,
-  SLTB_SPECIAL_0_OPCODE_X1 = 43,
-  SLTB_U_SPECIAL_0_OPCODE_X0 = 74,
-  SLTB_U_SPECIAL_0_OPCODE_X1 = 44,
-  SLTEB_SPECIAL_0_OPCODE_X0 = 75,
-  SLTEB_SPECIAL_0_OPCODE_X1 = 45,
-  SLTEB_U_SPECIAL_0_OPCODE_X0 = 76,
-  SLTEB_U_SPECIAL_0_OPCODE_X1 = 46,
-  SLTEH_SPECIAL_0_OPCODE_X0 = 77,
-  SLTEH_SPECIAL_0_OPCODE_X1 = 47,
-  SLTEH_U_SPECIAL_0_OPCODE_X0 = 78,
-  SLTEH_U_SPECIAL_0_OPCODE_X1 = 48,
-  SLTE_SPECIAL_0_OPCODE_X0 = 79,
-  SLTE_SPECIAL_0_OPCODE_X1 = 49,
-  SLTE_SPECIAL_4_OPCODE_Y0 = 0,
-  SLTE_SPECIAL_4_OPCODE_Y1 = 0,
-  SLTE_U_SPECIAL_0_OPCODE_X0 = 80,
-  SLTE_U_SPECIAL_0_OPCODE_X1 = 50,
-  SLTE_U_SPECIAL_4_OPCODE_Y0 = 1,
-  SLTE_U_SPECIAL_4_OPCODE_Y1 = 1,
-  SLTH_SPECIAL_0_OPCODE_X0 = 81,
-  SLTH_SPECIAL_0_OPCODE_X1 = 51,
-  SLTH_U_SPECIAL_0_OPCODE_X0 = 82,
-  SLTH_U_SPECIAL_0_OPCODE_X1 = 52,
-  SLTIB_IMM_0_OPCODE_X0 = 12,
-  SLTIB_IMM_0_OPCODE_X1 = 15,
-  SLTIB_U_IMM_0_OPCODE_X0 = 13,
-  SLTIB_U_IMM_0_OPCODE_X1 = 16,
-  SLTIH_IMM_0_OPCODE_X0 = 14,
-  SLTIH_IMM_0_OPCODE_X1 = 17,
-  SLTIH_U_IMM_0_OPCODE_X0 = 15,
-  SLTIH_U_IMM_0_OPCODE_X1 = 18,
-  SLTI_IMM_0_OPCODE_X0 = 16,
-  SLTI_IMM_0_OPCODE_X1 = 19,
-  SLTI_OPCODE_Y0 = 14,
-  SLTI_OPCODE_Y1 = 12,
-  SLTI_U_IMM_0_OPCODE_X0 = 17,
-  SLTI_U_IMM_0_OPCODE_X1 = 20,
-  SLTI_U_OPCODE_Y0 = 15,
-  SLTI_U_OPCODE_Y1 = 13,
-  SLT_SPECIAL_0_OPCODE_X0 = 83,
-  SLT_SPECIAL_0_OPCODE_X1 = 53,
-  SLT_SPECIAL_4_OPCODE_Y0 = 2,
-  SLT_SPECIAL_4_OPCODE_Y1 = 2,
-  SLT_U_SPECIAL_0_OPCODE_X0 = 84,
-  SLT_U_SPECIAL_0_OPCODE_X1 = 54,
-  SLT_U_SPECIAL_4_OPCODE_Y0 = 3,
-  SLT_U_SPECIAL_4_OPCODE_Y1 = 3,
-  SNEB_SPECIAL_0_OPCODE_X0 = 85,
-  SNEB_SPECIAL_0_OPCODE_X1 = 55,
-  SNEH_SPECIAL_0_OPCODE_X0 = 86,
-  SNEH_SPECIAL_0_OPCODE_X1 = 56,
-  SNE_SPECIAL_0_OPCODE_X0 = 87,
-  SNE_SPECIAL_0_OPCODE_X1 = 57,
-  SNE_SPECIAL_5_OPCODE_Y0 = 3,
-  SNE_SPECIAL_5_OPCODE_Y1 = 3,
-  SPECIAL_0_OPCODE_X0 = 0,
-  SPECIAL_0_OPCODE_X1 = 1,
-  SPECIAL_0_OPCODE_Y0 = 1,
-  SPECIAL_0_OPCODE_Y1 = 1,
-  SPECIAL_1_OPCODE_Y0 = 2,
-  SPECIAL_1_OPCODE_Y1 = 2,
-  SPECIAL_2_OPCODE_Y0 = 3,
-  SPECIAL_2_OPCODE_Y1 = 3,
-  SPECIAL_3_OPCODE_Y0 = 4,
-  SPECIAL_3_OPCODE_Y1 = 4,
-  SPECIAL_4_OPCODE_Y0 = 5,
-  SPECIAL_4_OPCODE_Y1 = 5,
-  SPECIAL_5_OPCODE_Y0 = 6,
-  SPECIAL_5_OPCODE_Y1 = 6,
-  SPECIAL_6_OPCODE_Y0 = 7,
-  SPECIAL_7_OPCODE_Y0 = 8,
-  SRAB_SPECIAL_0_OPCODE_X0 = 88,
-  SRAB_SPECIAL_0_OPCODE_X1 = 58,
-  SRAH_SPECIAL_0_OPCODE_X0 = 89,
-  SRAH_SPECIAL_0_OPCODE_X1 = 59,
-  SRAIB_SHUN_0_OPCODE_X0 = 8,
-  SRAIB_SHUN_0_OPCODE_X1 = 8,
-  SRAIH_SHUN_0_OPCODE_X0 = 9,
-  SRAIH_SHUN_0_OPCODE_X1 = 9,
-  SRAI_SHUN_0_OPCODE_X0 = 10,
-  SRAI_SHUN_0_OPCODE_X1 = 10,
-  SRAI_SHUN_0_OPCODE_Y0 = 4,
-  SRAI_SHUN_0_OPCODE_Y1 = 4,
-  SRA_SPECIAL_0_OPCODE_X0 = 90,
-  SRA_SPECIAL_0_OPCODE_X1 = 60,
-  SRA_SPECIAL_3_OPCODE_Y0 = 3,
-  SRA_SPECIAL_3_OPCODE_Y1 = 3,
-  SUBBS_U_SPECIAL_0_OPCODE_X0 = 100,
-  SUBBS_U_SPECIAL_0_OPCODE_X1 = 70,
-  SUBB_SPECIAL_0_OPCODE_X0 = 91,
-  SUBB_SPECIAL_0_OPCODE_X1 = 61,
-  SUBHS_SPECIAL_0_OPCODE_X0 = 101,
-  SUBHS_SPECIAL_0_OPCODE_X1 = 71,
-  SUBH_SPECIAL_0_OPCODE_X0 = 92,
-  SUBH_SPECIAL_0_OPCODE_X1 = 62,
-  SUBS_SPECIAL_0_OPCODE_X0 = 97,
-  SUBS_SPECIAL_0_OPCODE_X1 = 67,
-  SUB_SPECIAL_0_OPCODE_X0 = 93,
-  SUB_SPECIAL_0_OPCODE_X1 = 63,
-  SUB_SPECIAL_0_OPCODE_Y0 = 3,
-  SUB_SPECIAL_0_OPCODE_Y1 = 3,
-  SWADD_IMM_0_OPCODE_X1 = 30,
-  SWINT0_UN_0_SHUN_0_OPCODE_X1 = 18,
-  SWINT1_UN_0_SHUN_0_OPCODE_X1 = 19,
-  SWINT2_UN_0_SHUN_0_OPCODE_X1 = 20,
-  SWINT3_UN_0_SHUN_0_OPCODE_X1 = 21,
-  SW_OPCODE_Y2 = 7,
-  SW_SPECIAL_0_OPCODE_X1 = 64,
-  TBLIDXB0_UN_0_SHUN_0_OPCODE_X0 = 8,
-  TBLIDXB0_UN_0_SHUN_0_OPCODE_Y0 = 8,
-  TBLIDXB1_UN_0_SHUN_0_OPCODE_X0 = 9,
-  TBLIDXB1_UN_0_SHUN_0_OPCODE_Y0 = 9,
-  TBLIDXB2_UN_0_SHUN_0_OPCODE_X0 = 10,
-  TBLIDXB2_UN_0_SHUN_0_OPCODE_Y0 = 10,
-  TBLIDXB3_UN_0_SHUN_0_OPCODE_X0 = 11,
-  TBLIDXB3_UN_0_SHUN_0_OPCODE_Y0 = 11,
-  TNS_UN_0_SHUN_0_OPCODE_X1 = 22,
-  UN_0_SHUN_0_OPCODE_X0 = 11,
-  UN_0_SHUN_0_OPCODE_X1 = 11,
-  UN_0_SHUN_0_OPCODE_Y0 = 5,
-  UN_0_SHUN_0_OPCODE_Y1 = 5,
-  WH64_UN_0_SHUN_0_OPCODE_X1 = 23,
-  XORI_IMM_0_OPCODE_X0 = 2,
-  XORI_IMM_0_OPCODE_X1 = 21,
-  XOR_SPECIAL_0_OPCODE_X0 = 94,
-  XOR_SPECIAL_0_OPCODE_X1 = 65,
-  XOR_SPECIAL_2_OPCODE_Y0 = 3,
-  XOR_SPECIAL_2_OPCODE_Y1 = 3
+  ADDI_IMM8_OPCODE_X0 = 1,
+  ADDI_IMM8_OPCODE_X1 = 1,
+  ADDI_OPCODE_Y0 = 0,
+  ADDI_OPCODE_Y1 = 1,
+  ADDLI_OPCODE_X0 = 1,
+  ADDLI_OPCODE_X1 = 0,
+  ADDXI_IMM8_OPCODE_X0 = 2,
+  ADDXI_IMM8_OPCODE_X1 = 2,
+  ADDXI_OPCODE_Y0 = 1,
+  ADDXI_OPCODE_Y1 = 2,
+  ADDXLI_OPCODE_X0 = 2,
+  ADDXLI_OPCODE_X1 = 1,
+  ADDXSC_RRR_0_OPCODE_X0 = 1,
+  ADDXSC_RRR_0_OPCODE_X1 = 1,
+  ADDX_RRR_0_OPCODE_X0 = 2,
+  ADDX_RRR_0_OPCODE_X1 = 2,
+  ADDX_RRR_0_OPCODE_Y0 = 0,
+  ADDX_SPECIAL_0_OPCODE_Y1 = 0,
+  ADD_RRR_0_OPCODE_X0 = 3,
+  ADD_RRR_0_OPCODE_X1 = 3,
+  ADD_RRR_0_OPCODE_Y0 = 1,
+  ADD_SPECIAL_0_OPCODE_Y1 = 1,
+  ANDI_IMM8_OPCODE_X0 = 3,
+  ANDI_IMM8_OPCODE_X1 = 3,
+  ANDI_OPCODE_Y0 = 2,
+  ANDI_OPCODE_Y1 = 3,
+  AND_RRR_0_OPCODE_X0 = 4,
+  AND_RRR_0_OPCODE_X1 = 4,
+  AND_RRR_5_OPCODE_Y0 = 0,
+  AND_RRR_5_OPCODE_Y1 = 0,
+  BEQZT_BRANCH_OPCODE_X1 = 16,
+  BEQZ_BRANCH_OPCODE_X1 = 17,
+  BFEXTS_BF_OPCODE_X0 = 4,
+  BFEXTU_BF_OPCODE_X0 = 5,
+  BFINS_BF_OPCODE_X0 = 6,
+  BF_OPCODE_X0 = 3,
+  BGEZT_BRANCH_OPCODE_X1 = 18,
+  BGEZ_BRANCH_OPCODE_X1 = 19,
+  BGTZT_BRANCH_OPCODE_X1 = 20,
+  BGTZ_BRANCH_OPCODE_X1 = 21,
+  BLBCT_BRANCH_OPCODE_X1 = 22,
+  BLBC_BRANCH_OPCODE_X1 = 23,
+  BLBST_BRANCH_OPCODE_X1 = 24,
+  BLBS_BRANCH_OPCODE_X1 = 25,
+  BLEZT_BRANCH_OPCODE_X1 = 26,
+  BLEZ_BRANCH_OPCODE_X1 = 27,
+  BLTZT_BRANCH_OPCODE_X1 = 28,
+  BLTZ_BRANCH_OPCODE_X1 = 29,
+  BNEZT_BRANCH_OPCODE_X1 = 30,
+  BNEZ_BRANCH_OPCODE_X1 = 31,
+  BRANCH_OPCODE_X1 = 2,
+  CMOVEQZ_RRR_0_OPCODE_X0 = 5,
+  CMOVEQZ_RRR_4_OPCODE_Y0 = 0,
+  CMOVNEZ_RRR_0_OPCODE_X0 = 6,
+  CMOVNEZ_RRR_4_OPCODE_Y0 = 1,
+  CMPEQI_IMM8_OPCODE_X0 = 4,
+  CMPEQI_IMM8_OPCODE_X1 = 4,
+  CMPEQI_OPCODE_Y0 = 3,
+  CMPEQI_OPCODE_Y1 = 4,
+  CMPEQ_RRR_0_OPCODE_X0 = 7,
+  CMPEQ_RRR_0_OPCODE_X1 = 5,
+  CMPEQ_RRR_3_OPCODE_Y0 = 0,
+  CMPEQ_RRR_3_OPCODE_Y1 = 2,
+  CMPEXCH4_RRR_0_OPCODE_X1 = 6,
+  CMPEXCH_RRR_0_OPCODE_X1 = 7,
+  CMPLES_RRR_0_OPCODE_X0 = 8,
+  CMPLES_RRR_0_OPCODE_X1 = 8,
+  CMPLES_RRR_2_OPCODE_Y0 = 0,
+  CMPLES_RRR_2_OPCODE_Y1 = 0,
+  CMPLEU_RRR_0_OPCODE_X0 = 9,
+  CMPLEU_RRR_0_OPCODE_X1 = 9,
+  CMPLEU_RRR_2_OPCODE_Y0 = 1,
+  CMPLEU_RRR_2_OPCODE_Y1 = 1,
+  CMPLTSI_IMM8_OPCODE_X0 = 5,
+  CMPLTSI_IMM8_OPCODE_X1 = 5,
+  CMPLTSI_OPCODE_Y0 = 4,
+  CMPLTSI_OPCODE_Y1 = 5,
+  CMPLTS_RRR_0_OPCODE_X0 = 10,
+  CMPLTS_RRR_0_OPCODE_X1 = 10,
+  CMPLTS_RRR_2_OPCODE_Y0 = 2,
+  CMPLTS_RRR_2_OPCODE_Y1 = 2,
+  CMPLTUI_IMM8_OPCODE_X0 = 6,
+  CMPLTUI_IMM8_OPCODE_X1 = 6,
+  CMPLTU_RRR_0_OPCODE_X0 = 11,
+  CMPLTU_RRR_0_OPCODE_X1 = 11,
+  CMPLTU_RRR_2_OPCODE_Y0 = 3,
+  CMPLTU_RRR_2_OPCODE_Y1 = 3,
+  CMPNE_RRR_0_OPCODE_X0 = 12,
+  CMPNE_RRR_0_OPCODE_X1 = 12,
+  CMPNE_RRR_3_OPCODE_Y0 = 1,
+  CMPNE_RRR_3_OPCODE_Y1 = 3,
+  CMULAF_RRR_0_OPCODE_X0 = 13,
+  CMULA_RRR_0_OPCODE_X0 = 14,
+  CMULFR_RRR_0_OPCODE_X0 = 15,
+  CMULF_RRR_0_OPCODE_X0 = 16,
+  CMULHR_RRR_0_OPCODE_X0 = 17,
+  CMULH_RRR_0_OPCODE_X0 = 18,
+  CMUL_RRR_0_OPCODE_X0 = 19,
+  CNTLZ_UNARY_OPCODE_X0 = 1,
+  CNTLZ_UNARY_OPCODE_Y0 = 1,
+  CNTTZ_UNARY_OPCODE_X0 = 2,
+  CNTTZ_UNARY_OPCODE_Y0 = 2,
+  CRC32_32_RRR_0_OPCODE_X0 = 20,
+  CRC32_8_RRR_0_OPCODE_X0 = 21,
+  DBLALIGN2_RRR_0_OPCODE_X0 = 22,
+  DBLALIGN2_RRR_0_OPCODE_X1 = 13,
+  DBLALIGN4_RRR_0_OPCODE_X0 = 23,
+  DBLALIGN4_RRR_0_OPCODE_X1 = 14,
+  DBLALIGN6_RRR_0_OPCODE_X0 = 24,
+  DBLALIGN6_RRR_0_OPCODE_X1 = 15,
+  DBLALIGN_RRR_0_OPCODE_X0 = 25,
+  DRAIN_UNARY_OPCODE_X1 = 1,
+  DTLBPR_UNARY_OPCODE_X1 = 2,
+  EXCH4_RRR_0_OPCODE_X1 = 16,
+  EXCH_RRR_0_OPCODE_X1 = 17,
+  FDOUBLE_ADDSUB_RRR_0_OPCODE_X0 = 26,
+  FDOUBLE_ADD_FLAGS_RRR_0_OPCODE_X0 = 27,
+  FDOUBLE_MUL_FLAGS_RRR_0_OPCODE_X0 = 28,
+  FDOUBLE_PACK1_RRR_0_OPCODE_X0 = 29,
+  FDOUBLE_PACK2_RRR_0_OPCODE_X0 = 30,
+  FDOUBLE_SUB_FLAGS_RRR_0_OPCODE_X0 = 31,
+  FDOUBLE_UNPACK_MAX_RRR_0_OPCODE_X0 = 32,
+  FDOUBLE_UNPACK_MIN_RRR_0_OPCODE_X0 = 33,
+  FETCHADD4_RRR_0_OPCODE_X1 = 18,
+  FETCHADDGEZ4_RRR_0_OPCODE_X1 = 19,
+  FETCHADDGEZ_RRR_0_OPCODE_X1 = 20,
+  FETCHADD_RRR_0_OPCODE_X1 = 21,
+  FETCHAND4_RRR_0_OPCODE_X1 = 22,
+  FETCHAND_RRR_0_OPCODE_X1 = 23,
+  FETCHOR4_RRR_0_OPCODE_X1 = 24,
+  FETCHOR_RRR_0_OPCODE_X1 = 25,
+  FINV_UNARY_OPCODE_X1 = 3,
+  FLUSHWB_UNARY_OPCODE_X1 = 4,
+  FLUSH_UNARY_OPCODE_X1 = 5,
+  FNOP_UNARY_OPCODE_X0 = 3,
+  FNOP_UNARY_OPCODE_X1 = 6,
+  FNOP_UNARY_OPCODE_Y0 = 3,
+  FNOP_UNARY_OPCODE_Y1 = 8,
+  FSINGLE_ADD1_RRR_0_OPCODE_X0 = 34,
+  FSINGLE_ADDSUB2_RRR_0_OPCODE_X0 = 35,
+  FSINGLE_MUL1_RRR_0_OPCODE_X0 = 36,
+  FSINGLE_MUL2_RRR_0_OPCODE_X0 = 37,
+  FSINGLE_PACK1_UNARY_OPCODE_X0 = 4,
+  FSINGLE_PACK1_UNARY_OPCODE_Y0 = 4,
+  FSINGLE_PACK2_RRR_0_OPCODE_X0 = 38,
+  FSINGLE_SUB1_RRR_0_OPCODE_X0 = 39,
+  ICOH_UNARY_OPCODE_X1 = 7,
+  ILL_UNARY_OPCODE_X1 = 8,
+  ILL_UNARY_OPCODE_Y1 = 9,
+  IMM8_OPCODE_X0 = 4,
+  IMM8_OPCODE_X1 = 3,
+  INV_UNARY_OPCODE_X1 = 9,
+  IRET_UNARY_OPCODE_X1 = 10,
+  JALRP_UNARY_OPCODE_X1 = 11,
+  JALRP_UNARY_OPCODE_Y1 = 10,
+  JALR_UNARY_OPCODE_X1 = 12,
+  JALR_UNARY_OPCODE_Y1 = 11,
+  JAL_JUMP_OPCODE_X1 = 0,
+  JRP_UNARY_OPCODE_X1 = 13,
+  JRP_UNARY_OPCODE_Y1 = 12,
+  JR_UNARY_OPCODE_X1 = 14,
+  JR_UNARY_OPCODE_Y1 = 13,
+  JUMP_OPCODE_X1 = 4,
+  J_JUMP_OPCODE_X1 = 1,
+  LD1S_ADD_IMM8_OPCODE_X1 = 7,
+  LD1S_OPCODE_Y2 = 0,
+  LD1S_UNARY_OPCODE_X1 = 15,
+  LD1U_ADD_IMM8_OPCODE_X1 = 8,
+  LD1U_OPCODE_Y2 = 1,
+  LD1U_UNARY_OPCODE_X1 = 16,
+  LD2S_ADD_IMM8_OPCODE_X1 = 9,
+  LD2S_OPCODE_Y2 = 2,
+  LD2S_UNARY_OPCODE_X1 = 17,
+  LD2U_ADD_IMM8_OPCODE_X1 = 10,
+  LD2U_OPCODE_Y2 = 3,
+  LD2U_UNARY_OPCODE_X1 = 18,
+  LD4S_ADD_IMM8_OPCODE_X1 = 11,
+  LD4S_OPCODE_Y2 = 1,
+  LD4S_UNARY_OPCODE_X1 = 19,
+  LD4U_ADD_IMM8_OPCODE_X1 = 12,
+  LD4U_OPCODE_Y2 = 2,
+  LD4U_UNARY_OPCODE_X1 = 20,
+  LDNA_UNARY_OPCODE_X1 = 21,
+  LDNT1S_ADD_IMM8_OPCODE_X1 = 13,
+  LDNT1S_UNARY_OPCODE_X1 = 22,
+  LDNT1U_ADD_IMM8_OPCODE_X1 = 14,
+  LDNT1U_UNARY_OPCODE_X1 = 23,
+  LDNT2S_ADD_IMM8_OPCODE_X1 = 15,
+  LDNT2S_UNARY_OPCODE_X1 = 24,
+  LDNT2U_ADD_IMM8_OPCODE_X1 = 16,
+  LDNT2U_UNARY_OPCODE_X1 = 25,
+  LDNT4S_ADD_IMM8_OPCODE_X1 = 17,
+  LDNT4S_UNARY_OPCODE_X1 = 26,
+  LDNT4U_ADD_IMM8_OPCODE_X1 = 18,
+  LDNT4U_UNARY_OPCODE_X1 = 27,
+  LDNT_ADD_IMM8_OPCODE_X1 = 19,
+  LDNT_UNARY_OPCODE_X1 = 28,
+  LD_ADD_IMM8_OPCODE_X1 = 20,
+  LD_OPCODE_Y2 = 3,
+  LD_UNARY_OPCODE_X1 = 29,
+  LNK_UNARY_OPCODE_X1 = 30,
+  LNK_UNARY_OPCODE_Y1 = 14,
+  LWNA_ADD_IMM8_OPCODE_X1 = 21,
+  MFSPR_IMM8_OPCODE_X1 = 22,
+  MF_UNARY_OPCODE_X1 = 31,
+  MM_BF_OPCODE_X0 = 7,
+  MNZ_RRR_0_OPCODE_X0 = 40,
+  MNZ_RRR_0_OPCODE_X1 = 26,
+  MNZ_RRR_4_OPCODE_Y0 = 2,
+  MNZ_RRR_4_OPCODE_Y1 = 2,
+  MODE_OPCODE_YA2 = 1,
+  MODE_OPCODE_YB2 = 2,
+  MODE_OPCODE_YC2 = 3,
+  MTSPR_IMM8_OPCODE_X1 = 23,
+  MULAX_RRR_0_OPCODE_X0 = 41,
+  MULAX_RRR_3_OPCODE_Y0 = 2,
+  MULA_HS_HS_RRR_0_OPCODE_X0 = 42,
+  MULA_HS_HS_RRR_9_OPCODE_Y0 = 0,
+  MULA_HS_HU_RRR_0_OPCODE_X0 = 43,
+  MULA_HS_LS_RRR_0_OPCODE_X0 = 44,
+  MULA_HS_LU_RRR_0_OPCODE_X0 = 45,
+  MULA_HU_HU_RRR_0_OPCODE_X0 = 46,
+  MULA_HU_HU_RRR_9_OPCODE_Y0 = 1,
+  MULA_HU_LS_RRR_0_OPCODE_X0 = 47,
+  MULA_HU_LU_RRR_0_OPCODE_X0 = 48,
+  MULA_LS_LS_RRR_0_OPCODE_X0 = 49,
+  MULA_LS_LS_RRR_9_OPCODE_Y0 = 2,
+  MULA_LS_LU_RRR_0_OPCODE_X0 = 50,
+  MULA_LU_LU_RRR_0_OPCODE_X0 = 51,
+  MULA_LU_LU_RRR_9_OPCODE_Y0 = 3,
+  MULX_RRR_0_OPCODE_X0 = 52,
+  MULX_RRR_3_OPCODE_Y0 = 3,
+  MUL_HS_HS_RRR_0_OPCODE_X0 = 53,
+  MUL_HS_HS_RRR_8_OPCODE_Y0 = 0,
+  MUL_HS_HU_RRR_0_OPCODE_X0 = 54,
+  MUL_HS_LS_RRR_0_OPCODE_X0 = 55,
+  MUL_HS_LU_RRR_0_OPCODE_X0 = 56,
+  MUL_HU_HU_RRR_0_OPCODE_X0 = 57,
+  MUL_HU_HU_RRR_8_OPCODE_Y0 = 1,
+  MUL_HU_LS_RRR_0_OPCODE_X0 = 58,
+  MUL_HU_LU_RRR_0_OPCODE_X0 = 59,
+  MUL_LS_LS_RRR_0_OPCODE_X0 = 60,
+  MUL_LS_LS_RRR_8_OPCODE_Y0 = 2,
+  MUL_LS_LU_RRR_0_OPCODE_X0 = 61,
+  MUL_LU_LU_RRR_0_OPCODE_X0 = 62,
+  MUL_LU_LU_RRR_8_OPCODE_Y0 = 3,
+  MZ_RRR_0_OPCODE_X0 = 63,
+  MZ_RRR_0_OPCODE_X1 = 27,
+  MZ_RRR_4_OPCODE_Y0 = 3,
+  MZ_RRR_4_OPCODE_Y1 = 3,
+  NAP_UNARY_OPCODE_X1 = 32,
+  NOP_UNARY_OPCODE_X0 = 5,
+  NOP_UNARY_OPCODE_X1 = 33,
+  NOP_UNARY_OPCODE_Y0 = 5,
+  NOP_UNARY_OPCODE_Y1 = 15,
+  NOR_RRR_0_OPCODE_X0 = 64,
+  NOR_RRR_0_OPCODE_X1 = 28,
+  NOR_RRR_5_OPCODE_Y0 = 1,
+  NOR_RRR_5_OPCODE_Y1 = 1,
+  ORI_IMM8_OPCODE_X0 = 7,
+  ORI_IMM8_OPCODE_X1 = 24,
+  OR_RRR_0_OPCODE_X0 = 65,
+  OR_RRR_0_OPCODE_X1 = 29,
+  OR_RRR_5_OPCODE_Y0 = 2,
+  OR_RRR_5_OPCODE_Y1 = 2,
+  PCNT_UNARY_OPCODE_X0 = 6,
+  PCNT_UNARY_OPCODE_Y0 = 6,
+  REVBITS_UNARY_OPCODE_X0 = 7,
+  REVBITS_UNARY_OPCODE_Y0 = 7,
+  REVBYTES_UNARY_OPCODE_X0 = 8,
+  REVBYTES_UNARY_OPCODE_Y0 = 8,
+  ROTLI_SHIFT_OPCODE_X0 = 1,
+  ROTLI_SHIFT_OPCODE_X1 = 1,
+  ROTLI_SHIFT_OPCODE_Y0 = 0,
+  ROTLI_SHIFT_OPCODE_Y1 = 0,
+  ROTL_RRR_0_OPCODE_X0 = 66,
+  ROTL_RRR_0_OPCODE_X1 = 30,
+  ROTL_RRR_6_OPCODE_Y0 = 0,
+  ROTL_RRR_6_OPCODE_Y1 = 0,
+  RRR_0_OPCODE_X0 = 5,
+  RRR_0_OPCODE_X1 = 5,
+  RRR_0_OPCODE_Y0 = 5,
+  RRR_0_OPCODE_Y1 = 6,
+  RRR_1_OPCODE_Y0 = 6,
+  RRR_1_OPCODE_Y1 = 7,
+  RRR_2_OPCODE_Y0 = 7,
+  RRR_2_OPCODE_Y1 = 8,
+  RRR_3_OPCODE_Y0 = 8,
+  RRR_3_OPCODE_Y1 = 9,
+  RRR_4_OPCODE_Y0 = 9,
+  RRR_4_OPCODE_Y1 = 10,
+  RRR_5_OPCODE_Y0 = 10,
+  RRR_5_OPCODE_Y1 = 11,
+  RRR_6_OPCODE_Y0 = 11,
+  RRR_6_OPCODE_Y1 = 12,
+  RRR_7_OPCODE_Y0 = 12,
+  RRR_7_OPCODE_Y1 = 13,
+  RRR_8_OPCODE_Y0 = 13,
+  RRR_9_OPCODE_Y0 = 14,
+  SHIFT_OPCODE_X0 = 6,
+  SHIFT_OPCODE_X1 = 6,
+  SHIFT_OPCODE_Y0 = 15,
+  SHIFT_OPCODE_Y1 = 14,
+  SHL16INSLI_OPCODE_X0 = 7,
+  SHL16INSLI_OPCODE_X1 = 7,
+  SHL1ADDX_RRR_0_OPCODE_X0 = 67,
+  SHL1ADDX_RRR_0_OPCODE_X1 = 31,
+  SHL1ADDX_RRR_7_OPCODE_Y0 = 1,
+  SHL1ADDX_RRR_7_OPCODE_Y1 = 1,
+  SHL1ADD_RRR_0_OPCODE_X0 = 68,
+  SHL1ADD_RRR_0_OPCODE_X1 = 32,
+  SHL1ADD_RRR_1_OPCODE_Y0 = 0,
+  SHL1ADD_RRR_1_OPCODE_Y1 = 0,
+  SHL2ADDX_RRR_0_OPCODE_X0 = 69,
+  SHL2ADDX_RRR_0_OPCODE_X1 = 33,
+  SHL2ADDX_RRR_7_OPCODE_Y0 = 2,
+  SHL2ADDX_RRR_7_OPCODE_Y1 = 2,
+  SHL2ADD_RRR_0_OPCODE_X0 = 70,
+  SHL2ADD_RRR_0_OPCODE_X1 = 34,
+  SHL2ADD_RRR_1_OPCODE_Y0 = 1,
+  SHL2ADD_RRR_1_OPCODE_Y1 = 1,
+  SHL3ADDX_RRR_0_OPCODE_X0 = 71,
+  SHL3ADDX_RRR_0_OPCODE_X1 = 35,
+  SHL3ADDX_RRR_7_OPCODE_Y0 = 3,
+  SHL3ADDX_RRR_7_OPCODE_Y1 = 3,
+  SHL3ADD_RRR_0_OPCODE_X0 = 72,
+  SHL3ADD_RRR_0_OPCODE_X1 = 36,
+  SHL3ADD_RRR_1_OPCODE_Y0 = 2,
+  SHL3ADD_RRR_1_OPCODE_Y1 = 2,
+  SHLI_SHIFT_OPCODE_X0 = 2,
+  SHLI_SHIFT_OPCODE_X1 = 2,
+  SHLI_SHIFT_OPCODE_Y0 = 1,
+  SHLI_SHIFT_OPCODE_Y1 = 1,
+  SHLXI_SHIFT_OPCODE_X0 = 3,
+  SHLXI_SHIFT_OPCODE_X1 = 3,
+  SHLX_RRR_0_OPCODE_X0 = 73,
+  SHLX_RRR_0_OPCODE_X1 = 37,
+  SHL_RRR_0_OPCODE_X0 = 74,
+  SHL_RRR_0_OPCODE_X1 = 38,
+  SHL_RRR_6_OPCODE_Y0 = 1,
+  SHL_RRR_6_OPCODE_Y1 = 1,
+  SHRSI_SHIFT_OPCODE_X0 = 4,
+  SHRSI_SHIFT_OPCODE_X1 = 4,
+  SHRSI_SHIFT_OPCODE_Y0 = 2,
+  SHRSI_SHIFT_OPCODE_Y1 = 2,
+  SHRS_RRR_0_OPCODE_X0 = 75,
+  SHRS_RRR_0_OPCODE_X1 = 39,
+  SHRS_RRR_6_OPCODE_Y0 = 2,
+  SHRS_RRR_6_OPCODE_Y1 = 2,
+  SHRUI_SHIFT_OPCODE_X0 = 5,
+  SHRUI_SHIFT_OPCODE_X1 = 5,
+  SHRUI_SHIFT_OPCODE_Y0 = 3,
+  SHRUI_SHIFT_OPCODE_Y1 = 3,
+  SHRUXI_SHIFT_OPCODE_X0 = 6,
+  SHRUXI_SHIFT_OPCODE_X1 = 6,
+  SHRUX_RRR_0_OPCODE_X0 = 76,
+  SHRUX_RRR_0_OPCODE_X1 = 40,
+  SHRU_RRR_0_OPCODE_X0 = 77,
+  SHRU_RRR_0_OPCODE_X1 = 41,
+  SHRU_RRR_6_OPCODE_Y0 = 3,
+  SHRU_RRR_6_OPCODE_Y1 = 3,
+  SHUFFLEBYTES_RRR_0_OPCODE_X0 = 78,
+  ST1_ADD_IMM8_OPCODE_X1 = 25,
+  ST1_OPCODE_Y2 = 0,
+  ST1_RRR_0_OPCODE_X1 = 42,
+  ST2_ADD_IMM8_OPCODE_X1 = 26,
+  ST2_OPCODE_Y2 = 1,
+  ST2_RRR_0_OPCODE_X1 = 43,
+  ST4_ADD_IMM8_OPCODE_X1 = 27,
+  ST4_OPCODE_Y2 = 2,
+  ST4_RRR_0_OPCODE_X1 = 44,
+  STNT1_ADD_IMM8_OPCODE_X1 = 28,
+  STNT1_RRR_0_OPCODE_X1 = 45,
+  STNT2_ADD_IMM8_OPCODE_X1 = 29,
+  STNT2_RRR_0_OPCODE_X1 = 46,
+  STNT4_ADD_IMM8_OPCODE_X1 = 30,
+  STNT4_RRR_0_OPCODE_X1 = 47,
+  STNT_ADD_IMM8_OPCODE_X1 = 31,
+  STNT_RRR_0_OPCODE_X1 = 48,
+  ST_ADD_IMM8_OPCODE_X1 = 32,
+  ST_OPCODE_Y2 = 3,
+  ST_RRR_0_OPCODE_X1 = 49,
+  SUBXSC_RRR_0_OPCODE_X0 = 79,
+  SUBXSC_RRR_0_OPCODE_X1 = 50,
+  SUBX_RRR_0_OPCODE_X0 = 80,
+  SUBX_RRR_0_OPCODE_X1 = 51,
+  SUBX_RRR_0_OPCODE_Y0 = 2,
+  SUBX_RRR_0_OPCODE_Y1 = 2,
+  SUB_RRR_0_OPCODE_X0 = 81,
+  SUB_RRR_0_OPCODE_X1 = 52,
+  SUB_RRR_0_OPCODE_Y0 = 3,
+  SUB_RRR_0_OPCODE_Y1 = 3,
+  SWINT0_UNARY_OPCODE_X1 = 34,
+  SWINT1_UNARY_OPCODE_X1 = 35,
+  SWINT2_UNARY_OPCODE_X1 = 36,
+  SWINT3_UNARY_OPCODE_X1 = 37,
+  TBLIDXB0_UNARY_OPCODE_X0 = 9,
+  TBLIDXB0_UNARY_OPCODE_Y0 = 9,
+  TBLIDXB1_UNARY_OPCODE_X0 = 10,
+  TBLIDXB1_UNARY_OPCODE_Y0 = 10,
+  TBLIDXB2_UNARY_OPCODE_X0 = 11,
+  TBLIDXB2_UNARY_OPCODE_Y0 = 11,
+  TBLIDXB3_UNARY_OPCODE_X0 = 12,
+  TBLIDXB3_UNARY_OPCODE_Y0 = 12,
+  UNARY_RRR_0_OPCODE_X0 = 82,
+  UNARY_RRR_0_OPCODE_X1 = 53,
+  UNARY_RRR_1_OPCODE_Y0 = 3,
+  UNARY_RRR_1_OPCODE_Y1 = 3,
+  V1ADDI_IMM8_OPCODE_X0 = 8,
+  V1ADDI_IMM8_OPCODE_X1 = 33,
+  V1ADDUC_RRR_0_OPCODE_X0 = 83,
+  V1ADDUC_RRR_0_OPCODE_X1 = 54,
+  V1ADD_RRR_0_OPCODE_X0 = 84,
+  V1ADD_RRR_0_OPCODE_X1 = 55,
+  V1ADIFFU_RRR_0_OPCODE_X0 = 85,
+  V1AVGU_RRR_0_OPCODE_X0 = 86,
+  V1CMPEQI_IMM8_OPCODE_X0 = 9,
+  V1CMPEQI_IMM8_OPCODE_X1 = 34,
+  V1CMPEQ_RRR_0_OPCODE_X0 = 87,
+  V1CMPEQ_RRR_0_OPCODE_X1 = 56,
+  V1CMPLES_RRR_0_OPCODE_X0 = 88,
+  V1CMPLES_RRR_0_OPCODE_X1 = 57,
+  V1CMPLEU_RRR_0_OPCODE_X0 = 89,
+  V1CMPLEU_RRR_0_OPCODE_X1 = 58,
+  V1CMPLTSI_IMM8_OPCODE_X0 = 10,
+  V1CMPLTSI_IMM8_OPCODE_X1 = 35,
+  V1CMPLTS_RRR_0_OPCODE_X0 = 90,
+  V1CMPLTS_RRR_0_OPCODE_X1 = 59,
+  V1CMPLTUI_IMM8_OPCODE_X0 = 11,
+  V1CMPLTUI_IMM8_OPCODE_X1 = 36,
+  V1CMPLTU_RRR_0_OPCODE_X0 = 91,
+  V1CMPLTU_RRR_0_OPCODE_X1 = 60,
+  V1CMPNE_RRR_0_OPCODE_X0 = 92,
+  V1CMPNE_RRR_0_OPCODE_X1 = 61,
+  V1DDOTPUA_RRR_0_OPCODE_X0 = 161,
+  V1DDOTPUSA_RRR_0_OPCODE_X0 = 93,
+  V1DDOTPUS_RRR_0_OPCODE_X0 = 94,
+  V1DDOTPU_RRR_0_OPCODE_X0 = 162,
+  V1DOTPA_RRR_0_OPCODE_X0 = 95,
+  V1DOTPUA_RRR_0_OPCODE_X0 = 163,
+  V1DOTPUSA_RRR_0_OPCODE_X0 = 96,
+  V1DOTPUS_RRR_0_OPCODE_X0 = 97,
+  V1DOTPU_RRR_0_OPCODE_X0 = 164,
+  V1DOTP_RRR_0_OPCODE_X0 = 98,
+  V1INT_H_RRR_0_OPCODE_X0 = 99,
+  V1INT_H_RRR_0_OPCODE_X1 = 62,
+  V1INT_L_RRR_0_OPCODE_X0 = 100,
+  V1INT_L_RRR_0_OPCODE_X1 = 63,
+  V1MAXUI_IMM8_OPCODE_X0 = 12,
+  V1MAXUI_IMM8_OPCODE_X1 = 37,
+  V1MAXU_RRR_0_OPCODE_X0 = 101,
+  V1MAXU_RRR_0_OPCODE_X1 = 64,
+  V1MINUI_IMM8_OPCODE_X0 = 13,
+  V1MINUI_IMM8_OPCODE_X1 = 38,
+  V1MINU_RRR_0_OPCODE_X0 = 102,
+  V1MINU_RRR_0_OPCODE_X1 = 65,
+  V1MNZ_RRR_0_OPCODE_X0 = 103,
+  V1MNZ_RRR_0_OPCODE_X1 = 66,
+  V1MULTU_RRR_0_OPCODE_X0 = 104,
+  V1MULUS_RRR_0_OPCODE_X0 = 105,
+  V1MULU_RRR_0_OPCODE_X0 = 106,
+  V1MZ_RRR_0_OPCODE_X0 = 107,
+  V1MZ_RRR_0_OPCODE_X1 = 67,
+  V1SADAU_RRR_0_OPCODE_X0 = 108,
+  V1SADU_RRR_0_OPCODE_X0 = 109,
+  V1SHLI_SHIFT_OPCODE_X0 = 7,
+  V1SHLI_SHIFT_OPCODE_X1 = 7,
+  V1SHL_RRR_0_OPCODE_X0 = 110,
+  V1SHL_RRR_0_OPCODE_X1 = 68,
+  V1SHRSI_SHIFT_OPCODE_X0 = 8,
+  V1SHRSI_SHIFT_OPCODE_X1 = 8,
+  V1SHRS_RRR_0_OPCODE_X0 = 111,
+  V1SHRS_RRR_0_OPCODE_X1 = 69,
+  V1SHRUI_SHIFT_OPCODE_X0 = 9,
+  V1SHRUI_SHIFT_OPCODE_X1 = 9,
+  V1SHRU_RRR_0_OPCODE_X0 = 112,
+  V1SHRU_RRR_0_OPCODE_X1 = 70,
+  V1SUBUC_RRR_0_OPCODE_X0 = 113,
+  V1SUBUC_RRR_0_OPCODE_X1 = 71,
+  V1SUB_RRR_0_OPCODE_X0 = 114,
+  V1SUB_RRR_0_OPCODE_X1 = 72,
+  V2ADDI_IMM8_OPCODE_X0 = 14,
+  V2ADDI_IMM8_OPCODE_X1 = 39,
+  V2ADDSC_RRR_0_OPCODE_X0 = 115,
+  V2ADDSC_RRR_0_OPCODE_X1 = 73,
+  V2ADD_RRR_0_OPCODE_X0 = 116,
+  V2ADD_RRR_0_OPCODE_X1 = 74,
+  V2ADIFFS_RRR_0_OPCODE_X0 = 117,
+  V2AVGS_RRR_0_OPCODE_X0 = 118,
+  V2CMPEQI_IMM8_OPCODE_X0 = 15,
+  V2CMPEQI_IMM8_OPCODE_X1 = 40,
+  V2CMPEQ_RRR_0_OPCODE_X0 = 119,
+  V2CMPEQ_RRR_0_OPCODE_X1 = 75,
+  V2CMPLES_RRR_0_OPCODE_X0 = 120,
+  V2CMPLES_RRR_0_OPCODE_X1 = 76,
+  V2CMPLEU_RRR_0_OPCODE_X0 = 121,
+  V2CMPLEU_RRR_0_OPCODE_X1 = 77,
+  V2CMPLTSI_IMM8_OPCODE_X0 = 16,
+  V2CMPLTSI_IMM8_OPCODE_X1 = 41,
+  V2CMPLTS_RRR_0_OPCODE_X0 = 122,
+  V2CMPLTS_RRR_0_OPCODE_X1 = 78,
+  V2CMPLTUI_IMM8_OPCODE_X0 = 17,
+  V2CMPLTUI_IMM8_OPCODE_X1 = 42,
+  V2CMPLTU_RRR_0_OPCODE_X0 = 123,
+  V2CMPLTU_RRR_0_OPCODE_X1 = 79,
+  V2CMPNE_RRR_0_OPCODE_X0 = 124,
+  V2CMPNE_RRR_0_OPCODE_X1 = 80,
+  V2DOTPA_RRR_0_OPCODE_X0 = 125,
+  V2DOTP_RRR_0_OPCODE_X0 = 126,
+  V2INT_H_RRR_0_OPCODE_X0 = 127,
+  V2INT_H_RRR_0_OPCODE_X1 = 81,
+  V2INT_L_RRR_0_OPCODE_X0 = 128,
+  V2INT_L_RRR_0_OPCODE_X1 = 82,
+  V2MAXSI_IMM8_OPCODE_X0 = 18,
+  V2MAXSI_IMM8_OPCODE_X1 = 43,
+  V2MAXS_RRR_0_OPCODE_X0 = 129,
+  V2MAXS_RRR_0_OPCODE_X1 = 83,
+  V2MINSI_IMM8_OPCODE_X0 = 19,
+  V2MINSI_IMM8_OPCODE_X1 = 44,
+  V2MINS_RRR_0_OPCODE_X0 = 130,
+  V2MINS_RRR_0_OPCODE_X1 = 84,
+  V2MNZ_RRR_0_OPCODE_X0 = 131,
+  V2MNZ_RRR_0_OPCODE_X1 = 85,
+  V2MULFSC_RRR_0_OPCODE_X0 = 132,
+  V2MULS_RRR_0_OPCODE_X0 = 133,
+  V2MULTS_RRR_0_OPCODE_X0 = 134,
+  V2MZ_RRR_0_OPCODE_X0 = 135,
+  V2MZ_RRR_0_OPCODE_X1 = 86,
+  V2PACKH_RRR_0_OPCODE_X0 = 136,
+  V2PACKH_RRR_0_OPCODE_X1 = 87,
+  V2PACKL_RRR_0_OPCODE_X0 = 137,
+  V2PACKL_RRR_0_OPCODE_X1 = 88,
+  V2PACKUC_RRR_0_OPCODE_X0 = 138,
+  V2PACKUC_RRR_0_OPCODE_X1 = 89,
+  V2SADAS_RRR_0_OPCODE_X0 = 139,
+  V2SADAU_RRR_0_OPCODE_X0 = 140,
+  V2SADS_RRR_0_OPCODE_X0 = 141,
+  V2SADU_RRR_0_OPCODE_X0 = 142,
+  V2SHLI_SHIFT_OPCODE_X0 = 10,
+  V2SHLI_SHIFT_OPCODE_X1 = 10,
+  V2SHLSC_RRR_0_OPCODE_X0 = 143,
+  V2SHLSC_RRR_0_OPCODE_X1 = 90,
+  V2SHL_RRR_0_OPCODE_X0 = 144,
+  V2SHL_RRR_0_OPCODE_X1 = 91,
+  V2SHRSI_SHIFT_OPCODE_X0 = 11,
+  V2SHRSI_SHIFT_OPCODE_X1 = 11,
+  V2SHRS_RRR_0_OPCODE_X0 = 145,
+  V2SHRS_RRR_0_OPCODE_X1 = 92,
+  V2SHRUI_SHIFT_OPCODE_X0 = 12,
+  V2SHRUI_SHIFT_OPCODE_X1 = 12,
+  V2SHRU_RRR_0_OPCODE_X0 = 146,
+  V2SHRU_RRR_0_OPCODE_X1 = 93,
+  V2SUBSC_RRR_0_OPCODE_X0 = 147,
+  V2SUBSC_RRR_0_OPCODE_X1 = 94,
+  V2SUB_RRR_0_OPCODE_X0 = 148,
+  V2SUB_RRR_0_OPCODE_X1 = 95,
+  V4ADDSC_RRR_0_OPCODE_X0 = 149,
+  V4ADDSC_RRR_0_OPCODE_X1 = 96,
+  V4ADD_RRR_0_OPCODE_X0 = 150,
+  V4ADD_RRR_0_OPCODE_X1 = 97,
+  V4INT_H_RRR_0_OPCODE_X0 = 151,
+  V4INT_H_RRR_0_OPCODE_X1 = 98,
+  V4INT_L_RRR_0_OPCODE_X0 = 152,
+  V4INT_L_RRR_0_OPCODE_X1 = 99,
+  V4PACKSC_RRR_0_OPCODE_X0 = 153,
+  V4PACKSC_RRR_0_OPCODE_X1 = 100,
+  V4SHLSC_RRR_0_OPCODE_X0 = 154,
+  V4SHLSC_RRR_0_OPCODE_X1 = 101,
+  V4SHL_RRR_0_OPCODE_X0 = 155,
+  V4SHL_RRR_0_OPCODE_X1 = 102,
+  V4SHRS_RRR_0_OPCODE_X0 = 156,
+  V4SHRS_RRR_0_OPCODE_X1 = 103,
+  V4SHRU_RRR_0_OPCODE_X0 = 157,
+  V4SHRU_RRR_0_OPCODE_X1 = 104,
+  V4SUBSC_RRR_0_OPCODE_X0 = 158,
+  V4SUBSC_RRR_0_OPCODE_X1 = 105,
+  V4SUB_RRR_0_OPCODE_X0 = 159,
+  V4SUB_RRR_0_OPCODE_X1 = 106,
+  WH64_UNARY_OPCODE_X1 = 38,
+  XORI_IMM8_OPCODE_X0 = 20,
+  XORI_IMM8_OPCODE_X1 = 45,
+  XOR_RRR_0_OPCODE_X0 = 160,
+  XOR_RRR_0_OPCODE_X1 = 107,
+  XOR_RRR_5_OPCODE_Y0 = 3,
+  XOR_RRR_5_OPCODE_Y1 = 3
 };
 
 #endif /* !_TILE_OPCODE_CONSTANTS_H */
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h
new file mode 100644
index 0000000..fd80328
--- /dev/null
+++ b/arch/tile/include/asm/pgtable_64.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ */
+
+#ifndef _ASM_TILE_PGTABLE_64_H
+#define _ASM_TILE_PGTABLE_64_H
+
+/* The level-0 page table breaks the address space into 2^32-byte chunks. */
+#define PGDIR_SHIFT	HV_LOG2_L1_SPAN
+#define PGDIR_SIZE	HV_L1_SPAN
+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+#define PTRS_PER_PGD	HV_L0_ENTRIES
+#define SIZEOF_PGD	(PTRS_PER_PGD * sizeof(pgd_t))
+
+/*
+ * The level-1 index is defined by the huge page size.  A PMD table is
+ * composed of PTRS_PER_PMD pmd_t's and is the middle level of the page
+ * table.
+ */
+#define PMD_SHIFT	HV_LOG2_PAGE_SIZE_LARGE
+#define PMD_SIZE	HV_PAGE_SIZE_LARGE
+#define PMD_MASK	(~(PMD_SIZE-1))
+#define PTRS_PER_PMD	(1 << (PGDIR_SHIFT - PMD_SHIFT))
+#define SIZEOF_PMD	(PTRS_PER_PMD * sizeof(pmd_t))
+
+/*
+ * The level-2 index is defined by the difference between the huge
+ * page size and the normal page size.  A PTE is composed of
+ * PTRS_PER_PTE pte_t's and is the bottom level of the page table.
+ * Note that the hypervisor docs use PTE for what we call pte_t, so
+ * this nomenclature is somewhat confusing.
+ */
+#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
+#define SIZEOF_PTE	(PTRS_PER_PTE * sizeof(pte_t))
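/*
 * Editor's sketch, not part of the patch: with the three shift/size
 * pairs above, resolving a virtual address is three shift-and-mask
 * steps.  split_va() is a hypothetical illustration (PAGE_SHIFT comes
 * from <asm/page.h>).
 */
static inline void split_va(unsigned long va)
{
	unsigned long l0 = (va >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
	unsigned long l1 = (va >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	unsigned long l2 = (va >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	/* l0 indexes the pgd; l1 the pmd table; l2 the pte table. */
	(void)l0; (void)l1; (void)l2;
}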
+
+/*
+ * Align the vmalloc area to an L2 page table, and leave a guard page
+ * at the beginning and end.  The vmalloc code also puts in an internal
+ * guard page between each allocation.
+ */
+#define _VMALLOC_END	HUGE_VMAP_BASE
+#define VMALLOC_END	(_VMALLOC_END - PAGE_SIZE)
+#define VMALLOC_START	(_VMALLOC_START + PAGE_SIZE)
+
+#define HUGE_VMAP_END	(HUGE_VMAP_BASE + PGDIR_SIZE)
+
+#ifndef __ASSEMBLY__
+
+/* We have no pud since this is a three-level page table. */
+#include <asm-generic/pgtable-nopud.h>
+
+static inline int pud_none(pud_t pud)
+{
+	return pud_val(pud) == 0;
+}
+
+static inline int pud_present(pud_t pud)
+{
+	return pud_val(pud) & _PAGE_PRESENT;
+}
+
+#define pmd_ERROR(e) \
+	pr_err("%s:%d: bad pmd 0x%016llx.\n", __FILE__, __LINE__, pmd_val(e))
+
+static inline void pud_clear(pud_t *pudp)
+{
+	__pte_clear(&pudp->pgd);
+}
+
+static inline int pud_bad(pud_t pud)
+{
+	return ((pud_val(pud) & _PAGE_ALL) != _PAGE_TABLE);
+}
+
+/* Return the page-table frame number (ptfn) that a pud_t points at. */
+#define pud_ptfn(pud) hv_pte_get_ptfn((pud).pgd)
+
+/*
+ * A given kernel pud_t maps to a kernel pmd_t table at a specific
+ * virtual address.  Since kernel pmd_t tables can be aligned at
+ * sub-page granularity, this macro can return non-page-aligned
+ * pointers, despite its name.
+ */
+#define pud_page_vaddr(pud) \
+	(__va((phys_addr_t)pud_ptfn(pud) << HV_LOG2_PAGE_TABLE_ALIGN))
+
+/*
+ * A pud_t points to a pmd_t array.  Since we can have multiple per
+ * page, we don't have a one-to-one mapping of pud_t's to pages.
+ */
+#define pud_page(pud) pfn_to_page(HV_PTFN_TO_PFN(pud_ptfn(pud)))
+
+static inline unsigned long pud_index(unsigned long address)
+{
+	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
+}
+
+#define pmd_offset(pud, address) \
+	((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address))
+
+static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+	set_pte(pmdp, pmdval);
+}
+
+/* Create a pmd from a PTFN and pgprot. */
+static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
+{
+	return hv_pte_set_ptfn(prot, ptfn);
+}
+
+/* Return the page-table frame number (ptfn) that a pmd_t points at. */
+static inline unsigned long pmd_ptfn(pmd_t pmd)
+{
+	return hv_pte_get_ptfn(pmd);
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+	__pte_clear(pmdp);
+}
+
+/* Normalize an address so the high bits are correctly sign-extended. */
+#define pgd_addr_normalize pgd_addr_normalize
+static inline unsigned long pgd_addr_normalize(unsigned long addr)
+{
+	return ((long)addr << (CHIP_WORD_SIZE() - CHIP_VA_WIDTH())) >>
+		(CHIP_WORD_SIZE() - CHIP_VA_WIDTH());
+}
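/*
 * Editor's worked example, not part of the patch (the concrete widths
 * are assumptions): with a 64-bit word and a 42-bit VA, the expression
 * above shifts left by 22 and arithmetically back right by 22, copying
 * VA bit 41 into bits 63..42.  So 0x000003f000000000 normalizes to
 * 0xfffffff000000000, while addresses with bit 41 clear pass through
 * unchanged.
 */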
+
+/* We don't define any pgds for these addresses. */
+static inline int pgd_addr_invalid(unsigned long addr)
+{
+	return addr >= MEM_HV_START ||
+		(addr > MEM_LOW_END && addr < MEM_HIGH_START);
+}
+
+/*
+ * Use atomic instructions to provide atomicity against the hypervisor.
+ */
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long addr, pte_t *ptep)
+{
+	return (__insn_fetchand(&ptep->val, ~HV_PTE_ACCESSED) >>
+		HV_PTE_INDEX_ACCESSED) & 0x1;
+}
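/*
 * Editor's note, not part of the patch: __insn_fetchand() returns the
 * pre-clear word, so the old ACCESSED bit is sampled and cleared in one
 * atomic operation, which is what keeps this helper coherent with
 * concurrent hypervisor updates to the PTE.
 */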
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm,
+				      unsigned long addr, pte_t *ptep)
+{
+	__insn_fetchand(&ptep->val, ~HV_PTE_WRITABLE);
+}
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+				       unsigned long addr, pte_t *ptep)
+{
+	return hv_pte(__insn_exch(&ptep->val, 0UL));
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_TILE_PGTABLE_64_H */
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
new file mode 100644
index 0000000..72be590
--- /dev/null
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ * 64-bit SMP ticket spinlocks, allowing only a single CPU anywhere
+ * (the type definitions are in asm/spinlock_types.h)
+ */
+
+#ifndef _ASM_TILE_SPINLOCK_64_H
+#define _ASM_TILE_SPINLOCK_64_H
+
+/* Shifts and masks for the various fields in "lock". */
+#define __ARCH_SPIN_CURRENT_SHIFT	17
+#define __ARCH_SPIN_NEXT_MASK		0x7fff
+#define __ARCH_SPIN_NEXT_OVERFLOW	0x8000
+
+/*
+ * Return the "current" portion of a ticket lock value,
+ * i.e. the number that currently owns the lock.
+ */
+static inline int arch_spin_current(u32 val)
+{
+	return val >> __ARCH_SPIN_CURRENT_SHIFT;
+}
+
+/*
+ * Return the "next" portion of a ticket lock value,
+ * i.e. the number that the next task to try to acquire the lock will get.
+ */
+static inline int arch_spin_next(u32 val)
+{
+	return val & __ARCH_SPIN_NEXT_MASK;
+}
+
+/* The lock is locked if a task would have to wait to get it. */
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+	u32 val = lock->lock;
+	return arch_spin_current(val) != arch_spin_next(val);
+}
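/*
 * Editor's worked example, not part of the patch: the 32-bit lock word
 * packs "next" in bits 14..0, an overflow flag in bit 15, and "current"
 * in bits 31..17.  A value of 0x00060003 thus decodes as current = 3,
 * next = 3: no waiters and nobody holding the lock.
 */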
+
+/* Bump the current ticket so the next task owns the lock. */
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	wmb();  /* guarantee anything modified under the lock is visible */
+	__insn_fetchadd4(&lock->lock, 1U << __ARCH_SPIN_CURRENT_SHIFT);
+}
+
+void arch_spin_unlock_wait(arch_spinlock_t *lock);
+
+void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val);
+
+/* Grab the "next" ticket number and bump it atomically.
+ * If the current ticket is not ours, go to the slow path.
+ * We also take the slow path if the "next" value overflows.
+ */
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	u32 val = __insn_fetchadd4(&lock->lock, 1);
+	u32 ticket = val & (__ARCH_SPIN_NEXT_MASK | __ARCH_SPIN_NEXT_OVERFLOW);
+	if (unlikely(arch_spin_current(val) != ticket))
+		arch_spin_lock_slow(lock, ticket);
+}
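/*
 * Editor's note, not part of the patch: __insn_fetchadd4() returns the
 * pre-increment value, so "ticket" above is the number this CPU just
 * claimed (plus the overflow bit, which can never equal a 15-bit
 * "current" value and so forces the slow path on wraparound).  In the
 * uncontended case current == ticket and the lock is taken with a
 * single atomic operation.
 */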
+
+/* Try to get the lock, and return whether we succeeded. */
+int arch_spin_trylock(arch_spinlock_t *lock);
+
+/* We cannot take an interrupt after getting a ticket, so don't enable them. */
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * We use fetchadd() for readers, and fetchor() with the sign bit
+ * for writers.
+ */
+
+#define __WRITE_LOCK_BIT (1 << 31)
+
+static inline int arch_write_val_locked(int val)
+{
+	return val < 0;  /* Optimize "val & __WRITE_LOCK_BIT". */
+}
+
+/**
+ * arch_read_can_lock - would read_trylock() succeed?
+ * @rw: the rwlock in question.
+ */
+static inline int arch_read_can_lock(arch_rwlock_t *rw)
+{
+	return !arch_write_val_locked(rw->lock);
+}
+
+/**
+ * arch_write_can_lock - would write_trylock() succeed?
+ * @rw: the rwlock in question.
+ */
+static inline int arch_write_can_lock(arch_rwlock_t *rw)
+{
+	return rw->lock == 0;
+}
+
+extern void __read_lock_failed(arch_rwlock_t *rw);
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	u32 val = __insn_fetchaddgez4(&rw->lock, 1);
+	if (unlikely(arch_write_val_locked(val)))
+		__read_lock_failed(rw);
+}
+
+extern void __write_lock_failed(arch_rwlock_t *rw, u32 val);
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+	u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
+	if (unlikely(val != 0))
+		__write_lock_failed(rw, val);
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+	__insn_mf();
+	__insn_fetchadd4(&rw->lock, -1);
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+	__insn_mf();
+	rw->lock = 0;
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+	return !arch_write_val_locked(__insn_fetchaddgez4(&rw->lock, 1));
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+	u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
+	if (likely(val == 0))
+		return 1;
+	if (!arch_write_val_locked(val))
+		__insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT);
+	return 0;
+}
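/*
 * Editor's note, not part of the patch: fetchaddgez4 commits its add
 * only when the result stays non-negative, so a reader can never bump
 * the count past a held write lock.  The trylock above has one
 * subtlety: fetchor4 sets bit 31 unconditionally, so when it fails
 * against readers (val != 0 with the sign bit clear) it must clear the
 * bit again with fetchand4, as shown.
 */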
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#endif /* _ASM_TILE_SPINLOCK_64_H */