x86_64: Add long bytecode support (1/2)

This patch enables the relevant switch cases and GenFillArray,
adds assembler changes, updates register-allocation behavior for
64-bit, uses wide values in basic utility operations and constant
loading, and updates memory operations.

Change-Id: I6d8aa35a75c5fd01d69c38a770c3398d0188cc8a
Signed-off-by: Chao-ying Fu <chao-ying.fu@intel.com>
Signed-off-by: Serguei Katkov <serguei.i.katkov@intel.com>
Signed-off-by: Dmitry Petrochenko <dmitry.petrochenko@intel.com>
Signed-off-by: Mark Mendell <mark.p.mendell@intel.com>
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 160ec62..100ed13 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -687,6 +687,14 @@
 }
 
 RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
+  // X86_64 can handle any size.
+  if (Gen64Bit()) {
+    if (size == kReference) {
+      return kRefReg;
+    }
+    return kCoreReg;
+  }
+
   if (UNLIKELY(is_volatile)) {
     // On x86, atomic 64-bit load/store requires an fp register.
     // Smaller aligned load/store is atomic for both core and fp registers.
@@ -1425,7 +1433,11 @@
 
   // Address the start of the method.
   RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
-  rl_method = LoadValue(rl_method, kCoreReg);
+  if (rl_method.wide) {
+    rl_method = LoadValueWide(rl_method, kCoreReg);
+  } else {
+    rl_method = LoadValue(rl_method, kCoreReg);
+  }
 
   // Load the proper value from the literal area.
   // We don't know the proper offset for the value, so pick one that will force