art: Handle the x86_64 architecture the same as x86

This patch forces the front-end and middle-end (FE/ME) to treat x86_64
exactly like x86. The x86_64-specific logic will be revised later, once
the x86_64 assembly support is ready.
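
The "instruction_set == kX86 || instruction_set == kX86_64" test now
recurs throughout gen_invoke.cc. A minimal sketch of a predicate the
checks could later be folded into (the name IsX86Family is hypothetical
and not part of this change; it assumes ART's InstructionSet enum):

    // Hypothetical helper, not introduced by this patch: collapses the
    // repeated kX86/kX86_64 comparisons into a single call site.
    static inline bool IsX86Family(InstructionSet isa) {
      return isa == kX86 || isa == kX86_64;
    }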

Change-Id: I4a92477a6eeaa9a11fd710d35c602d8d6f88cbb6
Signed-off-by: Dmitry Petrochenko <dmitry.petrochenko@intel.com>
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 352130d..188b18c 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -67,7 +67,7 @@
  * load arguments between the two parts.
  */
 RegStorage Mir2Lir::CallHelperSetup(ThreadOffset<4> helper_offset) {
-  return (cu_->instruction_set == kX86) ? RegStorage::InvalidReg() : LoadHelper(helper_offset);
+  return (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) ? RegStorage::InvalidReg() : LoadHelper(helper_offset);
 }
 
 /* NOTE: if r_tgt is a temp, it will be freed following use */
@@ -75,7 +75,7 @@
                          bool use_link) {
   LIR* call_inst;
   OpKind op = use_link ? kOpBlx : kOpBx;
-  if (cu_->instruction_set == kX86) {
+  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
     call_inst = OpThreadMem(op, helper_offset);
   } else {
     call_inst = OpReg(op, r_tgt);
@@ -435,10 +435,10 @@
     switch (state) {
     case 0:  // Get the current Method* [sets kArg0]
       if (direct_code != static_cast<unsigned int>(-1)) {
-        if (cu->instruction_set != kX86) {
+        if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
           cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
         }
-      } else if (cu->instruction_set != kX86) {
+      } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
         cg->LoadCodeAddress(target_method, type, kInvokeTgt);
       }
       if (direct_method != static_cast<unsigned int>(-1)) {
@@ -464,7 +464,7 @@
       if (direct_code != 0) {
         if (direct_code != static_cast<unsigned int>(-1)) {
           cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
-        } else if (cu->instruction_set != kX86) {
+        } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
           CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
           cg->LoadCodeAddress(target_method, type, kInvokeTgt);
         }
@@ -477,7 +477,7 @@
                        (target_method.dex_method_index * 4), cg->TargetReg(kArg0));
       break;
     case 3:  // Grab the code from the method*
-      if (cu->instruction_set != kX86) {
+      if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
         if (direct_code == 0) {
           cg->LoadWordDisp(cg->TargetReg(kArg0),
                            mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
@@ -532,7 +532,7 @@
                        cg->TargetReg(kArg0));
       break;
     case 4:  // Get the compiled code address [uses kArg0, sets kInvokeTgt]
-      if (cu->instruction_set != kX86) {
+      if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
         cg->LoadWordDisp(cg->TargetReg(kArg0),
                          mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
                          cg->TargetReg(kInvokeTgt));
@@ -561,7 +561,7 @@
     case 0:  // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
       CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
       cg->LoadConstant(cg->TargetReg(kHiddenArg), target_method.dex_method_index);
-      if (cu->instruction_set == kX86) {
+      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64) {
         cg->OpRegCopy(cg->TargetReg(kHiddenFpArg), cg->TargetReg(kHiddenArg));
       }
       break;
@@ -587,7 +587,7 @@
                        cg->TargetReg(kArg0));
       break;
     case 5:  // Get the compiled code address [use kArg0, set kInvokeTgt]
-      if (cu->instruction_set != kX86) {
+      if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
         cg->LoadWordDisp(cg->TargetReg(kArg0),
                          mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
                          cg->TargetReg(kInvokeTgt));
@@ -609,7 +609,7 @@
    * resolved at compile time, we bail to a runtime helper.
    */
   if (state == 0) {
-    if (cu->instruction_set != kX86) {
+    if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
       // Load trampoline target
       cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(), cg->TargetReg(kInvokeTgt));
     }
@@ -879,7 +879,7 @@
     st->u.m.def_mask = ENCODE_ALL;
     call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
-  } else if (cu_->instruction_set == kX86) {
+  } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
     int current_src_offset = start_offset;
     int current_dest_offset = outs_offset;
 
@@ -1054,7 +1054,7 @@
   RegLocation rl_idx = info->args[1];
   rl_obj = LoadValue(rl_obj, kCoreReg);
   // X86 wants to avoid putting a constant index into a register.
-  if (!(cu_->instruction_set == kX86 && rl_idx.is_const)) {
+  if (!((cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) && rl_idx.is_const)) {
     rl_idx = LoadValue(rl_idx, kCoreReg);
   }
   RegStorage reg_max;
@@ -1063,7 +1063,7 @@
   LIR* range_check_branch = nullptr;
   RegStorage reg_off;
   RegStorage reg_ptr;
-  if (cu_->instruction_set != kX86) {
+  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
     reg_off = AllocTemp();
     reg_ptr = AllocTemp();
     if (range_check) {
@@ -1110,7 +1110,7 @@
   }
   RegLocation rl_dest = InlineTarget(info);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  if (cu_->instruction_set != kX86) {
+  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
     LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf);
   } else {
     LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg,
@@ -1148,7 +1148,7 @@
       OpRegReg(kOpNeg, t_reg, rl_result.reg);
       OpRegRegReg(kOpAdc, rl_result.reg, rl_result.reg, t_reg);
     } else {
-      DCHECK_EQ(cu_->instruction_set, kX86);
+      DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
       OpRegImm(kOpSub, rl_result.reg, 1);
       OpRegImm(kOpLsr, rl_result.reg, 31);
     }
@@ -1218,7 +1218,7 @@
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
 
   // If on x86 or if we would clobber a register needed later, just copy the source first.
-  if (cu_->instruction_set == kX86 || rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg()) {
+  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64 || rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg()) {
     OpRegCopyWide(rl_result.reg, rl_src.reg);
     if (rl_result.reg.GetLowReg() != rl_src.reg.GetLowReg() &&
         rl_result.reg.GetLowReg() != rl_src.reg.GetHighReg() &&
@@ -1359,7 +1359,7 @@
   RegLocation rl_cmp = info->args[1];
   LoadValueDirectFixed(rl_this, reg_this);
   LoadValueDirectFixed(rl_cmp, reg_cmp);
-  RegStorage r_tgt = (cu_->instruction_set != kX86) ?
+  RegStorage r_tgt = (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) ?
       LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo)) : RegStorage::InvalidReg();
   GenExplicitNullCheck(reg_this, info->opt_flags);
   info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
@@ -1367,7 +1367,7 @@
   LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
   AddIntrinsicLaunchpad(info, cmp_null_check_branch);
   // NOTE: not a safepoint
-  if (cu_->instruction_set != kX86) {
+  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
     OpReg(kOpBlx, r_tgt);
   } else {
     OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo));
@@ -1385,7 +1385,7 @@
   if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
     LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.reg);
   } else {
-    CHECK(cu_->instruction_set == kX86);
+    CHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
     reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg.GetReg(), offset);
   }
   StoreValue(rl_dest, rl_result);
@@ -1556,7 +1556,7 @@
                                 method_info.DirectCode(), method_info.DirectMethod(), original_type);
   }
   LIR* call_inst;
-  if (cu_->instruction_set != kX86) {
+  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
     call_inst = OpReg(kOpBlx, TargetReg(kInvokeTgt));
   } else {
     if (fast_path) {