art: Handle the x86_64 architecture the same as x86
This patch forces the FE/ME to treat x86_64 exactly as x86.
The x86_64 logic will be revised later, once the x86_64 assembly support is ready.
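
The change is mechanical: every instruction-set check in the local optimization
pass that previously matched kX86 alone now also matches kX86_64, so the 64-bit
target follows the existing x86 code paths. Below is a minimal standalone C++
sketch of that pattern; the enum only mimics ART's InstructionSet values, and
the IsX86Family() helper is hypothetical, not part of this patch.

    #include <cassert>

    // Mock of ART's InstructionSet enum, reduced to the values needed here.
    enum InstructionSet { kArm, kThumb2, kMips, kX86, kX86_64 };

    // Hypothetical helper: true for both the 32-bit and 64-bit x86 targets,
    // expressing the repeated "== kX86 || == kX86_64" comparison in one place.
    static inline bool IsX86Family(InstructionSet isa) {
      return isa == kX86 || isa == kX86_64;
    }

    int main() {
      assert(IsX86Family(kX86));
      assert(IsX86Family(kX86_64));
      assert(!IsX86Family(kArm));
      return 0;
    }
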
Change-Id: I4a92477a6eeaa9a11fd710d35c602d8d6f88cbb6
Signed-off-by: Dmitry Petrochenko <dmitry.petrochenko@intel.com>
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
index 8f64408..4bdc9fa 100644
--- a/compiler/dex/quick/local_optimizations.cc
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -100,7 +100,7 @@
}
int native_reg_id;
- if (cu_->instruction_set == kX86) {
+ if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
// If x86, location differs depending on whether memory/reg operation.
native_reg_id = (target_flags & IS_STORE) ? this_lir->operands[2] : this_lir->operands[0];
} else {
@@ -121,7 +121,7 @@
uint64_t stop_def_reg_mask = this_lir->u.m.def_mask & ~ENCODE_MEM;
uint64_t stop_use_reg_mask;
- if (cu_->instruction_set == kX86) {
+ if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
stop_use_reg_mask = (IS_BRANCH | this_lir->u.m.use_mask) & ~ENCODE_MEM;
} else {
/*
@@ -241,7 +241,7 @@
}
if (stop_here == true) {
- if (cu_->instruction_set == kX86) {
+ if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
// Prevent stores from being sunk between ops that generate ccodes and
// ops that use them.
uint64_t flags = GetTargetInstFlags(check_lir->opcode);
@@ -306,7 +306,7 @@
uint64_t stop_use_all_mask = this_lir->u.m.use_mask;
- if (cu_->instruction_set != kX86) {
+ if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
/*
* Branches for null/range checks are marked with the true resource
* bits, and loads to Dalvik registers, constant pools, and non-alias