Merge "Inline across dex files."
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index beb5755..8b31154 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -217,20 +217,21 @@
auto* const mapping_table = compiled_method->GetMappingTable();
auto* const vmap_table = compiled_method->GetVmapTable();
auto* const gc_map = compiled_method->GetGcMap();
+ CHECK(gc_map != nullptr) << PrettyMethod(method);
// Write out pre-header stuff.
uint8_t* const mapping_table_ptr = code_cache->AddDataArray(
self, mapping_table->data(), mapping_table->data() + mapping_table->size());
- if (mapping_table == nullptr) {
+ if (mapping_table_ptr == nullptr) {
return false; // Out of data cache.
}
uint8_t* const vmap_table_ptr = code_cache->AddDataArray(
self, vmap_table->data(), vmap_table->data() + vmap_table->size());
- if (vmap_table == nullptr) {
+ if (vmap_table_ptr == nullptr) {
return false; // Out of data cache.
}
uint8_t* const gc_map_ptr = code_cache->AddDataArray(
self, gc_map->data(), gc_map->data() + gc_map->size());
- if (gc_map == nullptr) {
+ if (gc_map_ptr == nullptr) {
return false; // Out of data cache.
}
// Don't touch this until you protect / unprotect the code.
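
Note on the three hunks above: each null check was a copy-and-paste bug that re-tested the source table, which was already dereferenced on the previous line and thus known non-null, instead of the pointer returned by AddDataArray, so running out of JIT data cache went undetected. A minimal sketch of the corrected pattern, with `table` standing in for any of the three tables:

    // AddDataArray returns nullptr when the data cache is exhausted; the
    // result, not the input table, is what must be checked.
    uint8_t* const table_ptr = code_cache->AddDataArray(
        self, table->data(), table->data() + table->size());
    if (table_ptr == nullptr) {
      return false;  // Out of data cache.
    }
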
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index aeec5dd..99283a0 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -63,6 +63,7 @@
using helpers::VIXLRegCodeFromART;
using helpers::WRegisterFrom;
using helpers::XRegisterFrom;
+using helpers::ARM64EncodableConstantOrRegister;
static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
static constexpr int kCurrentMethodStackOffset = 0;
@@ -1106,7 +1107,7 @@
case Primitive::kPrimInt:
case Primitive::kPrimLong:
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
+ locations->SetInAt(1, ARM64EncodableConstantOrRegister(instr->InputAt(1), instr));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
@@ -1398,7 +1399,7 @@
switch (in_type) {
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(compare->InputAt(1)));
+ locations->SetInAt(1, ARM64EncodableConstantOrRegister(compare->InputAt(1), compare));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
}
@@ -1468,7 +1469,7 @@
void LocationsBuilderARM64::VisitCondition(HCondition* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
if (instruction->NeedsMaterialization()) {
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -2116,7 +2117,7 @@
switch (neg->GetResultType()) {
case Primitive::kPrimInt:
case Primitive::kPrimLong:
- locations->SetInAt(0, Location::RegisterOrConstant(neg->InputAt(0)));
+ locations->SetInAt(0, ARM64EncodableConstantOrRegister(neg->InputAt(0), neg));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 754dd10..02b9b32 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2730,26 +2730,45 @@
Label less, greater, done;
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong: {
+ Register left_low = left.AsRegisterPairLow<Register>();
+ Register left_high = left.AsRegisterPairHigh<Register>();
+ int32_t val_low = 0;
+ int32_t val_high = 0;
+ bool right_is_const = false;
+
+ if (right.IsConstant()) {
+ DCHECK(right.GetConstant()->IsLongConstant());
+ right_is_const = true;
+ int64_t val = right.GetConstant()->AsLongConstant()->GetValue();
+ val_low = Low32Bits(val);
+ val_high = High32Bits(val);
+ }
+
if (right.IsRegisterPair()) {
- __ cmpl(left.AsRegisterPairHigh<Register>(), right.AsRegisterPairHigh<Register>());
+ __ cmpl(left_high, right.AsRegisterPairHigh<Register>());
} else if (right.IsDoubleStackSlot()) {
- __ cmpl(left.AsRegisterPairHigh<Register>(),
- Address(ESP, right.GetHighStackIndex(kX86WordSize)));
+ __ cmpl(left_high, Address(ESP, right.GetHighStackIndex(kX86WordSize)));
} else {
- DCHECK(right.IsConstant()) << right;
- __ cmpl(left.AsRegisterPairHigh<Register>(),
- Immediate(High32Bits(right.GetConstant()->AsLongConstant()->GetValue())));
+ DCHECK(right_is_const) << right;
+ if (val_high == 0) {
+ __ testl(left_high, left_high);
+ } else {
+ __ cmpl(left_high, Immediate(val_high));
+ }
}
__ j(kLess, &less); // Signed compare.
__ j(kGreater, &greater); // Signed compare.
if (right.IsRegisterPair()) {
- __ cmpl(left.AsRegisterPairLow<Register>(), right.AsRegisterPairLow<Register>());
+ __ cmpl(left_low, right.AsRegisterPairLow<Register>());
} else if (right.IsDoubleStackSlot()) {
- __ cmpl(left.AsRegisterPairLow<Register>(), Address(ESP, right.GetStackIndex()));
+ __ cmpl(left_low, Address(ESP, right.GetStackIndex()));
} else {
- DCHECK(right.IsConstant()) << right;
- __ cmpl(left.AsRegisterPairLow<Register>(),
- Immediate(Low32Bits(right.GetConstant()->AsLongConstant()->GetValue())));
+ DCHECK(right_is_const) << right;
+ if (val_low == 0) {
+ __ testl(left_low, left_low);
+ } else {
+ __ cmpl(left_low, Immediate(val_low));
+ }
}
break;
}
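
The rewritten kPrimLong path splits the constant once with Low32Bits/High32Bits and substitutes `testl reg, reg` when a half is zero: `cmpl reg, Immediate(0)` and `testl reg, reg` set SF, ZF, OF, and CF identically, and the testl encoding is shorter. A plain-C++ sketch of the two-step compare this case implements, with local stand-ins for ART's Low32Bits/High32Bits helpers:

    #include <cstdint>

    static int32_t High32Bits(int64_t v) { return static_cast<int32_t>(v >> 32); }
    static uint32_t Low32Bits(int64_t v) { return static_cast<uint32_t>(v); }

    int CompareLongs(int64_t lhs, int64_t rhs) {
      // High words compare signed...
      if (High32Bits(lhs) != High32Bits(rhs)) {
        return High32Bits(lhs) < High32Bits(rhs) ? -1 : 1;
      }
      // ...and, when they are equal, low words compare unsigned.
      if (Low32Bits(lhs) != Low32Bits(rhs)) {
        return Low32Bits(lhs) < Low32Bits(rhs) ? -1 : 1;
      }
      return 0;
    }
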
@@ -3645,14 +3664,21 @@
__ movl(Address(ESP, destination.GetStackIndex()), Immediate(value));
}
} else if (constant->IsFloatConstant()) {
- float value = constant->AsFloatConstant()->GetValue();
- Immediate imm(bit_cast<float, int32_t>(value));
+ float fp_value = constant->AsFloatConstant()->GetValue();
+ int32_t value = bit_cast<float, int32_t>(fp_value);
+ Immediate imm(value);
if (destination.IsFpuRegister()) {
- ScratchRegisterScope ensure_scratch(
- this, kNoRegister, EAX, codegen_->GetNumberOfCoreRegisters());
- Register temp = static_cast<Register>(ensure_scratch.GetRegister());
- __ movl(temp, imm);
- __ movd(destination.AsFpuRegister<XmmRegister>(), temp);
+ XmmRegister dest = destination.AsFpuRegister<XmmRegister>();
+ if (value == 0) {
+ // Easy handling of 0.0.
+ __ xorps(dest, dest);
+ } else {
+ ScratchRegisterScope ensure_scratch(
+ this, kNoRegister, EAX, codegen_->GetNumberOfCoreRegisters());
+ Register temp = static_cast<Register>(ensure_scratch.GetRegister());
+ __ movl(temp, Immediate(value));
+ __ movd(dest, temp);
+ }
} else {
DCHECK(destination.IsStackSlot()) << destination;
__ movl(Address(ESP, destination.GetStackIndex()), imm);
@@ -4107,18 +4133,38 @@
} else {
DCHECK(second.IsConstant()) << second;
int64_t value = second.GetConstant()->AsLongConstant()->GetValue();
- Immediate low(Low32Bits(value));
- Immediate high(High32Bits(value));
+ int32_t low_value = Low32Bits(value);
+ int32_t high_value = High32Bits(value);
+ Immediate low(low_value);
+ Immediate high(high_value);
+ Register first_low = first.AsRegisterPairLow<Register>();
+ Register first_high = first.AsRegisterPairHigh<Register>();
if (instruction->IsAnd()) {
- __ andl(first.AsRegisterPairLow<Register>(), low);
- __ andl(first.AsRegisterPairHigh<Register>(), high);
+ if (low_value == 0) {
+ __ xorl(first_low, first_low);
+ } else if (low_value != -1) {
+ __ andl(first_low, low);
+ }
+ if (high_value == 0) {
+ __ xorl(first_high, first_high);
+ } else if (high_value != -1) {
+ __ andl(first_high, high);
+ }
} else if (instruction->IsOr()) {
- __ orl(first.AsRegisterPairLow<Register>(), low);
- __ orl(first.AsRegisterPairHigh<Register>(), high);
+ if (low_value != 0) {
+ __ orl(first_low, low);
+ }
+ if (high_value != 0) {
+ __ orl(first_high, high);
+ }
} else {
DCHECK(instruction->IsXor());
- __ xorl(first.AsRegisterPairLow<Register>(), low);
- __ xorl(first.AsRegisterPairHigh<Register>(), high);
+ if (low_value != 0) {
+ __ xorl(first_low, low);
+ }
+ if (high_value != 0) {
+ __ xorl(first_high, high);
+ }
}
}
}
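
On each 32-bit half of the long constant, the bitwise paths above fold the standard identities rather than always emitting two ALU instructions:

    // x is the register holding one half of the pair:
    //   x & 0    ->  xorl x, x   (canonical shortest way to zero a register)
    //   x & ~0   ->  no code     (AND with all-ones is the identity)
    //   x | 0    ->  no code
    //   x ^ 0    ->  no code
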
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index dbd7c9e..d09c8f8 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -956,7 +956,7 @@
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrInt32LongConstant(compare->InputAt(1)));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
}
@@ -982,7 +982,18 @@
Primitive::Type type = compare->InputAt(0)->GetType();
switch (type) {
case Primitive::kPrimLong: {
- __ cmpq(left.AsRegister<CpuRegister>(), right.AsRegister<CpuRegister>());
+ CpuRegister left_reg = left.AsRegister<CpuRegister>();
+ if (right.IsConstant()) {
+ int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
+ DCHECK(IsInt<32>(value));
+ if (value == 0) {
+ __ testq(left_reg, left_reg);
+ } else {
+ __ cmpq(left_reg, Immediate(static_cast<int32_t>(value)));
+ }
+ } else {
+ __ cmpq(left_reg, right.AsRegister<CpuRegister>());
+ }
break;
}
case Primitive::kPrimFloat: {
@@ -1865,17 +1876,7 @@
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
// We can use a leaq or addq if the constant can fit in an immediate.
- HInstruction* rhs = add->InputAt(1);
- bool is_int32_constant = false;
- if (rhs->IsLongConstant()) {
- int64_t value = rhs->AsLongConstant()->GetValue();
- if (static_cast<int32_t>(value) == value) {
- is_int32_constant = true;
- }
- }
- locations->SetInAt(1,
- is_int32_constant ? Location::RegisterOrConstant(rhs) :
- Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrInt32LongConstant(add->InputAt(1)));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
}
@@ -1973,7 +1974,7 @@
}
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrInt32LongConstant(sub->InputAt(1)));
locations->SetOut(Location::SameAsFirstInput());
break;
}
@@ -2007,7 +2008,13 @@
break;
}
case Primitive::kPrimLong: {
- __ subq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
+ if (second.IsConstant()) {
+ int64_t value = second.GetConstant()->AsLongConstant()->GetValue();
+ DCHECK(IsInt<32>(value));
+ __ subq(first.AsRegister<CpuRegister>(), Immediate(static_cast<int32_t>(value)));
+ } else {
+ __ subq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
+ }
break;
}
@@ -2038,8 +2045,13 @@
}
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- locations->SetOut(Location::SameAsFirstInput());
+ locations->SetInAt(1, Location::RegisterOrInt32LongConstant(mul->InputAt(1)));
+ if (locations->InAt(1).IsConstant()) {
+ // Can use 3 operand multiply.
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ } else {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
break;
}
case Primitive::kPrimFloat:
@@ -2059,9 +2071,9 @@
LocationSummary* locations = mul->GetLocations();
Location first = locations->InAt(0);
Location second = locations->InAt(1);
- DCHECK(first.Equals(locations->Out()));
switch (mul->GetResultType()) {
case Primitive::kPrimInt: {
+ DCHECK(first.Equals(locations->Out()));
if (second.IsRegister()) {
__ imull(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
} else if (second.IsConstant()) {
@@ -2075,16 +2087,27 @@
break;
}
case Primitive::kPrimLong: {
- __ imulq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
+ if (second.IsConstant()) {
+ int64_t value = second.GetConstant()->AsLongConstant()->GetValue();
+ DCHECK(IsInt<32>(value));
+ __ imulq(locations->Out().AsRegister<CpuRegister>(),
+ first.AsRegister<CpuRegister>(),
+ Immediate(static_cast<int32_t>(value)));
+ } else {
+ DCHECK(first.Equals(locations->Out()));
+ __ imulq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
+ }
break;
}
case Primitive::kPrimFloat: {
+ DCHECK(first.Equals(locations->Out()));
__ mulss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
break;
}
case Primitive::kPrimDouble: {
+ DCHECK(first.Equals(locations->Out()));
__ mulsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
break;
}
@@ -3320,20 +3343,35 @@
__ movq(Address(CpuRegister(RSP), destination.GetStackIndex()), CpuRegister(TMP));
}
} else if (constant->IsFloatConstant()) {
- Immediate imm(bit_cast<float, int32_t>(constant->AsFloatConstant()->GetValue()));
+ float fp_value = constant->AsFloatConstant()->GetValue();
+ int32_t value = bit_cast<float, int32_t>(fp_value);
+ Immediate imm(value);
if (destination.IsFpuRegister()) {
- __ movl(CpuRegister(TMP), imm);
- __ movd(destination.AsFpuRegister<XmmRegister>(), CpuRegister(TMP));
+ XmmRegister dest = destination.AsFpuRegister<XmmRegister>();
+ if (value == 0) {
+ // Easy handling of FP 0.0.
+ __ xorps(dest, dest);
+ } else {
+ __ movl(CpuRegister(TMP), imm);
+ __ movd(dest, CpuRegister(TMP));
+ }
} else {
DCHECK(destination.IsStackSlot()) << destination;
__ movl(Address(CpuRegister(RSP), destination.GetStackIndex()), imm);
}
} else {
DCHECK(constant->IsDoubleConstant()) << constant->DebugName();
- Immediate imm(bit_cast<double, int64_t>(constant->AsDoubleConstant()->GetValue()));
+ double fp_value = constant->AsDoubleConstant()->GetValue();
+ int64_t value = bit_cast<double, int64_t>(fp_value);
+ Immediate imm(value);
if (destination.IsFpuRegister()) {
- __ movq(CpuRegister(TMP), imm);
- __ movd(destination.AsFpuRegister<XmmRegister>(), CpuRegister(TMP));
+ XmmRegister dest = destination.AsFpuRegister<XmmRegister>();
+ if (value == 0) {
+ __ xorpd(dest, dest);
+ } else {
+ __ movq(CpuRegister(TMP), imm);
+ __ movd(dest, CpuRegister(TMP));
+ }
} else {
DCHECK(destination.IsDoubleStackSlot()) << destination;
__ movq(CpuRegister(TMP), imm);
@@ -3673,8 +3711,8 @@
if (instruction->GetType() == Primitive::kPrimInt) {
locations->SetInAt(1, Location::Any());
} else {
- // Request a register to avoid loading a 64bits constant.
- locations->SetInAt(1, Location::RequiresRegister());
+ // We can handle 32 bit constants.
+ locations->SetInAt(1, Location::RegisterOrInt32LongConstant(instruction->InputAt(1)));
}
locations->SetOut(Location::SameAsFirstInput());
}
@@ -3730,13 +3769,34 @@
}
} else {
DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
+ CpuRegister first_reg = first.AsRegister<CpuRegister>();
+ bool second_is_constant = false;
+ int64_t value = 0;
+ if (second.IsConstant()) {
+ second_is_constant = true;
+ value = second.GetConstant()->AsLongConstant()->GetValue();
+ DCHECK(IsInt<32>(value));
+ }
+
if (instruction->IsAnd()) {
- __ andq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
+ if (second_is_constant) {
+ __ andq(first_reg, Immediate(static_cast<int32_t>(value)));
+ } else {
+ __ andq(first_reg, second.AsRegister<CpuRegister>());
+ }
} else if (instruction->IsOr()) {
- __ orq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
+ if (second_is_constant) {
+ __ orq(first_reg, Immediate(static_cast<int32_t>(value)));
+ } else {
+ __ orq(first_reg, second.AsRegister<CpuRegister>());
+ }
} else {
DCHECK(instruction->IsXor());
- __ xorq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
+ if (second_is_constant) {
+ __ xorq(first_reg, Immediate(static_cast<int32_t>(value)));
+ } else {
+ __ xorq(first_reg, second.AsRegister<CpuRegister>());
+ }
}
}
}
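
All of the x86-64 changes above share one constraint: the 64-bit ALU forms (cmpq, subq, imulq, andq, orq, xorq) encode at most a 32-bit immediate that the CPU sign-extends to 64 bits, which is exactly what RegisterOrInt32LongConstant and the IsInt<32>(value) DCHECKs capture; as on x86, a compare against zero degrades to `testq left_reg, left_reg`. A sketch of the dividing line, with a hypothetical helper name:

    #include <cstdint>
    #include <limits>

    // Hypothetical stand-in for the IsInt<32>() guard used above.
    static bool FitsSignExtendedImm32(int64_t v) {
      return v >= std::numeric_limits<int32_t>::min() &&
             v <= std::numeric_limits<int32_t>::max();
    }
    // FitsSignExtendedImm32(0x7fffffffLL)   -> true:  folded as an immediate.
    // FitsSignExtendedImm32(0x100000000LL)  -> false: needs a register (movq).
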
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 9447d3b..fd8c0c6 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -183,6 +183,40 @@
}
}
+static bool CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* instr) {
+ DCHECK(constant->IsIntConstant() || constant->IsLongConstant() || constant->IsNullConstant());
+
+ // For single uses we let VIXL handle the constant generation since it will
+ // use registers that are not managed by the register allocator (wip0, wip1).
+ if (constant->GetUses().HasOnlyOneUse()) {
+ return true;
+ }
+
+ int64_t value = CodeGenerator::GetInt64ValueOf(constant);
+
+ if (instr->IsAdd() || instr->IsSub() || instr->IsCondition() || instr->IsCompare()) {
+ // Uses aliases of ADD/SUB instructions.
+ return vixl::Assembler::IsImmAddSub(value);
+ } else if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
+ // Uses logical operations.
+ return vixl::Assembler::IsImmLogical(value, vixl::kXRegSize);
+ } else {
+ DCHECK(instr->IsNeg());
+ // Uses mov -immediate.
+ return vixl::Assembler::IsImmMovn(value, vixl::kXRegSize);
+ }
+}
+
+static inline Location ARM64EncodableConstantOrRegister(HInstruction* constant,
+ HInstruction* instr) {
+ if (constant->IsConstant()
+ && CanEncodeConstantAsImmediate(constant->AsConstant(), instr)) {
+ return Location::ConstantLocation(constant->AsConstant());
+ }
+
+ return Location::RequiresRegister();
+}
+
} // namespace helpers
} // namespace arm64
} // namespace art
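
CanEncodeConstantAsImmediate defers to VIXL's encodability predicates, which mirror the A64 instruction formats. Roughly, from the A64 encoding rules rather than from this patch:

    // IsImmAddSub(v):  v is an unsigned 12-bit value, optionally shifted left
    //                  by 12: 0..4095, or 4096, 8192, ..., up to 4095 << 12.
    // IsImmLogical(v): v is a "bitmask immediate": a replicated, rotated run
    //                  of contiguous ones (e.g. 0xff, 0x00ff00ff00ff00ff);
    //                  notably 0 and all-ones cannot be encoded.
    // IsImmMovn(v):    the bitwise NOT of v reduces to one 16-bit chunk at an
    //                  aligned shift, i.e. v fits the MOVN form that a
    //                  Neg-of-constant ultimately lowers to.
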
diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc
index 4ac1fe8..a1ae670 100644
--- a/compiler/optimizing/locations.cc
+++ b/compiler/optimizing/locations.cc
@@ -56,6 +56,19 @@
: Location::RequiresRegister();
}
+Location Location::RegisterOrInt32LongConstant(HInstruction* instruction) {
+ if (!instruction->IsConstant() || !instruction->AsConstant()->IsLongConstant()) {
+ return Location::RequiresRegister();
+ }
+
+ // Does the long constant fit in a 32 bit int?
+ int64_t value = instruction->AsConstant()->AsLongConstant()->GetValue();
+
+ return IsInt<32>(value)
+ ? Location::ConstantLocation(instruction->AsConstant())
+ : Location::RequiresRegister();
+}
+
Location Location::ByteRegisterOrConstant(int reg, HInstruction* instruction) {
return instruction->IsConstant()
? Location::ConstantLocation(instruction->AsConstant())
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 566c0da..de876be 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -345,6 +345,7 @@
}
static Location RegisterOrConstant(HInstruction* instruction);
+ static Location RegisterOrInt32LongConstant(HInstruction* instruction);
static Location ByteRegisterOrConstant(int reg, HInstruction* instruction);
// The location of the first input to the instruction will be
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index b70f925..933a8a0 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -462,6 +462,16 @@
return nullptr;
}
+ // Implementation of the space filter: do not compile a code item whose size in
+ // code units is bigger than 256.
+ static constexpr size_t kSpaceFilterOptimizingThreshold = 256;
+ const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
+ if ((compiler_options.GetCompilerFilter() == CompilerOptions::kSpace)
+ && (code_item->insns_size_in_code_units_ > kSpaceFilterOptimizingThreshold)) {
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledSpaceFilter);
+ return nullptr;
+ }
+
DexCompilationUnit dex_compilation_unit(
nullptr, class_loader, art::Runtime::Current()->GetClassLinker(), dex_file, code_item,
class_def_idx, method_idx, access_flags,
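
(For scale: a Dex code unit is 16 bits, so the 256-code-unit threshold means methods with more than 256 x 2 = 512 bytes of bytecode are skipped here and recorded under kNotCompiledSpaceFilter.)
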
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 3ebf0f8..22ec2a5 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -38,6 +38,7 @@
kNotCompiledUnresolvedMethod,
kNotCompiledUnresolvedField,
kNotCompiledNonSequentialRegPair,
+ kNotCompiledSpaceFilter,
kNotOptimizedTryCatch,
kNotOptimizedDisabled,
kNotCompiledCantAccesType,
@@ -96,6 +97,7 @@
case kNotOptimizedDisabled : return "kNotOptimizedDisabled";
case kNotOptimizedTryCatch : return "kNotOptimizedTryCatch";
case kNotCompiledCantAccesType : return "kNotCompiledCantAccesType";
+ case kNotCompiledSpaceFilter : return "kNotCompiledSpaceFilter";
case kNotOptimizedRegisterAllocator : return "kNotOptimizedRegisterAllocator";
case kNotCompiledUnhandledInstruction : return "kNotCompiledUnhandledInstruction";
case kRemovedCheckedCast: return "kRemovedCheckedCast";
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index f2704b7..bd155ed 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1277,6 +1277,14 @@
}
+void X86_64Assembler::orq(CpuRegister dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ CHECK(imm.is_int32()); // orq only supports 32b immediate.
+ EmitRex64(dst);
+ EmitComplex(1, Operand(dst), imm);
+}
+
+
void X86_64Assembler::orq(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitRex64(dst, src);
@@ -1548,27 +1556,30 @@
void X86_64Assembler::imulq(CpuRegister reg, const Immediate& imm) {
+ imulq(reg, reg, imm);
+}
+
+void X86_64Assembler::imulq(CpuRegister dst, CpuRegister reg, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
CHECK(imm.is_int32()); // imulq only supports 32b immediate.
- EmitRex64(reg, reg);
+ EmitRex64(dst, reg);
// See whether imm can be represented as a sign-extended 8bit value.
int64_t v64 = imm.value();
if (IsInt<8>(v64)) {
// Sign-extension works.
EmitUint8(0x6B);
- EmitOperand(reg.LowBits(), Operand(reg));
+ EmitOperand(dst.LowBits(), Operand(reg));
EmitUint8(static_cast<uint8_t>(v64 & 0xFF));
} else {
// Not representable, use full immediate.
EmitUint8(0x69);
- EmitOperand(reg.LowBits(), Operand(reg));
+ EmitOperand(dst.LowBits(), Operand(reg));
EmitImmediate(imm);
}
}
-
void X86_64Assembler::imulq(CpuRegister reg, const Address& address) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitRex64(reg, address);
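
For reference, the two encodings the rewritten imulq selects between, worked out from the opcode bytes above (Intel syntax, rax/rbx for illustration):

    // imulq rax, rbx, 10      ->  48 6B C3 0A            (0x6B, imm8)
    // imulq rax, rbx, 100000  ->  48 69 C3 A0 86 01 00   (0x69, imm32)
    // The imm8 form is sign-extended, hence the IsInt<8>(v64) test.
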
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 5dfcf45..495f74f 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -429,6 +429,7 @@
void orl(CpuRegister dst, CpuRegister src);
void orl(CpuRegister reg, const Address& address);
void orq(CpuRegister dst, CpuRegister src);
+ void orq(CpuRegister dst, const Immediate& imm);
void xorl(CpuRegister dst, CpuRegister src);
void xorl(CpuRegister dst, const Immediate& imm);
@@ -467,6 +468,7 @@
void imulq(CpuRegister dst, CpuRegister src);
void imulq(CpuRegister reg, const Immediate& imm);
void imulq(CpuRegister reg, const Address& address);
+ void imulq(CpuRegister dst, CpuRegister reg, const Immediate& imm);
void imull(CpuRegister reg);
void imull(const Address& address);
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 44b07e2..dfea783 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -109,9 +109,14 @@
UsageError("Usage: dex2oat [options]...");
UsageError("");
- UsageError(" --dex-file=<dex-file>: specifies a .dex file to compile.");
+ UsageError(" --dex-file=<dex-file>: specifies a .dex, .jar, or .apk file to compile.");
UsageError(" Example: --dex-file=/system/framework/core.jar");
UsageError("");
+ UsageError(" --dex-location=<dex-location>: specifies an alternative dex location to");
+ UsageError(" encode in the oat file for the corresponding --dex-file argument.");
+ UsageError(" Example: --dex-file=/home/build/out/system/framework/core.jar");
+ UsageError(" --dex-location=/system/framework/core.jar");
+ UsageError("");
UsageError(" --zip-fd=<file-descriptor>: specifies a file descriptor of a zip file");
UsageError(" containing a classes.dex file to compile.");
UsageError(" Example: --zip-fd=5");
@@ -614,7 +619,6 @@
Usage("Unknown compiler backend: %s", backend_str.data());
}
} else if (option.starts_with("--compiler-filter=")) {
- requested_specific_compiler = true;
compiler_filter_string = option.substr(strlen("--compiler-filter=")).data();
} else if (option == "--compile-pic") {
compile_pic = true;
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 9755efb..28fbc3e 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -95,9 +95,9 @@
RELOCATE_TYPES += no-relocate
endif
ifeq ($(ART_TEST_RUN_TEST_RELOCATE_NO_PATCHOAT),true)
- RELOCATE_TYPES := relocate-no-patchoat
+ RELOCATE_TYPES := relocate-npatchoat
endif
-TRACE_TYPES := no-trace
+TRACE_TYPES := ntrace
ifeq ($(ART_TEST_TRACE),true)
TRACE_TYPES += trace
endif
@@ -119,7 +119,7 @@
ifeq ($(ART_TEST_PIC_IMAGE),true)
IMAGE_TYPES += picimage
endif
-PICTEST_TYPES := nopictest
+PICTEST_TYPES := npictest
ifeq ($(ART_TEST_PIC_TEST),true)
PICTEST_TYPES += pictest
endif
@@ -130,7 +130,7 @@
ifeq ($(ART_TEST_RUN_TEST_NDEBUG),true)
RUN_TYPES += ndebug
endif
-DEBUGGABLE_TYPES := nondebuggable
+DEBUGGABLE_TYPES := ndebuggable
ifeq ($(ART_TEST_RUN_TEST_DEBUGGABLE),true)
DEBUGGABLE_TYPES += debuggable
endif
@@ -272,9 +272,9 @@
$(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
-ifneq (,$(filter relocate-no-patchoat,$(RELOCATE_TYPES)))
+ifneq (,$(filter relocate-npatchoat,$(RELOCATE_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES), relocate-no-patchoat,$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(COMPILER_TYPES), relocate-npatchoat,$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
@@ -375,7 +375,7 @@
ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),nondebuggable,$(TEST_ART_BROKEN_OPTIMIZING_NONDEBUGGABLE_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+ $(IMAGE_TYPES),$(PICTEST_TYPES),ndebuggable,$(TEST_ART_BROKEN_OPTIMIZING_NONDEBUGGABLE_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
TEST_ART_BROKEN_OPTIMIZING_NONDEBUGGABLE_RUN_TESTS :=
@@ -461,10 +461,10 @@
# Create a rule to build and run a test following the form:
# test-art-{1: host or target}-run-test-{2: debug ndebug}-{3: prebuild no-prebuild no-dex2oat}-
-# {4: interpreter default optimizing jit}-{5: relocate no-relocate relocate-no-patchoat}-
-# {6: trace or no-trace}-{7: gcstress gcverify cms}-{8: forcecopy checkjni jni}-
-# {9: no-image image picimage}-{10: pictest nopictest}-
-# {11: nondebuggable debuggable}-{12: test name}{13: 32 or 64}
+# {4: interpreter default optimizing jit}-{5: relocate no-relocate relocate-npatchoat}-
+# {6: trace or ntrace}-{7: gcstress gcverify cms}-{8: forcecopy checkjni jni}-
+# {9: no-image image picimage}-{10: pictest npictest}-
+# {11: ndebuggable debuggable}-{12: test name}{13: 32 or 64}
define define-test-art-run-test
run_test_options :=
prereq_rule :=
@@ -543,7 +543,7 @@
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_RELOCATE_RULES
run_test_options += --no-relocate
else
- ifeq ($(5),relocate-no-patchoat)
+ ifeq ($(5),relocate-npatchoat)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_RELOCATE_NO_PATCHOAT_RULES
run_test_options += --relocate --no-patchoat
else
@@ -555,7 +555,7 @@
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_TRACE_RULES
run_test_options += --trace
else
- ifeq ($(6),no-trace)
+ ifeq ($(6),ntrace)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_TRACE_RULES
else
$$(error found $(6) expected $(TRACE_TYPES))
@@ -635,7 +635,7 @@
ifeq ($(10),pictest)
run_test_options += --pic-test
else
- ifeq ($(10),nopictest)
+ ifeq ($(10),npictest)
# Nothing to be done.
else
$$(error found $(10) expected $(PICTEST_TYPES))
@@ -645,7 +645,7 @@
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_DEBUGGABLE_RULES
run_test_options += --debuggable
else
- ifeq ($(11),nondebuggable)
+ ifeq ($(11),ndebuggable)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NONDEBUGGABLE_RULES
# Nothing to be done.
else
diff --git a/test/run-test b/test/run-test
index df0fce4..2873a35 100755
--- a/test/run-test
+++ b/test/run-test
@@ -441,8 +441,8 @@
echo " --build-only Build test files only (off by default)."
echo " --interpreter Enable interpreter only mode (off by default)."
echo " --jit Enable jit (off by default)."
- echo " --optimizing Enable optimizing compiler (off by default)."
- echo " --quick Use Quick compiler (default)."
+ echo " --optimizing Enable optimizing compiler (default)."
+ echo " --quick Use Quick compiler (off by default)."
echo " --no-verify Turn off verification (on by default)."
echo " --no-optimize Turn off optimization (on by default)."
echo " --no-precise Turn off precise GC (on by default)."