Merge "Revert "Avoid creating verified methods for non quickening filters""
diff --git a/Android.bp b/Android.bp
index d0e22fb..0ce86d8 100644
--- a/Android.bp
+++ b/Android.bp
@@ -35,6 +35,7 @@
"profman",
"runtime",
"sigchainlib",
+ "simulator",
"test",
"tools/cpp-define-generator",
"tools/dmtracedump",
diff --git a/compiler/Android.bp b/compiler/Android.bp
index b721d21..f11d256 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -423,8 +423,11 @@
},
},
+ header_libs: ["libart_simulator_headers"],
+
shared_libs: [
"libartd-compiler",
+ "libartd-simulator-container",
"libvixld-arm",
"libvixld-arm64",
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 73202b4..51a0bae 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -446,6 +446,16 @@
return GetFrameSize() == (CallPushesPC() ? GetWordSize() : 0);
}
+ static int8_t GetInt8ValueOf(HConstant* constant) {
+ DCHECK(constant->IsIntConstant());
+ return constant->AsIntConstant()->GetValue();
+ }
+
+ static int16_t GetInt16ValueOf(HConstant* constant) {
+ DCHECK(constant->IsIntConstant());
+ return constant->AsIntConstant()->GetValue();
+ }
+
static int32_t GetInt32ValueOf(HConstant* constant) {
if (constant->IsIntConstant()) {
return constant->AsIntConstant()->GetValue();
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 142cc7f..99b7793 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4956,8 +4956,8 @@
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
if (value.IsConstant()) {
- int16_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
- __ movw(Address(base, offset), Immediate(v));
+ __ movw(Address(base, offset),
+ Immediate(CodeGenerator::GetInt16ValueOf(value.GetConstant())));
} else {
__ movw(Address(base, offset), value.AsRegister<Register>());
}
@@ -5404,7 +5404,7 @@
if (value.IsRegister()) {
__ movb(address, value.AsRegister<ByteRegister>());
} else {
- __ movb(address, Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
+ __ movb(address, Immediate(CodeGenerator::GetInt8ValueOf(value.GetConstant())));
}
codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
@@ -5417,7 +5417,7 @@
if (value.IsRegister()) {
__ movw(address, value.AsRegister<Register>());
} else {
- __ movw(address, Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
+ __ movw(address, Immediate(CodeGenerator::GetInt16ValueOf(value.GetConstant())));
}
codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 86f6d51..8283887 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -4425,8 +4425,8 @@
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
if (value.IsConstant()) {
- int8_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
- __ movb(Address(base, offset), Immediate(v));
+ __ movb(Address(base, offset),
+ Immediate(CodeGenerator::GetInt8ValueOf(value.GetConstant())));
} else {
__ movb(Address(base, offset), value.AsRegister<CpuRegister>());
}
@@ -4436,8 +4436,8 @@
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
if (value.IsConstant()) {
- int16_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
- __ movw(Address(base, offset), Immediate(v));
+ __ movw(Address(base, offset),
+ Immediate(CodeGenerator::GetInt16ValueOf(value.GetConstant())));
} else {
__ movw(Address(base, offset), value.AsRegister<CpuRegister>());
}
@@ -4861,7 +4861,7 @@
if (value.IsRegister()) {
__ movb(address, value.AsRegister<CpuRegister>());
} else {
- __ movb(address, Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
+ __ movb(address, Immediate(CodeGenerator::GetInt8ValueOf(value.GetConstant())));
}
codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
@@ -4875,7 +4875,7 @@
__ movw(address, value.AsRegister<CpuRegister>());
} else {
DCHECK(value.IsConstant()) << value;
- __ movw(address, Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
+ __ movw(address, Immediate(CodeGenerator::GetInt16ValueOf(value.GetConstant())));
}
codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index 1b38acd..cada2e6 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -28,6 +28,7 @@
#include "arch/x86/instruction_set_features_x86.h"
#include "arch/x86/registers_x86.h"
#include "arch/x86_64/instruction_set_features_x86_64.h"
+#include "code_simulator.h"
#include "code_simulator_container.h"
#include "common_compiler_test.h"
#include "graph_checker.h"
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 51101f1..76a243f 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1210,14 +1210,14 @@
uint8_t* stack_map_data = nullptr;
uint8_t* method_info_data = nullptr;
uint8_t* roots_data = nullptr;
- uint32_t data_size = code_cache->ReserveData(self,
- stack_map_size,
- method_info_size,
- number_of_roots,
- method,
- &stack_map_data,
- &method_info_data,
- &roots_data);
+ code_cache->ReserveData(self,
+ stack_map_size,
+ method_info_size,
+ number_of_roots,
+ method,
+ &stack_map_data,
+ &method_info_data,
+ &roots_data);
if (stack_map_data == nullptr || roots_data == nullptr) {
return false;
}
@@ -1238,7 +1238,6 @@
codegen->GetFpuSpillMask(),
code_allocator.GetMemory().data(),
code_allocator.GetSize(),
- data_size,
osr,
roots,
codegen->GetGraph()->HasShouldDeoptimizeFlag(),
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 24e3450..2cbabcf 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -2920,6 +2920,102 @@
static_cast<FRegister>(wt));
}
+void MipsAssembler::MaddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x1, 0x0, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::MaddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x1, 0x1, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::MaddvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x1, 0x2, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::MaddvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x1, 0x3, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::MsubvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x2, 0x0, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::MsubvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x2, 0x1, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::MsubvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x2, 0x2, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::MsubvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x2, 0x3, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::FmaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x2, 0x0, wt, ws, wd, 0x1b),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::FmaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x2, 0x1, wt, ws, wd, 0x1b),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::FmsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x2, 0x2, wt, ws, wd, 0x1b),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::FmsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x2, 0x3, wt, ws, wd, 0x1b),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
void MipsAssembler::ReplicateFPToVectorRegister(VectorRegister dst,
FRegister src,
bool is_double) {
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index e42bb3f..a7ff931 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -613,6 +613,19 @@
void IlvrW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void IlvrD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MaddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MaddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MaddvW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MaddvD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MsubvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MsubvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MsubvW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MsubvD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FmaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FmaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FmsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FmsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+
// Helper for replicating floating point value in all destination elements.
void ReplicateFPToVectorRegister(VectorRegister dst, FRegister src, bool is_double);
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index 6ee2a5c..b72a14e 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -1752,6 +1752,66 @@
DriverStr(RepeatVVV(&mips::MipsAssembler::IlvrD, "ilvr.d ${reg1}, ${reg2}, ${reg3}"), "ilvr.d");
}
+TEST_F(AssemblerMIPS32r6Test, MaddvB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::MaddvB, "maddv.b ${reg1}, ${reg2}, ${reg3}"),
+ "maddv.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MaddvH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::MaddvH, "maddv.h ${reg1}, ${reg2}, ${reg3}"),
+ "maddv.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MaddvW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::MaddvW, "maddv.w ${reg1}, ${reg2}, ${reg3}"),
+ "maddv.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MaddvD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::MaddvD, "maddv.d ${reg1}, ${reg2}, ${reg3}"),
+ "maddv.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MsubvB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::MsubvB, "msubv.b ${reg1}, ${reg2}, ${reg3}"),
+ "msubv.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MsubvH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::MsubvH, "msubv.h ${reg1}, ${reg2}, ${reg3}"),
+ "msubv.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MsubvW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::MsubvW, "msubv.w ${reg1}, ${reg2}, ${reg3}"),
+ "msubv.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MsubvD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::MsubvD, "msubv.d ${reg1}, ${reg2}, ${reg3}"),
+ "msubv.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, FmaddW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::FmaddW, "fmadd.w ${reg1}, ${reg2}, ${reg3}"),
+ "fmadd.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, FmaddD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::FmaddD, "fmadd.d ${reg1}, ${reg2}, ${reg3}"),
+ "fmadd.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, FmsubW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::FmsubW, "fmsub.w ${reg1}, ${reg2}, ${reg3}"),
+ "fmsub.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, FmsubD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::FmsubD, "fmsub.d ${reg1}, ${reg2}, ${reg3}"),
+ "fmsub.d");
+}
+
#undef __
} // namespace art
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 9039854..7a1beb6 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -1899,6 +1899,66 @@
EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x14);
}
+void Mips64Assembler::MaddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x1, 0x0, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::MaddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x1, 0x1, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::MaddvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x1, 0x2, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::MaddvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x1, 0x3, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::MsubvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x2, 0x0, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::MsubvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x2, 0x1, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::MsubvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x2, 0x2, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::MsubvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x2, 0x3, wt, ws, wd, 0x12);
+}
+
+void Mips64Assembler::FmaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x2, 0x0, wt, ws, wd, 0x1b);
+}
+
+void Mips64Assembler::FmaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x2, 0x1, wt, ws, wd, 0x1b);
+}
+
+void Mips64Assembler::FmsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x2, 0x2, wt, ws, wd, 0x1b);
+}
+
+void Mips64Assembler::FmsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x2, 0x3, wt, ws, wd, 0x1b);
+}
+
void Mips64Assembler::ReplicateFPToVectorRegister(VectorRegister dst,
FpuRegister src,
bool is_double) {
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 5e88033..c39d120 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -796,6 +796,19 @@
void IlvrW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void IlvrD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MaddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MaddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MaddvW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MaddvD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MsubvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MsubvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MsubvW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MsubvD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FmaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FmaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FmsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FmsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+
// Helper for replicating floating point value in all destination elements.
void ReplicateFPToVectorRegister(VectorRegister dst, FpuRegister src, bool is_double);
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index bdf9598..021e335 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -3340,6 +3340,66 @@
"ilvr.d");
}
+TEST_F(AssemblerMIPS64Test, MaddvB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::MaddvB, "maddv.b ${reg1}, ${reg2}, ${reg3}"),
+ "maddv.b");
+}
+
+TEST_F(AssemblerMIPS64Test, MaddvH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::MaddvH, "maddv.h ${reg1}, ${reg2}, ${reg3}"),
+ "maddv.h");
+}
+
+TEST_F(AssemblerMIPS64Test, MaddvW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::MaddvW, "maddv.w ${reg1}, ${reg2}, ${reg3}"),
+ "maddv.w");
+}
+
+TEST_F(AssemblerMIPS64Test, MaddvD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::MaddvD, "maddv.d ${reg1}, ${reg2}, ${reg3}"),
+ "maddv.d");
+}
+
+TEST_F(AssemblerMIPS64Test, MsubvB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::MsubvB, "msubv.b ${reg1}, ${reg2}, ${reg3}"),
+ "msubv.b");
+}
+
+TEST_F(AssemblerMIPS64Test, MsubvH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::MsubvH, "msubv.h ${reg1}, ${reg2}, ${reg3}"),
+ "msubv.h");
+}
+
+TEST_F(AssemblerMIPS64Test, MsubvW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::MsubvW, "msubv.w ${reg1}, ${reg2}, ${reg3}"),
+ "msubv.w");
+}
+
+TEST_F(AssemblerMIPS64Test, MsubvD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::MsubvD, "msubv.d ${reg1}, ${reg2}, ${reg3}"),
+ "msubv.d");
+}
+
+TEST_F(AssemblerMIPS64Test, FmaddW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::FmaddW, "fmadd.w ${reg1}, ${reg2}, ${reg3}"),
+ "fmadd.w");
+}
+
+TEST_F(AssemblerMIPS64Test, FmaddD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::FmaddD, "fmadd.d ${reg1}, ${reg2}, ${reg3}"),
+ "fmadd.d");
+}
+
+TEST_F(AssemblerMIPS64Test, FmsubW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::FmsubW, "fmsub.w ${reg1}, ${reg2}, ${reg3}"),
+ "fmsub.w");
+}
+
+TEST_F(AssemblerMIPS64Test, FmsubD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::FmsubD, "fmsub.d ${reg1}, ${reg2}, ${reg3}"),
+ "fmsub.d");
+}
+
#undef __
} // namespace art
diff --git a/dex2oat/Android.bp b/dex2oat/Android.bp
index 346f5a7..0d453ef 100644
--- a/dex2oat/Android.bp
+++ b/dex2oat/Android.bp
@@ -30,15 +30,6 @@
android: {
// Use the 32-bit version of dex2oat on devices
compile_multilib: "prefer32",
-
- sanitize: {
- // ASan slows down dex2oat by ~3.5x, which translates into
- // extremely slow first boot. Disabled to help speed up
- // SANITIZE_TARGET mode.
- // Bug: 22233158
- address: false,
- coverage: false,
- },
},
},
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index 7cb216e..1a395a4 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -477,6 +477,10 @@
{ kMsaSpecialMask | (0xf << 2), kMsa | (0x8 << 2), "ld", "kw" },
{ kMsaSpecialMask | (0xf << 2), kMsa | (0x9 << 2), "st", "kw" },
{ kMsaMask | (0x7 << 23), kMsa | (0x5 << 23) | 0x14, "ilvr", "Vkmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x1 << 23) | 0x12, "maddv", "Vkmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x2 << 23) | 0x12, "msubv", "Vkmn" },
+ { kMsaMask | (0xf << 22), kMsa | (0x4 << 22) | 0x1b, "fmadd", "Ukmn" },
+ { kMsaMask | (0xf << 22), kMsa | (0x5 << 22) | 0x1b, "fmsub", "Ukmn" },
};
static uint32_t ReadU32(const uint8_t* ptr) {
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index 9ac51f1..fb8e894 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -396,7 +396,7 @@
class_data_[klass].AddDirtyObject(entry, entry_remote);
}
- void DiffEntryContents(mirror::Object* entry, uint8_t* remote_bytes)
+ void DiffEntryContents(mirror::Object* entry, uint8_t* remote_bytes, const uint8_t* base_ptr)
REQUIRES_SHARED(Locks::mutator_lock_) {
const char* tabs = " ";
// Attempt to find fields for all dirty bytes.
@@ -414,10 +414,9 @@
// Examine the bytes comprising the Object, computing which fields are dirty
// and recording them for later display. If the Object is an array object,
// compute the dirty entries.
- const uint8_t* entry_bytes = reinterpret_cast<const uint8_t*>(entry);
mirror::Object* remote_entry = reinterpret_cast<mirror::Object*>(remote_bytes);
for (size_t i = 0, count = entry->SizeOf(); i < count; ++i) {
- if (entry_bytes[i] != remote_bytes[i]) {
+ if (base_ptr[i] != remote_bytes[i]) {
ArtField* field = ArtField::FindInstanceFieldWithOffset</*exact*/false>(klass, i);
if (field != nullptr) {
dirty_instance_fields.insert(field);
@@ -651,7 +650,8 @@
}
void DiffEntryContents(ArtMethod* method ATTRIBUTE_UNUSED,
- uint8_t* remote_bytes ATTRIBUTE_UNUSED)
+ uint8_t* remote_bytes ATTRIBUTE_UNUSED,
+ const uint8_t* base_ptr ATTRIBUTE_UNUSED)
REQUIRES_SHARED(Locks::mutator_lock_) {
}
@@ -753,29 +753,39 @@
<< RegionCommon<T>::GetFalseDirtyEntryCount() << " false dirty entries,\n "
<< RegionCommon<T>::GetFalseDirtyEntryBytes() << " false dirty entry [bytes], \n "
<< true_dirtied_percent << " different entries-vs-total in a dirty page;\n "
- << "";
+ << "\n";
- if (RegionCommon<T>::GetZygoteDirtyEntryCount() != 0) {
- // We only reach this point if both pids were specified. Furthermore,
- // entries are only displayed here if they differed in both the image
- // and the zygote, so they are probably private dirty.
- CHECK(remotes == RemoteProcesses::kImageAndZygote);
- os_ << "\n" << " Zygote dirty entries (probably shared dirty): ";
- DiffDirtyEntries(ProcessType::kZygote, begin_image_ptr, RegionCommon<T>::zygote_contents_);
- }
- os_ << "\n";
+ const uint8_t* base_ptr = begin_image_ptr;
switch (remotes) {
case RemoteProcesses::kZygoteOnly:
os_ << " Zygote shared dirty entries: ";
break;
case RemoteProcesses::kImageAndZygote:
os_ << " Application dirty entries (private dirty): ";
+ // If we are dumping private dirty, diff against the zygote map to make it clearer what
+ // fields caused the page to be private dirty.
+ base_ptr = &RegionCommon<T>::zygote_contents_->operator[](0);
break;
case RemoteProcesses::kImageOnly:
os_ << " Application dirty entries (unknown whether private or shared dirty): ";
break;
}
- DiffDirtyEntries(ProcessType::kRemote, begin_image_ptr, RegionCommon<T>::remote_contents_);
+ DiffDirtyEntries(ProcessType::kRemote,
+ begin_image_ptr,
+ RegionCommon<T>::remote_contents_,
+ base_ptr);
+ // Print shared dirty after since it's less important.
+ if (RegionCommon<T>::GetZygoteDirtyEntryCount() != 0) {
+ // We only reach this point if both pids were specified. Furthermore,
+ // entries are only displayed here if they differed in both the image
+ // and the zygote, so they are probably private dirty.
+ CHECK(remotes == RemoteProcesses::kImageAndZygote);
+ os_ << "\n" << " Zygote dirty entries (probably shared dirty): ";
+ DiffDirtyEntries(ProcessType::kZygote,
+ begin_image_ptr,
+ RegionCommon<T>::zygote_contents_,
+ begin_image_ptr);
+ }
RegionSpecializedBase<T>::DumpDirtyEntries();
RegionSpecializedBase<T>::DumpFalseDirtyEntries();
RegionSpecializedBase<T>::DumpCleanEntries();
@@ -786,7 +796,8 @@
void DiffDirtyEntries(ProcessType process_type,
const uint8_t* begin_image_ptr,
- std::vector<uint8_t>* contents)
+ std::vector<uint8_t>* contents,
+ const uint8_t* base_ptr)
REQUIRES_SHARED(Locks::mutator_lock_) {
os_ << RegionCommon<T>::dirty_entries_.size() << "\n";
const std::set<T*>& entries =
@@ -797,7 +808,7 @@
uint8_t* entry_bytes = reinterpret_cast<uint8_t*>(entry);
ptrdiff_t offset = entry_bytes - begin_image_ptr;
uint8_t* remote_bytes = &(*contents)[offset];
- RegionSpecializedBase<T>::DiffEntryContents(entry, remote_bytes);
+ RegionSpecializedBase<T>::DiffEntryContents(entry, remote_bytes, &base_ptr[offset]);
}
}
@@ -810,34 +821,42 @@
ptrdiff_t offset = current - begin_image_ptr;
T* entry_remote =
reinterpret_cast<T*>(const_cast<uint8_t*>(&(*RegionCommon<T>::remote_contents_)[offset]));
+ const bool have_zygote = !RegionCommon<T>::zygote_contents_->empty();
const uint8_t* current_zygote =
- RegionCommon<T>::zygote_contents_->empty() ? nullptr :
- &(*RegionCommon<T>::zygote_contents_)[offset];
+ have_zygote ? &(*RegionCommon<T>::zygote_contents_)[offset] : nullptr;
T* entry_zygote = reinterpret_cast<T*>(const_cast<uint8_t*>(current_zygote));
// Visit and classify entries at the current location.
RegionSpecializedBase<T>::VisitEntry(entry);
- bool different_image_entry = EntriesDiffer(entry, entry_remote);
- if (different_image_entry) {
- bool different_zygote_entry = false;
- if (entry_zygote != nullptr) {
- different_zygote_entry = EntriesDiffer(entry, entry_zygote);
- }
- if (different_zygote_entry) {
- // Different from zygote.
- RegionCommon<T>::AddZygoteDirtyEntry(entry);
- RegionSpecializedBase<T>::AddDirtyEntry(entry, entry_remote);
- } else {
- // Just different from image.
+
+ // Test private dirty first.
+ bool is_dirty = false;
+ if (have_zygote) {
+ bool private_dirty = EntriesDiffer(entry_zygote, entry_remote);
+ if (private_dirty) {
+ // Private dirty, app vs zygote.
+ is_dirty = true;
RegionCommon<T>::AddImageDirtyEntry(entry);
- RegionSpecializedBase<T>::AddDirtyEntry(entry, entry_remote);
}
+ if (EntriesDiffer(entry_zygote, entry)) {
+ // Shared dirty, zygote vs image.
+ is_dirty = true;
+ RegionCommon<T>::AddZygoteDirtyEntry(entry);
+ }
+ } else if (EntriesDiffer(entry_remote, entry)) {
+ // Shared or private dirty, app vs image.
+ is_dirty = true;
+ RegionCommon<T>::AddImageDirtyEntry(entry);
+ }
+ if (is_dirty) {
+ // TODO: Add support dirty entries in zygote and image.
+ RegionSpecializedBase<T>::AddDirtyEntry(entry, entry_remote);
} else {
RegionSpecializedBase<T>::AddCleanEntry(entry);
- }
- if (!different_image_entry && RegionCommon<T>::IsEntryOnDirtyPage(entry, dirty_pages)) {
- // This entry was either never mutated or got mutated back to the same value.
- // TODO: Do I want to distinguish a "different" vs a "dirty" page here?
- RegionSpecializedBase<T>::AddFalseDirtyEntry(entry);
+ if (RegionCommon<T>::IsEntryOnDirtyPage(entry, dirty_pages)) {
+ // This entry was either never mutated or got mutated back to the same value.
+ // TODO: Do I want to distinguish a "different" vs a "dirty" page here?
+ RegionSpecializedBase<T>::AddFalseDirtyEntry(entry);
+ }
}
}
@@ -945,7 +964,7 @@
return false;
}
// The boot map should be at the same address.
- tmp_zygote_contents.reserve(boot_map_size_);
+ tmp_zygote_contents.resize(boot_map_size_);
if (!zygote_map_file->PreadFully(&tmp_zygote_contents[0], boot_map_size_, boot_map_.start)) {
LOG(WARNING) << "Could not fully read zygote file " << zygote_file_name;
return false;
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 8d15c34..18a4c8c 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -50,7 +50,6 @@
"class_linker.cc",
"class_loader_context.cc",
"class_table.cc",
- "code_simulator_container.cc",
"common_throws.cc",
"compiler_filter.cc",
"debugger.cc",
@@ -629,5 +628,4 @@
subdirs = [
"openjdkjvm",
"openjdkjvmti",
- "simulator",
]
diff --git a/runtime/cha.cc b/runtime/cha.cc
index 8eeebf3..6c011e8 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -19,6 +19,7 @@
#include "art_method-inl.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
+#include "linear_alloc.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
@@ -581,4 +582,17 @@
}
}
+void ClassHierarchyAnalysis::RemoveDependenciesForLinearAlloc(const LinearAlloc* linear_alloc) {
+ MutexLock mu(Thread::Current(), *Locks::cha_lock_);
+ for (auto it = cha_dependency_map_.begin(); it != cha_dependency_map_.end(); ) {
+ // Use unsafe to avoid locking since the allocator is going to be deleted.
+ if (linear_alloc->ContainsUnsafe(it->first)) {
+ // About to delete the ArtMethod, erase the entry from the map.
+ it = cha_dependency_map_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
} // namespace art
diff --git a/runtime/cha.h b/runtime/cha.h
index 99224e0..40999dd 100644
--- a/runtime/cha.h
+++ b/runtime/cha.h
@@ -29,6 +29,7 @@
namespace art {
class ArtMethod;
+class LinearAlloc;
/**
* Class Hierarchy Analysis (CHA) tries to devirtualize virtual calls into
@@ -112,6 +113,11 @@
// Update CHA info for methods that `klass` overrides, after loading `klass`.
void UpdateAfterLoadingOf(Handle<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ // Remove all of the dependencies for a linear allocator. This is called when dex cache unloading
+ // occurs.
+ void RemoveDependenciesForLinearAlloc(const LinearAlloc* linear_alloc)
+ REQUIRES(!Locks::cha_lock_);
+
private:
void InitSingleImplementationFlag(Handle<mirror::Class> klass,
ArtMethod* method,
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index c5e11f1..e5ed8ae 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -370,7 +370,10 @@
quick_imt_conflict_trampoline_(nullptr),
quick_generic_jni_trampoline_(nullptr),
quick_to_interpreter_bridge_trampoline_(nullptr),
- image_pointer_size_(kRuntimePointerSize) {
+ image_pointer_size_(kRuntimePointerSize),
+ cha_(Runtime::Current()->IsAotCompiler() ? nullptr : new ClassHierarchyAnalysis()) {
+ // For CHA disabled during Aot, see b/34193647.
+
CHECK(intern_table_ != nullptr);
static_assert(kFindArrayCacheSize == arraysize(find_array_class_cache_),
"Array cache size wrong.");
@@ -1138,49 +1141,6 @@
const ImageHeader& header_;
};
-class VerifyClassInTableArtMethodVisitor : public ArtMethodVisitor {
- public:
- explicit VerifyClassInTableArtMethodVisitor(ClassTable* table) : table_(table) {}
-
- virtual void Visit(ArtMethod* method)
- REQUIRES_SHARED(Locks::mutator_lock_, Locks::classlinker_classes_lock_) {
- ObjPtr<mirror::Class> klass = method->GetDeclaringClass();
- if (klass != nullptr && !Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
- CHECK_EQ(table_->LookupByDescriptor(klass), klass) << mirror::Class::PrettyClass(klass);
- }
- }
-
- private:
- ClassTable* const table_;
-};
-
-class VerifyDirectInterfacesInTableClassVisitor {
- public:
- explicit VerifyDirectInterfacesInTableClassVisitor(ObjPtr<mirror::ClassLoader> class_loader)
- : class_loader_(class_loader), self_(Thread::Current()) { }
-
- bool operator()(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) {
- if (!klass->IsPrimitive() && klass->GetClassLoader() == class_loader_) {
- classes_.push_back(klass);
- }
- return true;
- }
-
- void Check() const REQUIRES_SHARED(Locks::mutator_lock_) {
- for (ObjPtr<mirror::Class> klass : classes_) {
- for (uint32_t i = 0, num = klass->NumDirectInterfaces(); i != num; ++i) {
- CHECK(klass->GetDirectInterface(self_, klass, i) != nullptr)
- << klass->PrettyDescriptor() << " iface #" << i;
- }
- }
- }
-
- private:
- ObjPtr<mirror::ClassLoader> class_loader_;
- Thread* self_;
- std::vector<ObjPtr<mirror::Class>> classes_;
-};
-
class VerifyDeclaringClassVisitor : public ArtMethodVisitor {
public:
VerifyDeclaringClassVisitor() REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
@@ -1763,6 +1723,63 @@
std::vector<const ImageSection*> runtime_method_sections_;
};
+static void VerifyAppImage(const ImageHeader& header,
+ const Handle<mirror::ClassLoader>& class_loader,
+ const Handle<mirror::ObjectArray<mirror::DexCache> >& dex_caches,
+ ClassTable* class_table, gc::space::ImageSpace* space)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ {
+ class VerifyClassInTableArtMethodVisitor : public ArtMethodVisitor {
+ public:
+ explicit VerifyClassInTableArtMethodVisitor(ClassTable* table) : table_(table) {}
+
+ virtual void Visit(ArtMethod* method)
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::classlinker_classes_lock_) {
+ ObjPtr<mirror::Class> klass = method->GetDeclaringClass();
+ if (klass != nullptr && !Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
+ CHECK_EQ(table_->LookupByDescriptor(klass), klass) << mirror::Class::PrettyClass(klass);
+ }
+ }
+
+ private:
+ ClassTable* const table_;
+ };
+ VerifyClassInTableArtMethodVisitor visitor(class_table);
+ header.VisitPackedArtMethods(&visitor, space->Begin(), kRuntimePointerSize);
+ }
+ {
+ // Verify that all direct interfaces of classes in the class table are also resolved.
+ std::vector<ObjPtr<mirror::Class>> classes;
+ auto verify_direct_interfaces_in_table = [&](ObjPtr<mirror::Class> klass)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!klass->IsPrimitive() && klass->GetClassLoader() == class_loader.Get()) {
+ classes.push_back(klass);
+ }
+ return true;
+ };
+ class_table->Visit(verify_direct_interfaces_in_table);
+ Thread* self = Thread::Current();
+ for (ObjPtr<mirror::Class> klass : classes) {
+ for (uint32_t i = 0, num = klass->NumDirectInterfaces(); i != num; ++i) {
+ CHECK(klass->GetDirectInterface(self, klass, i) != nullptr)
+ << klass->PrettyDescriptor() << " iface #" << i;
+ }
+ }
+ }
+ // Check that all non-primitive classes in dex caches are also in the class table.
+ for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
+ ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get(i);
+ mirror::TypeDexCacheType* const types = dex_cache->GetResolvedTypes();
+ for (int32_t j = 0, num_types = dex_cache->NumResolvedTypes(); j < num_types; j++) {
+ ObjPtr<mirror::Class> klass = types[j].load(std::memory_order_relaxed).object.Read();
+ if (klass != nullptr && !klass->IsPrimitive()) {
+ CHECK(class_table->Contains(klass))
+ << klass->PrettyDescriptor() << " " << dex_cache->GetDexFile()->GetLocation();
+ }
+ }
+ }
+}
+
bool ClassLinker::AddImageSpace(
gc::space::ImageSpace* space,
Handle<mirror::ClassLoader> class_loader,
@@ -2016,28 +2033,13 @@
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
class_table->AddClassSet(std::move(temp_set));
}
+
if (kIsDebugBuild && app_image) {
// This verification needs to happen after the classes have been added to the class loader.
// Since it ensures classes are in the class table.
- VerifyClassInTableArtMethodVisitor visitor2(class_table);
- header.VisitPackedArtMethods(&visitor2, space->Begin(), kRuntimePointerSize);
- // Verify that all direct interfaces of classes in the class table are also resolved.
- VerifyDirectInterfacesInTableClassVisitor visitor(class_loader.Get());
- class_table->Visit(visitor);
- visitor.Check();
- // Check that all non-primitive classes in dex caches are also in the class table.
- for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
- ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get(i);
- mirror::TypeDexCacheType* const types = dex_cache->GetResolvedTypes();
- for (int32_t j = 0, num_types = dex_cache->NumResolvedTypes(); j < num_types; j++) {
- ObjPtr<mirror::Class> klass = types[j].load(std::memory_order_relaxed).object.Read();
- if (klass != nullptr && !klass->IsPrimitive()) {
- CHECK(class_table->Contains(klass)) << klass->PrettyDescriptor()
- << " " << dex_cache->GetDexFile()->GetLocation();
- }
- }
- }
+ VerifyAppImage(header, class_loader, dex_caches, class_table, space);
}
+
VLOG(class_linker) << "Adding image space took " << PrettyDuration(NanoTime() - start_time);
return true;
}
@@ -2318,8 +2320,12 @@
if (runtime->GetJit() != nullptr) {
jit::JitCodeCache* code_cache = runtime->GetJit()->GetCodeCache();
if (code_cache != nullptr) {
+ // For the JIT case, RemoveMethodsIn removes the CHA dependencies.
code_cache->RemoveMethodsIn(self, *data.allocator);
}
+ } else if (cha_ != nullptr) {
+ // If we don't have a JIT, we need to remove the CHA dependencies manually.
+ cha_->RemoveDependenciesForLinearAlloc(data.allocator);
}
delete data.allocator;
delete data.class_table;
@@ -3489,7 +3495,8 @@
ObjPtr<mirror::DexCache> dex_cache) {
CHECK(dex_cache != nullptr) << dex_file.GetLocation();
boot_class_path_.push_back(&dex_file);
- RegisterBootClassPathDexFile(dex_file, dex_cache);
+ WriterMutexLock mu(Thread::Current(), *Locks::dex_lock_);
+ RegisterDexFileLocked(dex_file, dex_cache, /* class_loader */ nullptr);
}
void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
@@ -3672,12 +3679,6 @@
return h_dex_cache.Get();
}
-void ClassLinker::RegisterBootClassPathDexFile(const DexFile& dex_file,
- ObjPtr<mirror::DexCache> dex_cache) {
- WriterMutexLock mu(Thread::Current(), *Locks::dex_lock_);
- RegisterDexFileLocked(dex_file, dex_cache, /* class_loader */ nullptr);
-}
-
bool ClassLinker::IsDexFileRegistered(Thread* self, const DexFile& dex_file) {
ReaderMutexLock mu(self, *Locks::dex_lock_);
return DecodeDexCache(self, FindDexCacheDataLocked(dex_file)) != nullptr;
@@ -5482,7 +5483,9 @@
// Update CHA info based on whether we override methods.
// Have to do this before setting the class as resolved which allows
// instantiation of klass.
- Runtime::Current()->GetClassHierarchyAnalysis()->UpdateAfterLoadingOf(klass);
+ if (cha_ != nullptr) {
+ cha_->UpdateAfterLoadingOf(klass);
+ }
// This will notify waiters on klass that saw the not yet resolved
// class in the class_table_ during EnsureResolved.
@@ -5530,7 +5533,9 @@
// Update CHA info based on whether we override methods.
// Have to do this before setting the class as resolved which allows
// instantiation of klass.
- Runtime::Current()->GetClassHierarchyAnalysis()->UpdateAfterLoadingOf(h_new_class);
+ if (cha_ != nullptr) {
+ cha_->UpdateAfterLoadingOf(h_new_class);
+ }
// This will notify waiters on temp class that saw the not yet resolved class in the
// class_table_ during EnsureResolved.
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index cb28187..62fb45b 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -60,6 +60,7 @@
using MethodDexCacheType = std::atomic<MethodDexCachePair>;
} // namespace mirror
+class ClassHierarchyAnalysis;
class ClassTable;
template<class T> class Handle;
class ImtConflictTable;
@@ -396,9 +397,6 @@
ObjPtr<mirror::ClassLoader> class_loader)
REQUIRES(!Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- void RegisterBootClassPathDexFile(const DexFile& dex_file, ObjPtr<mirror::DexCache> dex_cache)
- REQUIRES(!Locks::dex_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
const std::vector<const DexFile*>& GetBootClassPath() {
return boot_class_path_;
@@ -672,6 +670,10 @@
bool ValidateSuperClassDescriptors(Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
+ ClassHierarchyAnalysis* GetClassHierarchyAnalysis() {
+ return cha_.get();
+ }
+
struct DexCacheData {
// Construct an invalid data object.
DexCacheData()
@@ -718,7 +720,7 @@
REQUIRES(!Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- static void DeleteClassLoader(Thread* self, const ClassLoaderData& data)
+ void DeleteClassLoader(Thread* self, const ClassLoaderData& data)
REQUIRES_SHARED(Locks::mutator_lock_);
void VisitClassesInternal(ClassVisitor* visitor)
@@ -1268,6 +1270,8 @@
// Image pointer size.
PointerSize image_pointer_size_;
+ std::unique_ptr<ClassHierarchyAnalysis> cha_;
+
class FindVirtualMethodHolderVisitor;
friend class AppImageClassLoadersAndDexCachesHelper;
diff --git a/runtime/class_loader_context.cc b/runtime/class_loader_context.cc
index eab3b86..ff440d7 100644
--- a/runtime/class_loader_context.cc
+++ b/runtime/class_loader_context.cc
@@ -145,6 +145,10 @@
// ClasspathElem is the path of dex/jar/apk file.
bool ClassLoaderContext::Parse(const std::string& spec, bool parse_checksums) {
if (spec.empty()) {
+ // By default we load the dex files in a PathClassLoader.
+ // So an empty spec is equivalent to an empty PathClassLoader (this happens when running
+ // tests).
+ class_loader_chain_.push_back(ClassLoaderInfo(kPathClassLoader));
return true;
}
@@ -265,11 +269,15 @@
return OatFile::kSpecialSharedLibrary;
}
- if (class_loader_chain_.empty()) {
- return "";
- }
-
std::ostringstream out;
+ if (class_loader_chain_.empty()) {
+ // We can get in this situation if the context was created with a class path containing the
+ // source dex files which were later removed (happens during run-tests).
+ out << GetClassLoaderTypeName(kPathClassLoader)
+ << kClassLoaderOpeningMark
+ << kClassLoaderClosingMark;
+ return out.str();
+ }
for (size_t i = 0; i < class_loader_chain_.size(); i++) {
const ClassLoaderInfo& info = class_loader_chain_[i];
diff --git a/runtime/class_loader_context_test.cc b/runtime/class_loader_context_test.cc
index 5655aec..2b85188 100644
--- a/runtime/class_loader_context_test.cc
+++ b/runtime/class_loader_context_test.cc
@@ -161,6 +161,19 @@
}
};
+TEST_F(ClassLoaderContextTest, ParseValidEmptyContext) {
+ std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create("");
+ // An empty context should create a single empty PathClassLoader.
+ VerifyContextSize(context.get(), 1);
+ VerifyClassLoaderPCL(context.get(), 0, "");
+}
+
+TEST_F(ClassLoaderContextTest, ParseValidSharedLibraryContext) {
+ std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create("&");
+ // A shared library context should have no class loader in the chain.
+ VerifyContextSize(context.get(), 0);
+}
+
TEST_F(ClassLoaderContextTest, ParseValidContextPCL) {
std::unique_ptr<ClassLoaderContext> context =
ClassLoaderContext::Create("PCL[a.dex]");
@@ -312,6 +325,34 @@
soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader));
}
+TEST_F(ClassLoaderContextTest, CreateClassLoaderWithSharedLibraryContext) {
+ std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create("&");
+
+ ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, ""));
+
+ std::vector<std::unique_ptr<const DexFile>> compilation_sources = OpenTestDexFiles("MultiDex");
+
+ std::vector<const DexFile*> compilation_sources_raw =
+ MakeNonOwningPointerVector(compilation_sources);
+ jobject jclass_loader = context->CreateClassLoader(compilation_sources_raw);
+ ASSERT_TRUE(jclass_loader != nullptr);
+
+ ScopedObjectAccess soa(Thread::Current());
+
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader = hs.NewHandle(
+ soa.Decode<mirror::ClassLoader>(jclass_loader));
+
+ // A shared library context should create a single PathClassLoader with only the compilation
+ // sources.
+ VerifyClassLoaderDexFiles(soa,
+ class_loader,
+ WellKnownClasses::dalvik_system_PathClassLoader,
+ compilation_sources_raw);
+ ASSERT_TRUE(class_loader->GetParent()->GetClass() ==
+ soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader));
+}
+
TEST_F(ClassLoaderContextTest, CreateClassLoaderWithComplexChain) {
// Setup the context.
std::vector<std::unique_ptr<const DexFile>> classpath_dex_a = OpenTestDexFiles("ForClassLoaderA");
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index a6c5d6c..be3e4f8 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -476,7 +476,7 @@
case kDirect:
return resolved_method;
case kVirtual: {
- mirror::Class* klass = (*this_object)->GetClass();
+ ObjPtr<mirror::Class> klass = (*this_object)->GetClass();
uint16_t vtable_index = resolved_method->GetMethodIndex();
if (access_check &&
(!klass->HasVTable() ||
@@ -509,7 +509,7 @@
// It is not an interface. If the referring class is in the class hierarchy of the
// referenced class in the bytecode, we use its super class. Otherwise, we throw
// a NoSuchMethodError.
- mirror::Class* super_class = nullptr;
+ ObjPtr<mirror::Class> super_class = nullptr;
if (method_reference_class->IsAssignableFrom(h_referring_class.Get())) {
super_class = h_referring_class->GetSuperClass();
}
@@ -554,11 +554,10 @@
case kInterface: {
uint32_t imt_index = ImTable::GetImtIndex(resolved_method);
PointerSize pointer_size = class_linker->GetImagePointerSize();
- ArtMethod* imt_method = (*this_object)->GetClass()->GetImt(pointer_size)->
- Get(imt_index, pointer_size);
+ ObjPtr<mirror::Class> klass = (*this_object)->GetClass();
+ ArtMethod* imt_method = klass->GetImt(pointer_size)->Get(imt_index, pointer_size);
if (!imt_method->IsRuntimeMethod()) {
if (kIsDebugBuild) {
- mirror::Class* klass = (*this_object)->GetClass();
ArtMethod* method = klass->FindVirtualMethodForInterface(
resolved_method, class_linker->GetImagePointerSize());
CHECK_EQ(imt_method, method) << ArtMethod::PrettyMethod(resolved_method) << " / "
@@ -568,7 +567,7 @@
}
return imt_method;
} else {
- ArtMethod* interface_method = (*this_object)->GetClass()->FindVirtualMethodForInterface(
+ ArtMethod* interface_method = klass->FindVirtualMethodForInterface(
resolved_method, class_linker->GetImagePointerSize());
if (UNLIKELY(interface_method == nullptr)) {
ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method,
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 6250d9f..e08319d 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2483,43 +2483,16 @@
Thread* self,
ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjPtr<mirror::Object> this_object(raw_this_object);
ScopedQuickEntrypointChecks sqec(self);
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> cls(hs.NewHandle(this_object->GetClass()));
+ StackHandleScope<2> hs(self);
+ Handle<mirror::Object> this_object = hs.NewHandle(raw_this_object);
+ Handle<mirror::Class> cls = hs.NewHandle(this_object->GetClass());
ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
ArtMethod* method = nullptr;
ImTable* imt = cls->GetImt(kRuntimePointerSize);
- if (LIKELY(interface_method != nullptr)) {
- DCHECK_NE(interface_method->GetDexMethodIndex(), DexFile::kDexNoIndex);
- // If the interface method is already resolved, look whether we have a match in the
- // ImtConflictTable.
- ArtMethod* conflict_method = imt->Get(ImTable::GetImtIndex(interface_method),
- kRuntimePointerSize);
- if (LIKELY(conflict_method->IsRuntimeMethod())) {
- ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize);
- DCHECK(current_table != nullptr);
- method = current_table->Lookup(interface_method, kRuntimePointerSize);
- } else {
- // It seems we aren't really a conflict method!
- method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize);
- }
- if (method != nullptr) {
- return GetTwoWordSuccessValue(
- reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()),
- reinterpret_cast<uintptr_t>(method));
- }
-
- // No match, use the IfTable.
- method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize);
- if (UNLIKELY(method == nullptr)) {
- ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(
- interface_method, this_object, caller_method);
- return GetTwoWordFailureValue(); // Failure.
- }
- } else {
+ if (UNLIKELY(interface_method == nullptr)) {
// The interface method is unresolved, so resolve it in the dex file of the caller.
// Fetch the dex_method_idx of the target interface method from the caller.
uint32_t dex_method_idx;
@@ -2538,50 +2511,74 @@
dex_method_idx = instr->VRegB_3rc();
}
- const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()
- ->GetDexFile();
+ const DexFile& dex_file = caller_method->GetDeclaringClass()->GetDexFile();
uint32_t shorty_len;
- const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx),
- &shorty_len);
+ const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(dex_method_idx),
+ &shorty_len);
{
- // Remember the args in case a GC happens in FindMethodFromCode.
+ // Remember the args in case a GC happens in ClassLinker::ResolveMethod().
ScopedObjectAccessUnchecked soa(self->GetJniEnv());
RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
visitor.VisitArguments();
- method = FindMethodFromCode<kInterface, false>(dex_method_idx,
- &this_object,
- caller_method,
- self);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ interface_method = class_linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
+ self, dex_method_idx, caller_method, kInterface);
visitor.FixupReferences();
}
- if (UNLIKELY(method == nullptr)) {
+ if (UNLIKELY(interface_method == nullptr)) {
CHECK(self->IsExceptionPending());
return GetTwoWordFailureValue(); // Failure.
}
- interface_method =
- caller_method->GetDexCacheResolvedMethod(dex_method_idx, kRuntimePointerSize);
- DCHECK(!interface_method->IsRuntimeMethod());
+ }
+
+ DCHECK(!interface_method->IsRuntimeMethod());
+ // Look whether we have a match in the ImtConflictTable.
+ uint32_t imt_index = ImTable::GetImtIndex(interface_method);
+ ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize);
+ if (LIKELY(conflict_method->IsRuntimeMethod())) {
+ ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize);
+ DCHECK(current_table != nullptr);
+ method = current_table->Lookup(interface_method, kRuntimePointerSize);
+ } else {
+ // It seems we aren't really a conflict method!
+ if (kIsDebugBuild) {
+ ArtMethod* m = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize);
+ CHECK_EQ(conflict_method, m)
+ << interface_method->PrettyMethod() << " / " << conflict_method->PrettyMethod() << " / "
+ << " / " << ArtMethod::PrettyMethod(m) << " / " << cls->PrettyClass();
+ }
+ method = conflict_method;
+ }
+ if (method != nullptr) {
+ return GetTwoWordSuccessValue(
+ reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()),
+ reinterpret_cast<uintptr_t>(method));
+ }
+
+ // No match, use the IfTable.
+ method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize);
+ if (UNLIKELY(method == nullptr)) {
+ ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(
+ interface_method, this_object.Get(), caller_method);
+ return GetTwoWordFailureValue(); // Failure.
}
// We arrive here if we have found an implementation, and it is not in the ImtConflictTable.
// We create a new table with the new pair { interface_method, method }.
- uint32_t imt_index = ImTable::GetImtIndex(interface_method);
- ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize);
- if (conflict_method->IsRuntimeMethod()) {
- ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable(
- cls.Get(),
- conflict_method,
- interface_method,
- method,
- /*force_new_conflict_method*/false);
- if (new_conflict_method != conflict_method) {
- // Update the IMT if we create a new conflict method. No fence needed here, as the
- // data is consistent.
- imt->Set(imt_index,
- new_conflict_method,
- kRuntimePointerSize);
- }
+ DCHECK(conflict_method->IsRuntimeMethod());
+ ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable(
+ cls.Get(),
+ conflict_method,
+ interface_method,
+ method,
+ /*force_new_conflict_method*/false);
+ if (new_conflict_method != conflict_method) {
+ // Update the IMT if we create a new conflict method. No fence needed here, as the
+ // data is consistent.
+ imt->Set(imt_index,
+ new_conflict_method,
+ kRuntimePointerSize);
}
const void* code = method->GetEntryPointFromQuickCompiledCode();
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index 8c06cfd..bd5f77e 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -19,6 +19,7 @@
#include <stdint.h>
#include <memory>
+#include "base/mutex.h"
#include "common_runtime_test.h"
#include "globals.h"
#include "space_bitmap-inl.h"
@@ -145,22 +146,21 @@
explicit RandGen(uint32_t seed) : val_(seed) {}
uint32_t next() {
- val_ = val_ * 48271 % 2147483647;
+ val_ = val_ * 48271 % 2147483647 + 13;
return val_;
}
uint32_t val_;
};
-template <size_t kAlignment>
-void RunTest() NO_THREAD_SAFETY_ANALYSIS {
+template <size_t kAlignment, typename TestFn>
+static void RunTest(TestFn&& fn) NO_THREAD_SAFETY_ANALYSIS {
uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x10000000);
size_t heap_capacity = 16 * MB;
// Seed with 0x1234 for reproducability.
RandGen r(0x1234);
-
for (int i = 0; i < 5 ; ++i) {
std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
@@ -177,15 +177,9 @@
}
for (int j = 0; j < 50; ++j) {
- size_t count = 0;
- SimpleCounter c(&count);
-
- size_t offset = RoundDown(r.next() % heap_capacity, kAlignment);
- size_t remain = heap_capacity - offset;
- size_t end = offset + RoundDown(r.next() % (remain + 1), kAlignment);
-
- space_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(heap_begin) + offset,
- reinterpret_cast<uintptr_t>(heap_begin) + end, c);
+ const size_t offset = RoundDown(r.next() % heap_capacity, kAlignment);
+ const size_t remain = heap_capacity - offset;
+ const size_t end = offset + RoundDown(r.next() % (remain + 1), kAlignment);
size_t manual = 0;
for (uintptr_t k = offset; k < end; k += kAlignment) {
@@ -194,17 +188,73 @@
}
}
- EXPECT_EQ(count, manual);
+ uintptr_t range_begin = reinterpret_cast<uintptr_t>(heap_begin) + offset;
+ uintptr_t range_end = reinterpret_cast<uintptr_t>(heap_begin) + end;
+
+ fn(space_bitmap.get(), range_begin, range_end, manual);
}
}
}
+template <size_t kAlignment>
+static void RunTestCount() {
+ auto count_test_fn = [](ContinuousSpaceBitmap* space_bitmap,
+ uintptr_t range_begin,
+ uintptr_t range_end,
+ size_t manual_count) {
+ size_t count = 0;
+ auto count_fn = [&count](mirror::Object* obj ATTRIBUTE_UNUSED) {
+ count++;
+ };
+ space_bitmap->VisitMarkedRange(range_begin, range_end, count_fn);
+ EXPECT_EQ(count, manual_count);
+ };
+ RunTest<kAlignment>(count_test_fn);
+}
+
TEST_F(SpaceBitmapTest, VisitorObjectAlignment) {
- RunTest<kObjectAlignment>();
+ RunTestCount<kObjectAlignment>();
}
TEST_F(SpaceBitmapTest, VisitorPageAlignment) {
- RunTest<kPageSize>();
+ RunTestCount<kPageSize>();
+}
+
+template <size_t kAlignment>
+void RunTestOrder() {
+ auto order_test_fn = [](ContinuousSpaceBitmap* space_bitmap,
+ uintptr_t range_begin,
+ uintptr_t range_end,
+ size_t manual_count)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ mirror::Object* last_ptr = nullptr;
+ auto order_check = [&last_ptr](mirror::Object* obj) {
+ EXPECT_LT(last_ptr, obj);
+ last_ptr = obj;
+ };
+
+ // Test complete walk.
+ space_bitmap->Walk(order_check);
+ if (manual_count > 0) {
+ EXPECT_NE(nullptr, last_ptr);
+ }
+
+ // Test range.
+ last_ptr = nullptr;
+ space_bitmap->VisitMarkedRange(range_begin, range_end, order_check);
+ if (manual_count > 0) {
+ EXPECT_NE(nullptr, last_ptr);
+ }
+ };
+ RunTest<kAlignment>(order_test_fn);
+}
+
+TEST_F(SpaceBitmapTest, OrderObjectAlignment) {
+ RunTestOrder<kObjectAlignment>();
+}
+
+TEST_F(SpaceBitmapTest, OrderPageAlignment) {
+ RunTestOrder<kPageSize>();
}
} // namespace accounting
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 3bee560..a030a51 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -121,6 +121,10 @@
return nullptr;
}
+ // Align both capacities to page size, as that's the unit mspaces use.
+ initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
+ max_capacity = RoundDown(max_capacity, 2 * kPageSize);
+
std::string error_str;
// Map name specific for android_os_Debug.cpp accounting.
// Map in low 4gb to simplify accessing root tables for x86_64.
@@ -142,22 +146,21 @@
return nullptr;
}
- // Align both capacities to page size, as that's the unit mspaces use.
- initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
- max_capacity = RoundDown(max_capacity, 2 * kPageSize);
-
// Create a region for JIT data and executable code. This will be
// laid out as:
//
// +----------------+ --------------------
- // : : ^ ^
+ // | code_sync_map_ | ^ code_sync_size ^
+ // | | v |
+ // +----------------+ -- |
+ // : : ^ |
// : post_code_map : | post_code_size |
// : [padding] : v |
// +----------------+ - |
// | | ^ |
- // | code_map | | code_size |
+ // | code_map | | code_size | total_mapping_size
// | [JIT Code] | v |
- // +----------------+ - | total_mapping_size
+ // +----------------+ - |
// : : ^ |
// : pre_code_map : | pre_code_size |
// : [padding] : v |
@@ -167,17 +170,23 @@
// | [Jit Data] | v v
// +----------------+ --------------------
//
+ // The code_sync_map_ contains a page that we use to flush CPU instruction
+ // pipelines (see FlushInstructionPipelines()).
+ //
// The padding regions - pre_code_map and post_code_map - exist to
// put some random distance between the writable JIT code mapping
// and the executable mapping. The padding is discarded at the end
// of this function.
- size_t total_mapping_size = kMaxMapSpacingPages * kPageSize;
- size_t data_size = RoundUp((max_capacity - total_mapping_size) / 2, kPageSize);
+ //
+ size_t data_size = (max_capacity - kMaxMapSpacingPages * kPageSize) / 2;
size_t pre_code_size =
- GetRandomNumber(kMinMapSpacingPages, kMaxMapSpacingPages) * kPageSize;
- size_t code_size = max_capacity - total_mapping_size - data_size;
- size_t post_code_size = total_mapping_size - pre_code_size;
- DCHECK_EQ(code_size + data_size + total_mapping_size, max_capacity);
+ GetRandomNumber(kMinMapSpacingPages, kMaxMapSpacingPages - 1) * kPageSize;
+ size_t code_size = max_capacity - data_size - kMaxMapSpacingPages * kPageSize;
+ size_t code_sync_size = kPageSize;
+ size_t post_code_size = kMaxMapSpacingPages * kPageSize - pre_code_size - code_sync_size;
+ DCHECK_EQ(data_size, code_size);
+ DCHECK_EQ(pre_code_size + post_code_size + code_sync_size, kMaxMapSpacingPages * kPageSize);
+ DCHECK_EQ(data_size + pre_code_size + code_size + post_code_size + code_sync_size, max_capacity);
// Create pre-code padding region after data region, discarded after
// code and data regions are set-up.
@@ -191,7 +200,7 @@
return nullptr;
}
DCHECK_EQ(data_map->Size(), data_size);
- DCHECK_EQ(pre_code_map->Size(), pre_code_size + code_size + post_code_size);
+ DCHECK_EQ(pre_code_map->Size(), pre_code_size + code_size + post_code_size + code_sync_size);
// Create code region.
unique_fd writable_code_fd;
@@ -206,7 +215,7 @@
return nullptr;
}
DCHECK_EQ(pre_code_map->Size(), pre_code_size);
- DCHECK_EQ(code_map->Size(), code_size + post_code_size);
+ DCHECK_EQ(code_map->Size(), code_size + post_code_size + code_sync_size);
// Padding after code region, discarded after code and data regions
// are set-up.
@@ -220,7 +229,19 @@
return nullptr;
}
DCHECK_EQ(code_map->Size(), code_size);
+ DCHECK_EQ(post_code_map->Size(), post_code_size + code_sync_size);
+
+ std::unique_ptr<MemMap> code_sync_map(SplitMemMap(post_code_map.get(),
+ "jit-code-sync",
+ post_code_size,
+ kProtCode,
+ error_msg,
+ use_ashmem));
+ if (code_sync_map == nullptr) {
+ return nullptr;
+ }
DCHECK_EQ(post_code_map->Size(), post_code_size);
+ DCHECK_EQ(code_sync_map->Size(), code_sync_size);
std::unique_ptr<MemMap> writable_code_map;
if (use_two_mappings) {
@@ -246,6 +267,7 @@
return new JitCodeCache(writable_code_map.release(),
code_map.release(),
data_map.release(),
+ code_sync_map.release(),
code_size,
data_size,
max_capacity,
@@ -255,6 +277,7 @@
JitCodeCache::JitCodeCache(MemMap* writable_code_map,
MemMap* executable_code_map,
MemMap* data_map,
+ MemMap* code_sync_map,
size_t initial_code_capacity,
size_t initial_data_capacity,
size_t max_capacity,
@@ -265,6 +288,7 @@
data_map_(data_map),
executable_code_map_(executable_code_map),
writable_code_map_(writable_code_map),
+ code_sync_map_(code_sync_map),
max_capacity_(max_capacity),
current_capacity_(initial_code_capacity + initial_data_capacity),
code_end_(initial_code_capacity),
@@ -382,7 +406,7 @@
class ScopedCodeCacheWrite : ScopedTrace {
public:
- explicit ScopedCodeCacheWrite(JitCodeCache* code_cache, bool only_for_tlb_shootdown = false)
+ explicit ScopedCodeCacheWrite(JitCodeCache* code_cache)
: ScopedTrace("ScopedCodeCacheWrite") {
ScopedTrace trace("mprotect all");
int prot_to_start_writing = kProtAll;
@@ -398,7 +422,7 @@
writable_map_ = code_cache->GetWritableMemMap();
// If we're using ScopedCacheWrite only for TLB shootdown, we limit the scope of mprotect to
// one page.
- size_ = only_for_tlb_shootdown ? kPageSize : writable_map_->Size();
+ size_ = writable_map_->Size();
CHECKED_MPROTECT(writable_map_->Begin(), size_, prot_to_start_writing);
}
~ScopedCodeCacheWrite() {
@@ -424,7 +448,6 @@
size_t fp_spill_mask,
const uint8_t* code,
size_t code_size,
- size_t data_size,
bool osr,
Handle<mirror::ObjectArray<mirror::Object>> roots,
bool has_should_deoptimize_flag,
@@ -439,7 +462,6 @@
fp_spill_mask,
code,
code_size,
- data_size,
osr,
roots,
has_should_deoptimize_flag,
@@ -457,7 +479,6 @@
fp_spill_mask,
code,
code_size,
- data_size,
osr,
roots,
has_should_deoptimize_flag,
@@ -621,7 +642,7 @@
// method_headers are expected to be in the executable region.
{
MutexLock mu(Thread::Current(), *Locks::cha_lock_);
- Runtime::Current()->GetClassHierarchyAnalysis()
+ Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
->RemoveDependentsWithMethodHeaders(method_headers);
}
@@ -744,6 +765,20 @@
method->SetCounter(std::min(jit_warmup_threshold - 1, 1));
}
+static void FlushInstructionPiplines(uint8_t* sync_page) {
+ // After updating the JIT code cache we need to force all CPUs to
+ // flush their instruction pipelines. In the absence of system call
+ // to do this explicitly, we can achieve this indirectly by toggling
+ // permissions on an executable page. This should send an IPI to
+ // each core to update the TLB entry with the interrupt raised on
+ // each core causing the instruction pipeline to be flushed.
+ CHECKED_MPROTECT(sync_page, kPageSize, kProtAll);
+ // Ensure the sync_page is present, otherwise a TLB update may not be
+ // necessary.
+ sync_page[0] = 0;
+ CHECKED_MPROTECT(sync_page, kPageSize, kProtCode);
+}
+
#ifdef __aarch64__
static void FlushJitCodeCacheRange(uint8_t* code_ptr,
@@ -863,7 +898,6 @@
size_t fp_spill_mask,
const uint8_t* code,
size_t code_size,
- size_t data_size,
bool osr,
Handle<mirror::ObjectArray<mirror::Object>> roots,
bool has_should_deoptimize_flag,
@@ -906,6 +940,7 @@
code_size);
FlushJitCodeCacheRange(code_ptr, writable_ptr, code_size);
+ FlushInstructionPiplines(code_sync_map_->Begin());
DCHECK(!Runtime::Current()->IsAotCompiler());
if (has_should_deoptimize_flag) {
@@ -942,7 +977,7 @@
<< "Should not be using cha on debuggable apps/runs!";
for (ArtMethod* single_impl : cha_single_implementation_list) {
- Runtime::Current()->GetClassHierarchyAnalysis()->AddDependency(
+ Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()->AddDependency(
single_impl, method, method_header);
}
@@ -955,13 +990,10 @@
DCHECK_EQ(FromStackMapToRoots(stack_map), roots_data);
DCHECK_LE(roots_data, stack_map);
FillRootTable(roots_data, roots);
- {
- // Flush data cache, as compiled code references literals in it.
- // We also need a TLB shootdown to act as memory barrier across cores.
- ScopedCodeCacheWrite ccw(this, /* only_for_tlb_shootdown */ true);
- FlushDataCache(reinterpret_cast<char*>(roots_data),
- reinterpret_cast<char*>(roots_data + data_size));
- }
+
+ // Ensure the updates to the root table are visible with a store fence.
+ QuasiAtomic::ThreadFenceSequentiallyConsistent();
+
method_code_map_.Put(code_ptr, method);
if (osr) {
number_of_osr_compilations_++;
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index a062ce4..175501f 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -113,7 +113,6 @@
size_t fp_spill_mask,
const uint8_t* code,
size_t code_size,
- size_t data_size,
bool osr,
Handle<mirror::ObjectArray<mirror::Object>> roots,
bool has_should_deoptimize_flag,
@@ -255,6 +254,7 @@
JitCodeCache(MemMap* code_map,
MemMap* data_map,
MemMap* writable_code_map,
+ MemMap* code_sync_map,
size_t initial_code_capacity,
size_t initial_data_capacity,
size_t max_capacity,
@@ -272,7 +272,6 @@
size_t fp_spill_mask,
const uint8_t* code,
size_t code_size,
- size_t data_size,
bool osr,
Handle<mirror::ObjectArray<mirror::Object>> roots,
bool has_should_deoptimize_flag,
@@ -383,6 +382,9 @@
std::unique_ptr<MemMap> executable_code_map_;
// Mem map which holds a non-executable view of code for JIT.
std::unique_ptr<MemMap> writable_code_map_;
+ // Mem map which holds one executable page that we use for flushing instruction
+ // fetch buffers. The code on this page is never executed.
+ std::unique_ptr<MemMap> code_sync_map_;
// The opaque mspace for allocating code.
void* code_mspace_ GUARDED_BY(lock_);
// The opaque mspace for allocating data.
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index da3da0f..4e82480 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -496,7 +496,7 @@
MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
int result = munmap(base_begin_, base_size_);
if (result == -1) {
- PLOG(FATAL) << "munmap failed";
+ PLOG(FATAL) << "munmap failed: " << BaseBegin() << "..." << BaseEnd();
}
}
@@ -560,6 +560,12 @@
size_ = new_end - reinterpret_cast<uint8_t*>(begin_);
base_size_ = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
+ if (base_size_ == 0u) {
+ // All pages in this MemMap have been handed out. Invalidate base
+ // pointer to prevent the destructor calling munmap() on
+ // zero-length region (which can't succeed).
+ base_begin_ = nullptr;
+ }
size_t tail_size = old_end - new_end;
uint8_t* tail_base_begin = new_base_end;
size_t tail_base_size = old_base_end - new_base_end;
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 2712419..6fbf64b 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -63,7 +63,6 @@
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/unix_file/fd_file.h"
-#include "cha.h"
#include "class_linker-inl.h"
#include "compiler_callbacks.h"
#include "debugger.h"
@@ -259,8 +258,7 @@
pruned_dalvik_cache_(false),
// Initially assume we perceive jank in case the process state is never updated.
process_state_(kProcessStateJankPerceptible),
- zygote_no_threads_(false),
- cha_(nullptr) {
+ zygote_no_threads_(false) {
static_assert(Runtime::kCalleeSaveSize ==
static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType), "Unexpected size");
@@ -382,7 +380,6 @@
delete monitor_list_;
delete monitor_pool_;
delete class_linker_;
- delete cha_;
delete heap_;
delete intern_table_;
delete oat_file_manager_;
@@ -1287,7 +1284,6 @@
CHECK_GE(GetHeap()->GetContinuousSpaces().size(), 1U);
class_linker_ = new ClassLinker(intern_table_);
- cha_ = new ClassHierarchyAnalysis;
if (GetHeap()->HasBootImageSpace()) {
bool result = class_linker_->InitFromBootImage(&error_msg);
if (!result) {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 9424596..7e4b896 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -73,7 +73,6 @@
class ArenaPool;
class ArtMethod;
enum class CalleeSaveType: uint32_t;
-class ClassHierarchyAnalysis;
class ClassLinker;
class CompilerCallbacks;
class DexFile;
@@ -650,10 +649,6 @@
void AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
void RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
- ClassHierarchyAnalysis* GetClassHierarchyAnalysis() {
- return cha_;
- }
-
void AttachAgent(const std::string& agent_arg);
const std::list<ti::Agent>& GetAgents() const {
@@ -944,8 +939,6 @@
// Generic system-weak holders.
std::vector<gc::AbstractSystemWeakHolder*> system_weak_holders_;
- ClassHierarchyAnalysis* cha_;
-
std::unique_ptr<RuntimeCallbacks> callbacks_;
std::atomic<uint32_t> deoptimization_counts_[
diff --git a/runtime/simulator/Android.bp b/simulator/Android.bp
similarity index 60%
rename from runtime/simulator/Android.bp
rename to simulator/Android.bp
index 03e3f15..a399289 100644
--- a/runtime/simulator/Android.bp
+++ b/simulator/Android.bp
@@ -14,6 +14,12 @@
// limitations under the License.
//
+cc_library_headers {
+ name: "libart_simulator_headers",
+ host_supported: true,
+ export_include_dirs: ["include"],
+}
+
cc_defaults {
name: "libart_simulator_defaults",
host_supported: true,
@@ -29,8 +35,8 @@
"liblog",
],
cflags: ["-DVIXL_INCLUDE_SIMULATOR_AARCH64"],
- export_include_dirs: ["."],
- include_dirs: ["art/runtime"],
+
+ header_libs: ["libart_simulator_headers"],
}
art_cc_library {
@@ -53,3 +59,38 @@
"libvixld-arm64",
],
}
+
+cc_defaults {
+ name: "libart_simulator_container_defaults",
+ host_supported: true,
+
+ defaults: ["art_defaults"],
+ srcs: [
+ "code_simulator_container.cc",
+ ],
+ shared_libs: [
+ "libbase",
+ ],
+
+ header_libs: ["libart_simulator_headers"],
+ export_include_dirs: ["."], // TODO: Consider a proper separation.
+}
+
+art_cc_library {
+ name: "libart-simulator-container",
+ defaults: ["libart_simulator_container_defaults"],
+ shared_libs: [
+ "libart",
+ ],
+}
+
+art_cc_library {
+ name: "libartd-simulator-container",
+ defaults: [
+ "art_debug_defaults",
+ "libart_simulator_container_defaults",
+ ],
+ shared_libs: [
+ "libartd",
+ ],
+}
diff --git a/runtime/simulator/code_simulator.cc b/simulator/code_simulator.cc
similarity index 92%
rename from runtime/simulator/code_simulator.cc
rename to simulator/code_simulator.cc
index 1a11160..e653dfc 100644
--- a/runtime/simulator/code_simulator.cc
+++ b/simulator/code_simulator.cc
@@ -14,8 +14,9 @@
* limitations under the License.
*/
-#include "simulator/code_simulator.h"
-#include "simulator/code_simulator_arm64.h"
+#include "code_simulator.h"
+
+#include "code_simulator_arm64.h"
namespace art {
diff --git a/runtime/simulator/code_simulator_arm64.cc b/simulator/code_simulator_arm64.cc
similarity index 97%
rename from runtime/simulator/code_simulator_arm64.cc
rename to simulator/code_simulator_arm64.cc
index c7ad1fd..939d2e2 100644
--- a/runtime/simulator/code_simulator_arm64.cc
+++ b/simulator/code_simulator_arm64.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "simulator/code_simulator_arm64.h"
+#include "code_simulator_arm64.h"
#include "base/logging.h"
diff --git a/runtime/simulator/code_simulator_arm64.h b/simulator/code_simulator_arm64.h
similarity index 88%
rename from runtime/simulator/code_simulator_arm64.h
rename to simulator/code_simulator_arm64.h
index 59ea34f..0542593 100644
--- a/runtime/simulator/code_simulator_arm64.h
+++ b/simulator/code_simulator_arm64.h
@@ -14,11 +14,10 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_SIMULATOR_CODE_SIMULATOR_ARM64_H_
-#define ART_RUNTIME_SIMULATOR_CODE_SIMULATOR_ARM64_H_
+#ifndef ART_SIMULATOR_CODE_SIMULATOR_ARM64_H_
+#define ART_SIMULATOR_CODE_SIMULATOR_ARM64_H_
#include "memory"
-#include "simulator/code_simulator.h"
// TODO(VIXL): Make VIXL compile with -Wshadow.
#pragma GCC diagnostic push
@@ -26,6 +25,8 @@
#include "aarch64/simulator-aarch64.h"
#pragma GCC diagnostic pop
+#include "code_simulator.h"
+
namespace art {
namespace arm64 {
@@ -55,4 +56,4 @@
} // namespace arm64
} // namespace art
-#endif // ART_RUNTIME_SIMULATOR_CODE_SIMULATOR_ARM64_H_
+#endif // ART_SIMULATOR_CODE_SIMULATOR_ARM64_H_
diff --git a/runtime/code_simulator_container.cc b/simulator/code_simulator_container.cc
similarity index 98%
rename from runtime/code_simulator_container.cc
rename to simulator/code_simulator_container.cc
index d884c58..a5f05dc 100644
--- a/runtime/code_simulator_container.cc
+++ b/simulator/code_simulator_container.cc
@@ -17,6 +17,8 @@
#include <dlfcn.h>
#include "code_simulator_container.h"
+
+#include "code_simulator.h"
#include "globals.h"
namespace art {
diff --git a/runtime/code_simulator_container.h b/simulator/code_simulator_container.h
similarity index 87%
rename from runtime/code_simulator_container.h
rename to simulator/code_simulator_container.h
index 10178ba..31a915e 100644
--- a/runtime/code_simulator_container.h
+++ b/simulator/code_simulator_container.h
@@ -14,15 +14,16 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_CODE_SIMULATOR_CONTAINER_H_
-#define ART_RUNTIME_CODE_SIMULATOR_CONTAINER_H_
+#ifndef ART_SIMULATOR_CODE_SIMULATOR_CONTAINER_H_
+#define ART_SIMULATOR_CODE_SIMULATOR_CONTAINER_H_
#include "arch/instruction_set.h"
#include "base/logging.h"
-#include "simulator/code_simulator.h"
namespace art {
+class CodeSimulator;
+
// This container dynamically opens and closes libart-simulator.
class CodeSimulatorContainer {
public:
@@ -52,4 +53,4 @@
} // namespace art
-#endif // ART_RUNTIME_CODE_SIMULATOR_CONTAINER_H_
+#endif // ART_SIMULATOR_CODE_SIMULATOR_CONTAINER_H_
diff --git a/runtime/simulator/code_simulator.h b/simulator/include/code_simulator.h
similarity index 89%
rename from runtime/simulator/code_simulator.h
rename to simulator/include/code_simulator.h
index bd48909..256ab23 100644
--- a/runtime/simulator/code_simulator.h
+++ b/simulator/include/code_simulator.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_SIMULATOR_CODE_SIMULATOR_H_
-#define ART_RUNTIME_SIMULATOR_CODE_SIMULATOR_H_
+#ifndef ART_SIMULATOR_INCLUDE_CODE_SIMULATOR_H_
+#define ART_SIMULATOR_INCLUDE_CODE_SIMULATOR_H_
#include "arch/instruction_set.h"
@@ -43,4 +43,4 @@
} // namespace art
-#endif // ART_RUNTIME_SIMULATOR_CODE_SIMULATOR_H_
+#endif // ART_SIMULATOR_INCLUDE_CODE_SIMULATOR_H_
diff --git a/test/616-cha/src/Main.java b/test/616-cha/src/Main.java
index beea90a..27da7cc 100644
--- a/test/616-cha/src/Main.java
+++ b/test/616-cha/src/Main.java
@@ -187,7 +187,12 @@
System.loadLibrary(args[0]);
// CHeck some boot-image methods.
- assertSingleImplementation(java.util.ArrayList.class, "size", true);
+
+ // We would want to have this, but currently setting single-implementation in the boot image
+ // does not work well with app images. b/34193647
+ final boolean ARRAYLIST_SIZE_EXPECTED = false;
+ assertSingleImplementation(java.util.ArrayList.class, "size", ARRAYLIST_SIZE_EXPECTED);
+
// java.util.LinkedHashMap overrides get().
assertSingleImplementation(java.util.HashMap.class, "get", false);
diff --git a/test/660-store-8-16/expected.txt b/test/660-store-8-16/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/660-store-8-16/expected.txt
diff --git a/test/660-store-8-16/info.txt b/test/660-store-8-16/info.txt
new file mode 100644
index 0000000..aad6c56
--- /dev/null
+++ b/test/660-store-8-16/info.txt
@@ -0,0 +1,3 @@
+Regression test for the compiler whose x86 and x64 backends
+used to crash on 8bits / 16bits immediate stores when the Java
+input was a wide immediate.
diff --git a/test/660-store-8-16/smali/TestCase.smali b/test/660-store-8-16/smali/TestCase.smali
new file mode 100644
index 0000000..ec8cbd8
--- /dev/null
+++ b/test/660-store-8-16/smali/TestCase.smali
@@ -0,0 +1,102 @@
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTestCase;
+
+.super Ljava/lang/Object;
+
+.method public static setByteArray([B)V
+ .registers 3
+ const/16 v0, 0x0
+ const/16 v1, 0x0101
+ aput-byte v1, p0, v0
+ return-void
+.end method
+
+.method public static setByteStaticField()V
+ .registers 1
+ const/16 v0, 0x0101
+ sput-byte v0, LTestCase;->staticByteField:B
+ return-void
+.end method
+
+.method public static setByteInstanceField(LTestCase;)V
+ .registers 2
+ const/16 v0, 0x0101
+ iput-byte v0, p0, LTestCase;->instanceByteField:B
+ return-void
+.end method
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public static setShortArray([S)V
+ .registers 3
+ const/16 v0, 0x0
+ const v1, 0x10101
+ aput-short v1, p0, v0
+ return-void
+.end method
+
+.method public static setShortStaticField()V
+ .registers 1
+ const v0, 0x10101
+ sput-short v0, LTestCase;->staticShortField:S
+ return-void
+.end method
+
+.method public static setShortInstanceField(LTestCase;)V
+ .registers 2
+ const v0, 0x10101
+ iput-short v0, p0, LTestCase;->instanceShortField:S
+ return-void
+.end method
+
+.method public static setCharArray([C)V
+ .registers 3
+ const/16 v0, 0x0
+ const v1, 0x10101
+ aput-char v1, p0, v0
+ return-void
+.end method
+
+.method public static setCharStaticField()V
+ .registers 1
+ const v0, 0x10101
+ sput-char v0, LTestCase;->staticCharField:C
+ return-void
+.end method
+
+.method public static setCharInstanceField(LTestCase;)V
+ .registers 2
+ const v0, 0x10101
+ iput-char v0, p0, LTestCase;->instanceCharField:C
+ return-void
+.end method
+
+# Duplicate constructor: <init>()V is already declared above; a second
+# declaration would make smali fail with a duplicate-method error, so
+# this copy is commented out (lines kept to preserve the hunk length).
+# invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+# return-void / .end method
+
+.field public static staticByteField:B
+.field public instanceByteField:B
+.field public static staticShortField:S
+.field public instanceShortField:S
+.field public static staticCharField:C
+.field public instanceCharField:C
diff --git a/test/660-store-8-16/src/Main.java b/test/660-store-8-16/src/Main.java
new file mode 100644
index 0000000..32b2568
--- /dev/null
+++ b/test/660-store-8-16/src/Main.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void expectEquals(int expected, int actual) {
+ if (expected != actual) {
+ throw new Error("Expected " + expected + ", got " + actual);
+ }
+ }
+
+ public static void main(String[] unused) throws Exception {
+ Class<?> cls = Class.forName("TestCase");
+
+ cls.getMethod("setByteStaticField").invoke(null);
+ expectEquals(1, cls.getField("staticByteField").getByte(null));
+
+ cls.getMethod("setShortStaticField").invoke(null);
+ expectEquals(0x101, cls.getField("staticShortField").getShort(null));
+
+ cls.getMethod("setCharStaticField").invoke(null);
+ expectEquals(0x101, cls.getField("staticCharField").getChar(null));
+
+ {
+ Object[] args = { new byte[2] };
+ cls.getMethod("setByteArray", byte[].class).invoke(null, args);
+ expectEquals(1, ((byte[])args[0])[0]);
+ }
+ {
+ Object[] args = { new short[2] };
+ cls.getMethod("setShortArray", short[].class).invoke(null, args);
+ expectEquals(0x101, ((short[])args[0])[0]);
+ }
+ {
+ Object[] args = { new char[2] };
+ cls.getMethod("setCharArray", char[].class).invoke(null, args);
+ expectEquals(0x101, ((char[])args[0])[0]);
+ }
+ {
+ Object[] args = { cls.newInstance() };
+
+ cls.getMethod("setByteInstanceField", cls).invoke(null, args);
+ expectEquals(1, cls.getField("instanceByteField").getByte(args[0]));
+
+ cls.getMethod("setShortInstanceField", cls).invoke(null, args);
+ expectEquals(0x101, cls.getField("instanceShortField").getShort(args[0]));
+
+ cls.getMethod("setCharInstanceField", cls).invoke(null, args);
+ expectEquals(0x101, cls.getField("instanceCharField").getChar(args[0]));
+ }
+ }
+}
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 53611a8..09e76fa 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -712,5 +712,11 @@
"lookup changes" ],
"bug": "b/63089991",
"env_vars": {"ANDROID_COMPILE_WITH_JACK": "false"}
+ },
+ {
+ "tests": "660-clinit",
+ "variant": "no-image | no-dex2oat | no-prebuild",
+ "description": ["Tests <clinit> for app images, which --no-image, --no-prebuild and",
+ "--no-dex2oat do not create"]
}
]
diff --git a/tools/ahat/README.txt b/tools/ahat/README.txt
index 38556ab..4471c0a 100644
--- a/tools/ahat/README.txt
+++ b/tools/ahat/README.txt
@@ -75,7 +75,11 @@
* Instance.isRoot and Instance.getRootTypes.
Release History:
- 1.3 Pending
+ 1.4 Pending
+
+ 1.3 July 25, 2017
+ Improve diffing of static and instance fields.
+ Improve startup performance by roughly 25%.
1.2 May 26, 2017
Show registered native sizes of objects.
diff --git a/tools/ahat/src/manifest.txt b/tools/ahat/src/manifest.txt
index c35ccf1..d893c5e 100644
--- a/tools/ahat/src/manifest.txt
+++ b/tools/ahat/src/manifest.txt
@@ -1,4 +1,4 @@
Name: ahat/
Implementation-Title: ahat
-Implementation-Version: 1.2
+Implementation-Version: 1.3
Main-Class: com.android.ahat.Main
diff --git a/tools/dexfuzz/README b/tools/dexfuzz/README
index a635fe9..1f74262 100644
--- a/tools/dexfuzz/README
+++ b/tools/dexfuzz/README
@@ -145,7 +145,7 @@
PoolIndexChanger 30
RandomBranchChanger 30
RandomInstructionGenerator 30
-RegisterClobber 40
+RegisterClobber 10
SwitchBranchShifter 30
TryBlockShifter 40
ValuePrinter 40
diff --git a/tools/dexfuzz/src/dexfuzz/DexFuzz.java b/tools/dexfuzz/src/dexfuzz/DexFuzz.java
index d37bd34..2b3b8e7 100644
--- a/tools/dexfuzz/src/dexfuzz/DexFuzz.java
+++ b/tools/dexfuzz/src/dexfuzz/DexFuzz.java
@@ -33,9 +33,9 @@
* Entrypoint class for dexfuzz.
*/
public class DexFuzz {
- // Last version update 1.5: added register clobber mutator.
+ // Last version update 1.7: changed the likelihood of RegisterClobber.
private static int majorVersion = 1;
- private static int minorVersion = 5;
+ private static int minorVersion = 7;
private static int seedChangeVersion = 0;
/**
diff --git a/tools/dexfuzz/src/dexfuzz/program/mutators/NewArrayLengthChanger.java b/tools/dexfuzz/src/dexfuzz/program/mutators/NewArrayLengthChanger.java
index aba7971..e640b4e 100644
--- a/tools/dexfuzz/src/dexfuzz/program/mutators/NewArrayLengthChanger.java
+++ b/tools/dexfuzz/src/dexfuzz/program/mutators/NewArrayLengthChanger.java
@@ -28,8 +28,6 @@
import java.util.List;
import java.util.Random;
-// This mutation might change the length of an array but can also change the
-// value of the register in every place it is used.
public class NewArrayLengthChanger extends CodeMutator {
/**
* Every CodeMutator has an AssociatedMutation, representing the
@@ -116,20 +114,46 @@
MutatableCode mutatableCode = mutation.mutatableCode;
MInsn newArrayInsn = newArrayLengthInsns.get(mutation.newArrayToChangeIdx);
int newArrayInsnIdx = mutatableCode.getInstructionIndex(newArrayInsn);
+ // If the original new-array instruction is no longer present
+ // in the code (as indicated by a negative index), we make a
+ // best effort to find any other new-array instruction to
+ // apply the mutation to. If that effort fails, we simply
+ // bail by doing nothing.
+ if (newArrayInsnIdx < 0) {
+ newArrayInsnIdx = scanNewArray(mutatableCode);
+ if (newArrayInsnIdx == -1) {
+ return;
+ }
+ }
MInsn newInsn = new MInsn();
newInsn.insn = new Instruction();
newInsn.insn.info = Instruction.getOpcodeInfo(Opcode.CONST_16);
+ mutatableCode.allocateTemporaryVRegs(1);
+ newArrayInsn.insn.vregB = mutatableCode.getTemporaryVReg(0);
newInsn.insn.vregA = (int) newArrayInsn.insn.vregB;
// New length chosen randomly between 1 to 100.
newInsn.insn.vregB = rng.nextInt(100);
mutatableCode.insertInstructionAt(newInsn, newArrayInsnIdx);
Log.info("Changed the length of the array to " + newInsn.insn.vregB);
stats.incrementStat("Changed length of new array");
+ mutatableCode.finishedUsingTemporaryVRegs();
}
private boolean isNewArray(MInsn mInsn) {
Opcode opcode = mInsn.insn.info.opcode;
return opcode == Opcode.NEW_ARRAY;
}
+
+ // Return the index of first new-array in the method, -1 otherwise.
+ private int scanNewArray(MutatableCode mutatableCode) {
+ int idx = 0;
+ for (MInsn mInsn : mutatableCode.getInstructions()) {
+ if (isNewArray(mInsn)) {
+ return idx;
+ }
+ idx++;
+ }
+ return -1;
+ }
}
\ No newline at end of file
diff --git a/tools/dexfuzz/src/dexfuzz/program/mutators/RegisterClobber.java b/tools/dexfuzz/src/dexfuzz/program/mutators/RegisterClobber.java
index 11da1d4..90f4f0f 100644
--- a/tools/dexfuzz/src/dexfuzz/program/mutators/RegisterClobber.java
+++ b/tools/dexfuzz/src/dexfuzz/program/mutators/RegisterClobber.java
@@ -61,7 +61,7 @@
public RegisterClobber(Random rng, MutationStats stats, List<Mutation> mutations) {
super(rng, stats, mutations);
- likelihood = 40;
+ likelihood = 10;
}
@Override
@@ -90,6 +90,7 @@
newInsn.insn = new Instruction();
newInsn.insn.info = Instruction.getOpcodeInfo(Opcode.CONST_16);
newInsn.insn.vregA = i;
+ // Used zero because it may also apply to objects, resulting in fewer verification failures.
newInsn.insn.vregB = 0;
mutatableCode.insertInstructionAt(newInsn, mutation.regClobberIdx + i);
}
diff --git a/tools/generate_cmake_lists.py b/tools/generate_cmake_lists.py
new file mode 100755
index 0000000..6c3ce08
--- /dev/null
+++ b/tools/generate_cmake_lists.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+#
+# Copyright 2017, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+./generate_cmake_lists.py --project-name <project-name> --arch <arch>
+
+- project-name - name of the new project
+- arch - arch type. make generates separate CMakeLists files for
+ each architecture. To avoid collision in targets, only one of
+ them can be included in the super project.
+
+The primary objective of this file is to generate CMakeLists files
+for CLion setup.
+
+Steps to setup CLion.
+1) Open the generated CMakeList file in CLion as a project.
+2) Change the project root ANDROID_BUILD_TOP.
+(Also, exclude projects that you don't bother about. This will make
+the indexing faster).
+"""
+
+import sys
+import os
+import subprocess
+import argparse
+
+def get_android_build_top():
+ path_to_top = os.environ.get('ANDROID_BUILD_TOP')
+ if not path_to_top:
+ # nothing set. try to guess it based on the relative path of this env.py file.
+ this_file_path = os.path.realpath(__file__)
+ path_to_top = os.path.join(os.path.dirname(this_file_path), '../..')
+ path_to_top = os.path.realpath(path_to_top)
+
+ if not os.path.exists(os.path.join(path_to_top, 'build/envsetup.sh')):
+ print path_to_top
+ raise AssertionError("generate_cmake_lists.py must be located inside an android source tree")
+
+ return path_to_top
+
+def main():
+ # Parse arguments
+ parser = argparse.ArgumentParser(description="Generate CMakeLists files for ART")
+ parser.add_argument('--project-name', dest="project_name", required=True,
+ help='name of the project')
+ parser.add_argument('--arch', dest="arch", required=True, help='arch')
+ args = parser.parse_args()
+ project_name = args.project_name
+ arch = args.arch
+
+ # Invoke make to generate CMakeFiles
+ os.environ['SOONG_GEN_CMAKEFILES']='1'
+ os.environ['SOONG_GEN_CMAKEFILES_DEBUG']='1'
+
+ ANDROID_BUILD_TOP = get_android_build_top()
+
+ subprocess.check_output(('make -j64 -C %s') % (ANDROID_BUILD_TOP), shell=True)
+
+ out_art_cmakelists_dir = os.path.join(ANDROID_BUILD_TOP,
+ 'out/development/ide/clion/art')
+
+ # Prepare a list of directories containing generated CMakeLists files for sub projects.
+ cmake_sub_dirs = set()
+ for root, dirs, files in os.walk(out_art_cmakelists_dir):
+ for name in files:
+ if name == 'CMakeLists.txt':
+ if (os.path.samefile(root, out_art_cmakelists_dir)):
+ continue
+ if arch not in root:
+ continue
+ cmake_sub_dir = cmake_sub_dirs.add(root.replace(out_art_cmakelists_dir,
+ '.'))
+
+ # Generate CMakeLists file.
+ f = open(os.path.join(out_art_cmakelists_dir, 'CMakeLists.txt'), 'w')
+ f.write('cmake_minimum_required(VERSION 3.6)\n')
+ f.write('project(%s)\n' % (project_name))
+
+ for dr in cmake_sub_dirs:
+ f.write('add_subdirectory(%s)\n' % (dr))
+
+
+if __name__ == '__main__':
+ main()