Merge "Clean up ZygoteHooks stack walking slightly."
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index f53740e..c733feb 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -109,7 +109,7 @@
--oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
--base=$$(LIBART_IMG_HOST_BASE_ADDRESS) --instruction-set=$$($(2)ART_HOST_ARCH) \
$$(LOCAL_$(2)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION) \
- --host --android-root=$$(HOST_OUT) --include-patch-information \
+ --host --android-root=$$(HOST_OUT) \
--generate-debug-info --generate-build-id --compile-pic \
$$(PRIVATE_CORE_MULTI_PARAM) $$(PRIVATE_CORE_COMPILE_OPTIONS)
@@ -212,7 +212,7 @@
--base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) --instruction-set=$$($(2)TARGET_ARCH) \
--instruction-set-variant=$$($(2)DEX2OAT_TARGET_CPU_VARIANT) \
--instruction-set-features=$$($(2)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
- --android-root=$$(PRODUCT_OUT)/system --include-patch-information \
+ --android-root=$$(PRODUCT_OUT)/system \
--generate-debug-info --generate-build-id --compile-pic \
$$(PRIVATE_CORE_COMPILE_OPTIONS) || (rm $$(PRIVATE_CORE_OAT_NAME); exit 1)
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 7e91453..a5e4cb0 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -2283,7 +2283,7 @@
public:
explicit InitializeClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
- virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+ void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
ATRACE_CALL();
jobject jclass_loader = manager_->GetClassLoader();
const DexFile& dex_file = *manager_->GetDexFile();
@@ -2343,23 +2343,32 @@
// mode which prevents the GC from visiting objects modified during the transaction.
// Ensure GC is not run so don't access freed objects when aborting transaction.
- ScopedAssertNoThreadSuspension ants("Transaction end");
- runtime->ExitTransactionMode();
+ {
+ ScopedAssertNoThreadSuspension ants("Transaction end");
+ runtime->ExitTransactionMode();
+
+ if (!success) {
+ CHECK(soa.Self()->IsExceptionPending());
+ mirror::Throwable* exception = soa.Self()->GetException();
+ VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
+ << exception->Dump();
+ std::ostream* file_log = manager_->GetCompiler()->
+ GetCompilerOptions().GetInitFailureOutput();
+ if (file_log != nullptr) {
+ *file_log << descriptor << "\n";
+ *file_log << exception->Dump() << "\n";
+ }
+ soa.Self()->ClearException();
+ transaction.Rollback();
+ CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
+ }
+ }
if (!success) {
- CHECK(soa.Self()->IsExceptionPending());
- mirror::Throwable* exception = soa.Self()->GetException();
- VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
- << exception->Dump();
- std::ostream* file_log = manager_->GetCompiler()->
- GetCompilerOptions().GetInitFailureOutput();
- if (file_log != nullptr) {
- *file_log << descriptor << "\n";
- *file_log << exception->Dump() << "\n";
- }
- soa.Self()->ClearException();
- transaction.Rollback();
- CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
+ // On failure, still intern strings of static fields and those seen in <clinit>, as
+ // these will be created in the zygote. This is separated from the transaction code
+ // just above as we will allocate strings, so must be allowed to suspend.
+ InternStrings(klass, class_loader);
}
}
}
@@ -2375,6 +2384,57 @@
}
private:
+ void InternStrings(Handle<mirror::Class> klass, Handle<mirror::ClassLoader> class_loader)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(manager_->GetCompiler()->GetCompilerOptions().IsBootImage());
+ DCHECK(klass->IsVerified());
+ DCHECK(!klass->IsInitialized());
+
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::DexCache> h_dex_cache = hs.NewHandle(klass->GetDexCache());
+ const DexFile* dex_file = manager_->GetDexFile();
+ const DexFile::ClassDef* class_def = klass->GetClassDef();
+ ClassLinker* class_linker = manager_->GetClassLinker();
+
+ // Check encoded final field values for strings and intern.
+ annotations::RuntimeEncodedStaticFieldValueIterator value_it(*dex_file,
+ &h_dex_cache,
+ &class_loader,
+ manager_->GetClassLinker(),
+ *class_def);
+ for ( ; value_it.HasNext(); value_it.Next()) {
+ if (value_it.GetValueType() == annotations::RuntimeEncodedStaticFieldValueIterator::kString) {
+ // Resolve the string. This will intern the string.
+ art::ObjPtr<mirror::String> resolved = class_linker->ResolveString(
+ *dex_file, dex::StringIndex(value_it.GetJavaValue().i), h_dex_cache);
+ CHECK(resolved != nullptr);
+ }
+ }
+
+ // Intern strings seen in <clinit>.
+ ArtMethod* clinit = klass->FindClassInitializer(class_linker->GetImagePointerSize());
+ if (clinit != nullptr) {
+ const DexFile::CodeItem* code_item = clinit->GetCodeItem();
+ DCHECK(code_item != nullptr);
+ const Instruction* inst = Instruction::At(code_item->insns_);
+
+ const uint32_t insns_size = code_item->insns_size_in_code_units_;
+ for (uint32_t dex_pc = 0; dex_pc < insns_size;) {
+ if (inst->Opcode() == Instruction::CONST_STRING) {
+ ObjPtr<mirror::String> s = class_linker->ResolveString(
+ *dex_file, dex::StringIndex(inst->VRegB_21c()), h_dex_cache);
+ CHECK(s != nullptr);
+ } else if (inst->Opcode() == Instruction::CONST_STRING_JUMBO) {
+ ObjPtr<mirror::String> s = class_linker->ResolveString(
+ *dex_file, dex::StringIndex(inst->VRegB_31c()), h_dex_cache);
+ CHECK(s != nullptr);
+ }
+ dex_pc += inst->SizeInCodeUnits();
+ inst = inst->Next();
+ }
+ }
+ }
+
const ParallelCompilationManager* const manager_;
};
diff --git a/compiler/intrinsics_list.h b/compiler/intrinsics_list.h
index 9bd25d8..63c23cb 100644
--- a/compiler/intrinsics_list.h
+++ b/compiler/intrinsics_list.h
@@ -24,6 +24,10 @@
// Note: adding a new intrinsic requires an art image version change,
// as the modifiers flag for some ArtMethods will need to be changed.
+// Note: j.l.Integer.valueOf says kNoThrow even though it could throw an OOME.
+// The kNoThrow should be renamed to kNoVisibleThrow, as it is ok to GVN Integer.valueOf
+// (kNoSideEffects), and it is also OK to remove it if it's unused.
+
#define INTRINSICS_LIST(V) \
V(DoubleDoubleToRawLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Double;", "doubleToRawLongBits", "(D)J") \
V(DoubleDoubleToLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Double;", "doubleToLongBits", "(D)J") \
@@ -149,7 +153,8 @@
V(UnsafeLoadFence, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "loadFence", "()V") \
V(UnsafeStoreFence, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "storeFence", "()V") \
V(UnsafeFullFence, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "fullFence", "()V") \
- V(ReferenceGetReferent, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/ref/Reference;", "getReferent", "()Ljava/lang/Object;")
+ V(ReferenceGetReferent, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/ref/Reference;", "getReferent", "()Ljava/lang/Object;") \
+ V(IntegerValueOf, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Integer;", "valueOf", "(I)Ljava/lang/Integer;")
#endif // ART_COMPILER_INTRINSICS_LIST_H_
#undef ART_COMPILER_INTRINSICS_LIST_H_ // #define is only for lint.
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 66111f6..e2233e4 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -265,6 +265,7 @@
void TestDexFileInput(bool verify, bool low_4gb, bool use_profile);
void TestZipFileInput(bool verify);
+ void TestZipFileInputWithEmptyDex();
std::unique_ptr<const InstructionSetFeatures> insn_features_;
std::unique_ptr<QuickCompilerCallbacks> callbacks_;
@@ -821,6 +822,28 @@
TestZipFileInput(true);
}
+void OatTest::TestZipFileInputWithEmptyDex() {
+ ScratchFile zip_file;
+ ZipBuilder zip_builder(zip_file.GetFile());
+ bool success = zip_builder.AddFile("classes.dex", nullptr, 0);
+ ASSERT_TRUE(success);
+ success = zip_builder.Finish();
+ ASSERT_TRUE(success) << strerror(errno);
+
+ SafeMap<std::string, std::string> key_value_store;
+ key_value_store.Put(OatHeader::kImageLocationKey, "test.art");
+ std::vector<const char*> input_filenames { zip_file.GetFilename().c_str() }; // NOLINT [readability/braces] [4]
+ ScratchFile oat_file, vdex_file(oat_file, ".vdex");
+ std::unique_ptr<ProfileCompilationInfo> profile_compilation_info(new ProfileCompilationInfo());
+ success = WriteElf(vdex_file.GetFile(), oat_file.GetFile(), input_filenames,
+ key_value_store, /*verify*/false, profile_compilation_info.get());
+ ASSERT_FALSE(success);
+}
+
+TEST_F(OatTest, ZipFileInputWithEmptyDex) {
+ TestZipFileInputWithEmptyDex();
+}
+
TEST_F(OatTest, UpdateChecksum) {
InstructionSet insn_set = kX86;
std::string error_msg;
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 0ea1125..8ab44d2 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -104,6 +104,13 @@
// Defines the location of the raw dex file to write.
class OatWriter::DexFileSource {
public:
+ enum Type {
+ kNone,
+ kZipEntry,
+ kRawFile,
+ kRawData,
+ };
+
explicit DexFileSource(ZipEntry* zip_entry)
: type_(kZipEntry), source_(zip_entry) {
DCHECK(source_ != nullptr);
@@ -119,6 +126,7 @@
DCHECK(source_ != nullptr);
}
+ Type GetType() const { return type_; }
bool IsZipEntry() const { return type_ == kZipEntry; }
bool IsRawFile() const { return type_ == kRawFile; }
bool IsRawData() const { return type_ == kRawData; }
@@ -147,13 +155,6 @@
}
private:
- enum Type {
- kNone,
- kZipEntry,
- kRawFile,
- kRawData,
- };
-
Type type_;
const void* source_;
};
@@ -2259,16 +2260,38 @@
ZipEntry* zip_entry = oat_dex_file->source_.GetZipEntry();
std::unique_ptr<MemMap> mem_map(
zip_entry->ExtractToMemMap(location.c_str(), "classes.dex", &error_msg));
+ if (mem_map == nullptr) {
+ LOG(ERROR) << "Failed to extract dex file to mem map for layout: " << error_msg;
+ return false;
+ }
dex_file = DexFile::Open(location,
zip_entry->GetCrc32(),
std::move(mem_map),
/* verify */ true,
/* verify_checksum */ true,
&error_msg);
- } else {
- DCHECK(oat_dex_file->source_.IsRawFile());
+ } else if (oat_dex_file->source_.IsRawFile()) {
File* raw_file = oat_dex_file->source_.GetRawFile();
dex_file = DexFile::OpenDex(raw_file->Fd(), location, /* verify_checksum */ true, &error_msg);
+ } else {
+ // The source data is a vdex file.
+ CHECK(oat_dex_file->source_.IsRawData())
+ << static_cast<size_t>(oat_dex_file->source_.GetType());
+ const uint8_t* raw_dex_file = oat_dex_file->source_.GetRawData();
+ // Note: The raw data has already been checked to contain the header
+ // and all the data that the header specifies as the file size.
+ DCHECK(raw_dex_file != nullptr);
+ DCHECK(ValidateDexFileHeader(raw_dex_file, oat_dex_file->GetLocation()));
+ const UnalignedDexFileHeader* header = AsUnalignedDexFileHeader(raw_dex_file);
+ // Since the source may have had its layout changed, don't verify the checksum.
+ dex_file = DexFile::Open(raw_dex_file,
+ header->file_size_,
+ location,
+ oat_dex_file->dex_file_location_checksum_,
+ nullptr,
+ /* verify */ true,
+ /* verify_checksum */ false,
+ &error_msg);
}
if (dex_file == nullptr) {
LOG(ERROR) << "Failed to open dex file for layout: " << error_msg;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index edccbd4..18c95b3 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4094,7 +4094,7 @@
}
void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
- IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
+ IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena(), codegen_);
if (intrinsic.TryDispatch(invoke)) {
return;
}
@@ -4107,7 +4107,7 @@
// art::PrepareForRegisterAllocation.
DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
- IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
+ IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena(), codegen_);
if (intrinsic.TryDispatch(invoke)) {
return;
}
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index c9dde7c..791e632 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -2073,6 +2073,11 @@
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
}
+
+ if (type == Primitive::kPrimNot) {
+ Register out = locations->Out().AsRegister<Register>();
+ __ MaybeUnpoisonHeapReference(out);
+ }
}
void LocationsBuilderMIPS::VisitArrayLength(HArrayLength* instruction) {
@@ -2200,7 +2205,31 @@
DCHECK(!needs_write_barrier);
} else {
Register value = value_location.AsRegister<Register>();
- __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
+ if (kPoisonHeapReferences && needs_write_barrier) {
+ // Note that in the case where `value` is a null reference,
+ // we do not enter this block, as a null reference does not
+ // need poisoning.
+ DCHECK_EQ(value_type, Primitive::kPrimNot);
+ // Use Sw() instead of StoreToOffset() in order to be able to
+ // hold the poisoned reference in AT and thus avoid allocating
+ // yet another temporary register.
+ if (index.IsConstant()) {
+ if (!IsInt<16>(static_cast<int32_t>(data_offset))) {
+ int16_t low = Low16Bits(data_offset);
+ uint32_t high = data_offset - low;
+ __ Addiu32(TMP, obj, high);
+ base_reg = TMP;
+ data_offset = low;
+ }
+ } else {
+ DCHECK(IsInt<16>(static_cast<int32_t>(data_offset)));
+ }
+ __ PoisonHeapReference(AT, value);
+ __ Sw(AT, base_reg, data_offset);
+ null_checker();
+ } else {
+ __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
+ }
if (needs_write_barrier) {
DCHECK_EQ(value_type, Primitive::kPrimNot);
codegen_->MarkGCCard(obj, value, instruction->GetValueCanBeNull());
@@ -2208,6 +2237,8 @@
}
} else {
DCHECK_EQ(value_type, Primitive::kPrimNot);
+ // Note: if heap poisoning is enabled, pAputObject takes care
+ // of poisoning the reference.
codegen_->InvokeRuntime(kQuickAputObject, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
}
@@ -2322,6 +2353,7 @@
__ Beqz(obj, slow_path->GetExitLabel());
// Compare the class of `obj` with `cls`.
__ LoadFromOffset(kLoadWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
+ __ MaybeUnpoisonHeapReference(obj_cls);
__ Bne(obj_cls, cls, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
@@ -4958,6 +4990,9 @@
dst = locations->Out().AsRegister<Register>();
}
__ LoadFromOffset(load_type, dst, obj, offset, null_checker);
+ if (type == Primitive::kPrimNot) {
+ __ MaybeUnpoisonHeapReference(dst);
+ }
} else {
DCHECK(locations->Out().IsFpuRegister());
FRegister dst = locations->Out().AsFpuRegister<FRegister>();
@@ -5016,6 +5051,7 @@
StoreOperandType store_type = kStoreByte;
bool is_volatile = field_info.IsVolatile();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
+ bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1));
auto null_checker = GetImplicitNullChecker(instruction);
switch (type) {
@@ -5089,7 +5125,16 @@
} else {
src = value_location.AsRegister<Register>();
}
- __ StoreToOffset(store_type, src, obj, offset, null_checker);
+ if (kPoisonHeapReferences && needs_write_barrier) {
+ // Note that in the case where `value` is a null reference,
+ // we do not enter this block, as a null reference does not
+ // need poisoning.
+ DCHECK_EQ(type, Primitive::kPrimNot);
+ __ PoisonHeapReference(TMP, src);
+ __ StoreToOffset(store_type, TMP, obj, offset, null_checker);
+ } else {
+ __ StoreToOffset(store_type, src, obj, offset, null_checker);
+ }
} else {
FRegister src = value_location.AsFpuRegister<FRegister>();
if (type == Primitive::kPrimFloat) {
@@ -5101,7 +5146,7 @@
}
// TODO: memory barriers?
- if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
+ if (needs_write_barrier) {
Register src = value_location.AsRegister<Register>();
codegen_->MarkGCCard(obj, src, value_can_be_null);
}
@@ -5173,6 +5218,7 @@
// Compare the class of `obj` with `cls`.
__ LoadFromOffset(kLoadWord, out, obj, mirror::Object::ClassOffset().Int32Value());
+ __ MaybeUnpoisonHeapReference(out);
if (instruction->IsExactCheck()) {
// Classes must be equal for the instanceof to succeed.
__ Xor(out, out, cls);
@@ -5239,6 +5285,14 @@
__ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
+ // Instead of simply (possibly) unpoisoning `temp` here, we should
+ // emit a read barrier for the previous class reference load.
+ // However this is not required in practice, as this is an
+ // intermediate/temporary reference and because the current
+ // concurrent copying collector keeps the from-space memory
+ // intact/accessible until the end of the marking phase (the
+ // concurrent copying collector may not in the future).
+ __ MaybeUnpoisonHeapReference(temp);
__ LoadFromOffset(kLoadWord, temp, temp,
mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
@@ -5562,6 +5616,14 @@
// temp = object->GetClass();
__ LoadFromOffset(kLoadWord, temp, receiver, class_offset);
MaybeRecordImplicitNullCheck(invoke);
+ // Instead of simply (possibly) unpoisoning `temp` here, we should
+ // emit a read barrier for the previous class reference load.
+ // However this is not required in practice, as this is an
+ // intermediate/temporary reference and because the current
+ // concurrent copying collector keeps the from-space memory
+ // intact/accessible until the end of the marking phase (the
+ // concurrent copying collector may not in the future).
+ __ MaybeUnpoisonHeapReference(temp);
// temp = temp->GetMethodAt(method_offset);
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// T9 = temp->GetEntryPoint();
@@ -5692,7 +5754,7 @@
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
bool reordering = __ SetReorder(false);
codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out, base_or_current_method_reg);
- __ LoadFromOffset(kLoadWord, out, out, /* placeholder */ 0x5678);
+ GenerateGcRootFieldLoad(cls, out_loc, out, /* placeholder */ 0x5678);
__ SetReorder(reordering);
generate_null_check = true;
break;
@@ -5837,7 +5899,7 @@
codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
bool reordering = __ SetReorder(false);
codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out, base_or_current_method_reg);
- __ LoadFromOffset(kLoadWord, out, out, /* placeholder */ 0x5678);
+ GenerateGcRootFieldLoad(load, out_loc, out, /* placeholder */ 0x5678);
__ SetReorder(reordering);
SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load);
codegen_->AddSlowPath(slow_path);
@@ -6059,6 +6121,8 @@
}
void InstructionCodeGeneratorMIPS::VisitNewArray(HNewArray* instruction) {
+ // Note: if heap poisoning is enabled, the entry point takes care
+ // of poisoning the reference.
codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
}
@@ -6076,6 +6140,8 @@
}
void InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) {
+ // Note: if heap poisoning is enabled, the entry point takes care
+ // of poisoning the reference.
if (instruction->IsStringAlloc()) {
// String is allocated through StringFactory. Call NewEmptyString entry point.
Register temp = instruction->GetLocations()->GetTemp(0).AsRegister<Register>();
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 5be0da4..817854b 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1653,6 +1653,11 @@
if (!maybe_compressed_char_at) {
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
+
+ if (type == Primitive::kPrimNot) {
+ GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ __ MaybeUnpoisonHeapReference(out);
+ }
}
void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
@@ -1740,16 +1745,49 @@
case Primitive::kPrimNot: {
if (!needs_runtime_call) {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ GpuRegister base_reg;
GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- __ StoreToOffset(kStoreWord, value, obj, offset);
+ data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
+ base_reg = obj;
} else {
DCHECK(index.IsRegister()) << index;
__ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
__ Daddu(TMP, obj, TMP);
- __ StoreToOffset(kStoreWord, value, TMP, data_offset);
+ base_reg = TMP;
+ }
+ if (kPoisonHeapReferences && needs_write_barrier) {
+ // Note that in the case where `value` is a null reference,
+ // we do not enter this block, as a null reference does not
+ // need poisoning.
+ DCHECK_EQ(value_type, Primitive::kPrimNot);
+ // Use Sw() instead of StoreToOffset() in order to be able to
+ // hold the poisoned reference in AT and thus avoid allocating
+ // yet another temporary register.
+ if (index.IsConstant()) {
+ if (!IsInt<16>(static_cast<int32_t>(data_offset))) {
+ int16_t low16 = Low16Bits(data_offset);
+ // For consistency with StoreToOffset() and such treat data_offset as int32_t.
+ uint64_t high48 = static_cast<uint64_t>(static_cast<int32_t>(data_offset)) - low16;
+ int16_t upper16 = High16Bits(high48);
+ // Allow the full [-2GB,+2GB) range in case `low16` is negative and needs a
+ // compensatory 64KB added, which may push `high48` above 2GB and require
+ // the dahi instruction.
+ int16_t higher16 = High32Bits(high48) + ((upper16 < 0) ? 1 : 0);
+ __ Daui(TMP, obj, upper16);
+ if (higher16 != 0) {
+ __ Dahi(TMP, higher16);
+ }
+ base_reg = TMP;
+ data_offset = low16;
+ }
+ } else {
+ DCHECK(IsInt<16>(static_cast<int32_t>(data_offset)));
+ }
+ __ PoisonHeapReference(AT, value);
+ __ Sw(AT, base_reg, data_offset);
+ } else {
+ __ StoreToOffset(kStoreWord, value, base_reg, data_offset);
}
codegen_->MaybeRecordImplicitNullCheck(instruction);
if (needs_write_barrier) {
@@ -1758,6 +1796,8 @@
}
} else {
DCHECK_EQ(value_type, Primitive::kPrimNot);
+ // Note: if heap poisoning is enabled, pAputObject takes care
+ // of poisoning the reference.
codegen_->InvokeRuntime(kQuickAputObject, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
}
@@ -1871,6 +1911,7 @@
__ Beqzc(obj, slow_path->GetExitLabel());
// Compare the class of `obj` with `cls`.
__ LoadFromOffset(kLoadUnsignedWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
+ __ MaybeUnpoisonHeapReference(obj_cls);
__ Bnec(obj_cls, cls, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
@@ -3086,6 +3127,7 @@
LocationSummary* locations = instruction->GetLocations();
GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
LoadOperandType load_type = kLoadUnsignedByte;
+ uint32_t offset = field_info.GetFieldOffset().Uint32Value();
switch (type) {
case Primitive::kPrimBoolean:
load_type = kLoadUnsignedByte;
@@ -3117,15 +3159,20 @@
if (!Primitive::IsFloatingPointType(type)) {
DCHECK(locations->Out().IsRegister());
GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
- __ LoadFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
+ __ LoadFromOffset(load_type, dst, obj, offset);
} else {
DCHECK(locations->Out().IsFpuRegister());
FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
- __ LoadFpuFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
+ __ LoadFpuFromOffset(load_type, dst, obj, offset);
}
codegen_->MaybeRecordImplicitNullCheck(instruction);
// TODO: memory barrier?
+
+ if (type == Primitive::kPrimNot) {
+ GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
+ __ MaybeUnpoisonHeapReference(dst);
+ }
}
void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction,
@@ -3147,6 +3194,8 @@
LocationSummary* locations = instruction->GetLocations();
GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
StoreOperandType store_type = kStoreByte;
+ uint32_t offset = field_info.GetFieldOffset().Uint32Value();
+ bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1));
switch (type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
@@ -3172,16 +3221,25 @@
if (!Primitive::IsFloatingPointType(type)) {
DCHECK(locations->InAt(1).IsRegister());
GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
- __ StoreToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
+ if (kPoisonHeapReferences && needs_write_barrier) {
+ // Note that in the case where `value` is a null reference,
+ // we do not enter this block, as a null reference does not
+ // need poisoning.
+ DCHECK_EQ(type, Primitive::kPrimNot);
+ __ PoisonHeapReference(TMP, src);
+ __ StoreToOffset(store_type, TMP, obj, offset);
+ } else {
+ __ StoreToOffset(store_type, src, obj, offset);
+ }
} else {
DCHECK(locations->InAt(1).IsFpuRegister());
FpuRegister src = locations->InAt(1).AsFpuRegister<FpuRegister>();
- __ StoreFpuToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
+ __ StoreFpuToOffset(store_type, src, obj, offset);
}
codegen_->MaybeRecordImplicitNullCheck(instruction);
// TODO: memory barriers?
- if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
+ if (needs_write_barrier) {
DCHECK(locations->InAt(1).IsRegister());
GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
codegen_->MarkGCCard(obj, src, value_can_be_null);
@@ -3247,6 +3305,7 @@
// Compare the class of `obj` with `cls`.
__ LoadFromOffset(kLoadUnsignedWord, out, obj, mirror::Object::ClassOffset().Int32Value());
+ __ MaybeUnpoisonHeapReference(out);
if (instruction->IsExactCheck()) {
// Classes must be equal for the instanceof to succeed.
__ Xor(out, out, cls);
@@ -3325,6 +3384,14 @@
__ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
+ // Instead of simply (possibly) unpoisoning `temp` here, we should
+ // emit a read barrier for the previous class reference load.
+ // However this is not required in practice, as this is an
+ // intermediate/temporary reference and because the current
+ // concurrent copying collector keeps the from-space memory
+ // intact/accessible until the end of the marking phase (the
+ // concurrent copying collector may not in the future).
+ __ MaybeUnpoisonHeapReference(temp);
__ LoadFromOffset(kLoadDoubleword, temp, temp,
mirror::Class::ImtPtrOffset(kMips64PointerSize).Uint32Value());
uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
@@ -3567,6 +3634,14 @@
// temp = object->GetClass();
__ LoadFromOffset(kLoadUnsignedWord, temp, receiver, class_offset);
MaybeRecordImplicitNullCheck(invoke);
+ // Instead of simply (possibly) unpoisoning `temp` here, we should
+ // emit a read barrier for the previous class reference load.
+ // However this is not required in practice, as this is an
+ // intermediate/temporary reference and because the current
+ // concurrent copying collector keeps the from-space memory
+ // intact/accessible until the end of the marking phase (the
+ // concurrent copying collector may not in the future).
+ __ MaybeUnpoisonHeapReference(temp);
// temp = temp->GetMethodAt(method_offset);
__ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
// T9 = temp->GetEntryPoint();
@@ -3666,8 +3741,8 @@
case HLoadClass::LoadKind::kBssEntry: {
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
- codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT);
- __ Lwu(out, AT, /* placeholder */ 0x5678);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out);
+ GenerateGcRootFieldLoad(cls, out_loc, out, /* placeholder */ 0x5678);
generate_null_check = true;
break;
}
@@ -3773,8 +3848,8 @@
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
- codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT);
- __ Lwu(out, AT, /* placeholder */ 0x5678);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out);
+ GenerateGcRootFieldLoad(load, out_loc, out, /* placeholder */ 0x5678);
SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load);
codegen_->AddSlowPath(slow_path);
__ Beqzc(out, slow_path->GetEntryLabel());
@@ -3944,6 +4019,8 @@
}
void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
+ // Note: if heap poisoning is enabled, the entry point takes care
+ // of poisoning the reference.
codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
}
@@ -3961,6 +4038,8 @@
}
void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) {
+ // Note: if heap poisoning is enabled, the entry point takes care
+ // of poisoning the reference.
if (instruction->IsStringAlloc()) {
// String is allocated through StringFactory. Call NewEmptyString entry point.
GpuRegister temp = instruction->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index 5539413..1cd65c1 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -377,6 +377,53 @@
return false;
}
+bool InductionVarRange::IsUnitStride(HInstruction* instruction,
+ /*out*/ HInstruction** offset) const {
+ HLoopInformation* loop = nullptr;
+ HInductionVarAnalysis::InductionInfo* info = nullptr;
+ HInductionVarAnalysis::InductionInfo* trip = nullptr;
+ if (HasInductionInfo(instruction, instruction, &loop, &info, &trip)) {
+ if (info->induction_class == HInductionVarAnalysis::kLinear &&
+ info->op_b->operation == HInductionVarAnalysis::kFetch) {
+ int64_t stride_value = 0;
+ if (IsConstant(info->op_a, kExact, &stride_value) && stride_value == 1) {
+ int64_t off_value = 0;
+ if (IsConstant(info->op_b, kExact, &off_value) && off_value == 0) {
+ *offset = nullptr;
+ } else {
+ *offset = info->op_b->fetch;
+ }
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+HInstruction* InductionVarRange::GenerateTripCount(HLoopInformation* loop,
+ HGraph* graph,
+ HBasicBlock* block) {
+ HInductionVarAnalysis::InductionInfo *trip =
+ induction_analysis_->LookupInfo(loop, GetLoopControl(loop));
+ if (trip != nullptr && !IsUnsafeTripCount(trip)) {
+ HInstruction* taken_test = nullptr;
+ HInstruction* trip_expr = nullptr;
+ if (IsBodyTripCount(trip)) {
+ if (!GenerateCode(trip->op_b, nullptr, graph, block, &taken_test, false, false)) {
+ return nullptr;
+ }
+ }
+ if (GenerateCode(trip->op_a, nullptr, graph, block, &trip_expr, false, false)) {
+ if (taken_test != nullptr) {
+ HInstruction* zero = graph->GetConstant(trip->type, 0);
+ trip_expr = Insert(block, new (graph->GetArena()) HSelect(taken_test, trip_expr, zero, kNoDexPc));
+ }
+ return trip_expr;
+ }
+ }
+ return nullptr;
+}
+
//
// Private class methods.
//
@@ -1157,12 +1204,15 @@
HInstruction* opb = nullptr;
switch (info->induction_class) {
case HInductionVarAnalysis::kInvariant:
- // Invariants (note that even though is_min does not impact code generation for
- // invariants, some effort is made to keep this parameter consistent).
+ // Invariants (note that since invariants only have other invariants as
+ // sub expressions, viz. no induction, there is no need to adjust is_min).
switch (info->operation) {
case HInductionVarAnalysis::kAdd:
- case HInductionVarAnalysis::kRem: // no proper is_min for second arg
- case HInductionVarAnalysis::kXor: // no proper is_min for second arg
+ case HInductionVarAnalysis::kSub:
+ case HInductionVarAnalysis::kMul:
+ case HInductionVarAnalysis::kDiv:
+ case HInductionVarAnalysis::kRem:
+ case HInductionVarAnalysis::kXor:
case HInductionVarAnalysis::kLT:
case HInductionVarAnalysis::kLE:
case HInductionVarAnalysis::kGT:
@@ -1174,6 +1224,12 @@
switch (info->operation) {
case HInductionVarAnalysis::kAdd:
operation = new (graph->GetArena()) HAdd(type, opa, opb); break;
+ case HInductionVarAnalysis::kSub:
+ operation = new (graph->GetArena()) HSub(type, opa, opb); break;
+ case HInductionVarAnalysis::kMul:
+ operation = new (graph->GetArena()) HMul(type, opa, opb, kNoDexPc); break;
+ case HInductionVarAnalysis::kDiv:
+ operation = new (graph->GetArena()) HDiv(type, opa, opb, kNoDexPc); break;
case HInductionVarAnalysis::kRem:
operation = new (graph->GetArena()) HRem(type, opa, opb, kNoDexPc); break;
case HInductionVarAnalysis::kXor:
@@ -1194,16 +1250,7 @@
return true;
}
break;
- case HInductionVarAnalysis::kSub: // second reversed!
- if (GenerateCode(info->op_a, trip, graph, block, &opa, in_body, is_min) &&
- GenerateCode(info->op_b, trip, graph, block, &opb, in_body, !is_min)) {
- if (graph != nullptr) {
- *result = Insert(block, new (graph->GetArena()) HSub(type, opa, opb));
- }
- return true;
- }
- break;
- case HInductionVarAnalysis::kNeg: // reversed!
+ case HInductionVarAnalysis::kNeg:
if (GenerateCode(info->op_b, trip, graph, block, &opb, in_body, !is_min)) {
if (graph != nullptr) {
*result = Insert(block, new (graph->GetArena()) HNeg(type, opb));
@@ -1240,9 +1287,9 @@
}
}
break;
- default:
- break;
- }
+ case HInductionVarAnalysis::kNop:
+ LOG(FATAL) << "unexpected invariant nop";
+ } // switch invariant operation
break;
case HInductionVarAnalysis::kLinear: {
// Linear induction a * i + b, for normalized 0 <= i < TC. For ranges, this should
@@ -1293,7 +1340,7 @@
}
break;
}
- }
+ } // switch induction class
}
return false;
}
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
index 6c424b7..0858d73 100644
--- a/compiler/optimizing/induction_var_range.h
+++ b/compiler/optimizing/induction_var_range.h
@@ -24,7 +24,8 @@
/**
* This class implements range analysis on expressions within loops. It takes the results
* of induction variable analysis in the constructor and provides a public API to obtain
- * a conservative lower and upper bound value on each instruction in the HIR.
+ * a conservative lower and upper bound value or last value on each instruction in the HIR.
+ * The public API also provides a few general-purpose utility methods related to induction.
*
* The range analysis is done with a combination of symbolic and partial integral evaluation
* of expressions. The analysis avoids complications with wrap-around arithmetic on the integral
@@ -154,6 +155,19 @@
*/
bool IsFinite(HLoopInformation* loop, /*out*/ int64_t* tc) const;
+ /**
+ * Checks if instruction is a unit stride induction inside the closest enveloping loop.
+ * Returns invariant offset on success.
+ */
+ bool IsUnitStride(HInstruction* instruction, /*out*/ HInstruction** offset) const;
+
+ /**
+ * Generates the trip count expression for the given loop. Code is generated in given block
+ * and graph. The expression is guarded by a taken test if needed. Returns the trip count
+ * expression on success or null otherwise.
+ */
+ HInstruction* GenerateTripCount(HLoopInformation* loop, HGraph* graph, HBasicBlock* block);
+
private:
/*
* Enum used in IsConstant() request.
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index d81817f..fcdf8eb 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -48,6 +48,11 @@
EXPECT_EQ(v1.is_known, v2.is_known);
}
+ void ExpectInt(int32_t value, HInstruction* i) {
+ ASSERT_TRUE(i->IsIntConstant());
+ EXPECT_EQ(value, i->AsIntConstant()->GetValue());
+ }
+
//
// Construction methods.
//
@@ -757,10 +762,20 @@
// Last value (unsimplified).
HInstruction* last = range_.GenerateLastValue(phi, graph_, loop_preheader_);
ASSERT_TRUE(last->IsAdd());
- ASSERT_TRUE(last->InputAt(0)->IsIntConstant());
- EXPECT_EQ(1000, last->InputAt(0)->AsIntConstant()->GetValue());
- ASSERT_TRUE(last->InputAt(1)->IsIntConstant());
- EXPECT_EQ(0, last->InputAt(1)->AsIntConstant()->GetValue());
+ ExpectInt(1000, last->InputAt(0));
+ ExpectInt(0, last->InputAt(1));
+
+ // Loop logic.
+ int64_t tc = 0;
+ EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc));
+ EXPECT_EQ(1000, tc);
+ HInstruction* offset = nullptr;
+ EXPECT_TRUE(range_.IsUnitStride(phi, &offset));
+ EXPECT_TRUE(offset == nullptr);
+ HInstruction* tce = range_.GenerateTripCount(
+ loop_header_->GetLoopInformation(), graph_, loop_preheader_);
+ ASSERT_TRUE(tce != nullptr);
+ ExpectInt(1000, tce);
}
TEST_F(InductionVarRangeTest, ConstantTripCountDown) {
@@ -799,15 +814,27 @@
// Last value (unsimplified).
HInstruction* last = range_.GenerateLastValue(phi, graph_, loop_preheader_);
ASSERT_TRUE(last->IsSub());
- ASSERT_TRUE(last->InputAt(0)->IsIntConstant());
- EXPECT_EQ(1000, last->InputAt(0)->AsIntConstant()->GetValue());
+ ExpectInt(1000, last->InputAt(0));
ASSERT_TRUE(last->InputAt(1)->IsNeg());
last = last->InputAt(1)->InputAt(0);
ASSERT_TRUE(last->IsSub());
- ASSERT_TRUE(last->InputAt(0)->IsIntConstant());
- EXPECT_EQ(0, last->InputAt(0)->AsIntConstant()->GetValue());
- ASSERT_TRUE(last->InputAt(1)->IsIntConstant());
- EXPECT_EQ(1000, last->InputAt(1)->AsIntConstant()->GetValue());
+ ExpectInt(0, last->InputAt(0));
+ ExpectInt(1000, last->InputAt(1));
+
+ // Loop logic.
+ int64_t tc = 0;
+ EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc));
+ EXPECT_EQ(1000, tc);
+ HInstruction* offset = nullptr;
+ EXPECT_FALSE(range_.IsUnitStride(phi, &offset));
+ HInstruction* tce = range_.GenerateTripCount(
+ loop_header_->GetLoopInformation(), graph_, loop_preheader_);
+ ASSERT_TRUE(tce != nullptr);
+ ASSERT_TRUE(tce->IsNeg());
+ last = tce->InputAt(0);
+ EXPECT_TRUE(last->IsSub());
+ ExpectInt(0, last->InputAt(0));
+ ExpectInt(1000, last->InputAt(1));
}
TEST_F(InductionVarRangeTest, SymbolicTripCountUp) {
@@ -851,27 +878,22 @@
// Verify lower is 0+0.
ASSERT_TRUE(lower != nullptr);
ASSERT_TRUE(lower->IsAdd());
- ASSERT_TRUE(lower->InputAt(0)->IsIntConstant());
- EXPECT_EQ(0, lower->InputAt(0)->AsIntConstant()->GetValue());
- ASSERT_TRUE(lower->InputAt(1)->IsIntConstant());
- EXPECT_EQ(0, lower->InputAt(1)->AsIntConstant()->GetValue());
+ ExpectInt(0, lower->InputAt(0));
+ ExpectInt(0, lower->InputAt(1));
// Verify upper is (V-1)+0.
ASSERT_TRUE(upper != nullptr);
ASSERT_TRUE(upper->IsAdd());
ASSERT_TRUE(upper->InputAt(0)->IsSub());
EXPECT_TRUE(upper->InputAt(0)->InputAt(0)->IsParameterValue());
- ASSERT_TRUE(upper->InputAt(0)->InputAt(1)->IsIntConstant());
- EXPECT_EQ(1, upper->InputAt(0)->InputAt(1)->AsIntConstant()->GetValue());
- ASSERT_TRUE(upper->InputAt(1)->IsIntConstant());
- EXPECT_EQ(0, upper->InputAt(1)->AsIntConstant()->GetValue());
+ ExpectInt(1, upper->InputAt(0)->InputAt(1));
+ ExpectInt(0, upper->InputAt(1));
// Verify taken-test is 0<V.
HInstruction* taken = range_.GenerateTakenTest(increment_, graph_, loop_preheader_);
ASSERT_TRUE(taken != nullptr);
ASSERT_TRUE(taken->IsLessThan());
- ASSERT_TRUE(taken->InputAt(0)->IsIntConstant());
- EXPECT_EQ(0, taken->InputAt(0)->AsIntConstant()->GetValue());
+ ExpectInt(0, taken->InputAt(0));
EXPECT_TRUE(taken->InputAt(1)->IsParameterValue());
// Replacement.
@@ -880,6 +902,21 @@
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(1), v1);
ExpectEqual(Value(y_, 1, 0), v2);
+
+ // Loop logic.
+ int64_t tc = 0;
+ EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc));
+ EXPECT_EQ(0, tc); // unknown
+ HInstruction* offset = nullptr;
+ EXPECT_TRUE(range_.IsUnitStride(phi, &offset));
+ EXPECT_TRUE(offset == nullptr);
+ HInstruction* tce = range_.GenerateTripCount(
+ loop_header_->GetLoopInformation(), graph_, loop_preheader_);
+ ASSERT_TRUE(tce != nullptr);
+ EXPECT_TRUE(tce->IsSelect()); // guarded by taken-test
+ ExpectInt(0, tce->InputAt(0));
+ EXPECT_TRUE(tce->InputAt(1)->IsParameterValue());
+ EXPECT_TRUE(tce->InputAt(2)->IsLessThan());
}
TEST_F(InductionVarRangeTest, SymbolicTripCountDown) {
@@ -923,32 +960,26 @@
// Verify lower is 1000-((1000-V)-1).
ASSERT_TRUE(lower != nullptr);
ASSERT_TRUE(lower->IsSub());
- ASSERT_TRUE(lower->InputAt(0)->IsIntConstant());
- EXPECT_EQ(1000, lower->InputAt(0)->AsIntConstant()->GetValue());
+ ExpectInt(1000, lower->InputAt(0));
lower = lower->InputAt(1);
ASSERT_TRUE(lower->IsSub());
- ASSERT_TRUE(lower->InputAt(1)->IsIntConstant());
- EXPECT_EQ(1, lower->InputAt(1)->AsIntConstant()->GetValue());
+ ExpectInt(1, lower->InputAt(1));
lower = lower->InputAt(0);
ASSERT_TRUE(lower->IsSub());
- ASSERT_TRUE(lower->InputAt(0)->IsIntConstant());
- EXPECT_EQ(1000, lower->InputAt(0)->AsIntConstant()->GetValue());
+ ExpectInt(1000, lower->InputAt(0));
EXPECT_TRUE(lower->InputAt(1)->IsParameterValue());
// Verify upper is 1000-0.
ASSERT_TRUE(upper != nullptr);
ASSERT_TRUE(upper->IsSub());
- ASSERT_TRUE(upper->InputAt(0)->IsIntConstant());
- EXPECT_EQ(1000, upper->InputAt(0)->AsIntConstant()->GetValue());
- ASSERT_TRUE(upper->InputAt(1)->IsIntConstant());
- EXPECT_EQ(0, upper->InputAt(1)->AsIntConstant()->GetValue());
+ ExpectInt(1000, upper->InputAt(0));
+ ExpectInt(0, upper->InputAt(1));
// Verify taken-test is 1000>V.
HInstruction* taken = range_.GenerateTakenTest(increment_, graph_, loop_preheader_);
ASSERT_TRUE(taken != nullptr);
ASSERT_TRUE(taken->IsGreaterThan());
- ASSERT_TRUE(taken->InputAt(0)->IsIntConstant());
- EXPECT_EQ(1000, taken->InputAt(0)->AsIntConstant()->GetValue());
+ ExpectInt(1000, taken->InputAt(0));
EXPECT_TRUE(taken->InputAt(1)->IsParameterValue());
// Replacement.
@@ -957,6 +988,23 @@
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(y_, 1, 0), v1);
ExpectEqual(Value(999), v2);
+
+ // Loop logic.
+ int64_t tc = 0;
+ EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc));
+ EXPECT_EQ(0, tc); // unknown
+ HInstruction* offset = nullptr;
+ EXPECT_FALSE(range_.IsUnitStride(phi, &offset));
+ HInstruction* tce = range_.GenerateTripCount(
+ loop_header_->GetLoopInformation(), graph_, loop_preheader_);
+ ASSERT_TRUE(tce != nullptr);
+ EXPECT_TRUE(tce->IsSelect()); // guarded by taken-test
+ ExpectInt(0, tce->InputAt(0));
+ EXPECT_TRUE(tce->InputAt(1)->IsSub());
+ EXPECT_TRUE(tce->InputAt(2)->IsGreaterThan());
+ tce = tce->InputAt(1);
+  ExpectInt(1000, tce->InputAt(0));
+  EXPECT_TRUE(tce->InputAt(1)->IsParameterValue());
}
} // namespace art
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 17d683f..8df80ad 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -19,6 +19,7 @@
#include "art_method.h"
#include "class_linker.h"
#include "driver/compiler_driver.h"
+#include "driver/compiler_options.h"
#include "invoke_type.h"
#include "mirror/dex_cache-inl.h"
#include "nodes.h"
@@ -178,4 +179,112 @@
return os;
}
+void IntrinsicVisitor::ComputeIntegerValueOfLocations(HInvoke* invoke,
+ CodeGenerator* codegen,
+ Location return_location,
+ Location first_argument_location) {
+ if (Runtime::Current()->IsAotCompiler()) {
+ if (codegen->GetCompilerOptions().IsBootImage() ||
+ codegen->GetCompilerOptions().GetCompilePic()) {
+ // TODO(ngeoffray): Support boot image compilation.
+ return;
+ }
+ }
+
+ IntegerValueOfInfo info = ComputeIntegerValueOfInfo();
+
+ // Most common case is that we have found all we needed (classes are initialized
+ // and in the boot image). Bail if not.
+ if (info.integer_cache == nullptr ||
+ info.integer == nullptr ||
+ info.cache == nullptr ||
+ info.value_offset == 0 ||
+ // low and high cannot be 0, per the spec.
+ info.low == 0 ||
+ info.high == 0) {
+ LOG(INFO) << "Integer.valueOf will not be optimized";
+ return;
+ }
+
+  // The intrinsic will call the runtime if it needs to allocate a j.l.Integer.
+ LocationSummary* locations = new (invoke->GetBlock()->GetGraph()->GetArena()) LocationSummary(
+ invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
+ if (!invoke->InputAt(0)->IsConstant()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+ locations->AddTemp(first_argument_location);
+ locations->SetOut(return_location);
+}
+
+IntrinsicVisitor::IntegerValueOfInfo IntrinsicVisitor::ComputeIntegerValueOfInfo() {
+  // Note that we could cache all of the data looked up here, but there's no good
+ // location for it. We don't want to add it to WellKnownClasses, to avoid creating global
+ // jni values. Adding it as state to the compiler singleton seems like wrong
+ // separation of concerns.
+ // The need for this data should be pretty rare though.
+
+ // The most common case is that the classes are in the boot image and initialized,
+ // which is easy to generate code for. We bail if not.
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ Runtime* runtime = Runtime::Current();
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ gc::Heap* heap = runtime->GetHeap();
+ IntegerValueOfInfo info;
+ info.integer_cache = class_linker->FindSystemClass(self, "Ljava/lang/Integer$IntegerCache;");
+ if (info.integer_cache == nullptr) {
+ self->ClearException();
+ return info;
+ }
+ if (!heap->ObjectIsInBootImageSpace(info.integer_cache) || !info.integer_cache->IsInitialized()) {
+ // Optimization only works if the class is initialized and in the boot image.
+ return info;
+ }
+ info.integer = class_linker->FindSystemClass(self, "Ljava/lang/Integer;");
+ if (info.integer == nullptr) {
+ self->ClearException();
+ return info;
+ }
+ if (!heap->ObjectIsInBootImageSpace(info.integer) || !info.integer->IsInitialized()) {
+ // Optimization only works if the class is initialized and in the boot image.
+ return info;
+ }
+
+ ArtField* field = info.integer_cache->FindDeclaredStaticField("cache", "[Ljava/lang/Integer;");
+ if (field == nullptr) {
+ return info;
+ }
+ info.cache = static_cast<mirror::ObjectArray<mirror::Object>*>(
+ field->GetObject(info.integer_cache).Ptr());
+ if (info.cache == nullptr) {
+ return info;
+ }
+
+ if (!heap->ObjectIsInBootImageSpace(info.cache)) {
+ // Optimization only works if the object is in the boot image.
+ return info;
+ }
+
+ field = info.integer->FindDeclaredInstanceField("value", "I");
+ if (field == nullptr) {
+ return info;
+ }
+ info.value_offset = field->GetOffset().Int32Value();
+
+ field = info.integer_cache->FindDeclaredStaticField("low", "I");
+ if (field == nullptr) {
+ return info;
+ }
+ info.low = field->GetInt(info.integer_cache);
+
+ field = info.integer_cache->FindDeclaredStaticField("high", "I");
+ if (field == nullptr) {
+ return info;
+ }
+ info.high = field->GetInt(info.integer_cache);
+
+ DCHECK_EQ(info.cache->GetLength(), info.high - info.low + 1);
+ return info;
+}
+
} // namespace art
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 6425e13..9da5a7f 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -113,6 +113,39 @@
codegen->GetMoveResolver()->EmitNativeCode(¶llel_move);
}
+ static void ComputeIntegerValueOfLocations(HInvoke* invoke,
+ CodeGenerator* codegen,
+ Location return_location,
+ Location first_argument_location);
+
+ // Temporary data structure for holding Integer.valueOf useful data. We only
+ // use it if the mirror::Class* are in the boot image, so it is fine to keep raw
+ // mirror::Class pointers in this structure.
+ struct IntegerValueOfInfo {
+ IntegerValueOfInfo()
+ : integer_cache(nullptr),
+ integer(nullptr),
+ cache(nullptr),
+ low(0),
+ high(0),
+ value_offset(0) {}
+
+  // The java.lang.Integer$IntegerCache class.
+ mirror::Class* integer_cache;
+ // The java.lang.Integer class.
+ mirror::Class* integer;
+ // Value of java.lang.IntegerCache#cache.
+ mirror::ObjectArray<mirror::Object>* cache;
+ // Value of java.lang.IntegerCache#low.
+ int32_t low;
+ // Value of java.lang.IntegerCache#high.
+ int32_t high;
+ // The offset of java.lang.Integer.value.
+ int32_t value_offset;
+ };
+
+ static IntegerValueOfInfo ComputeIntegerValueOfInfo();
+
protected:
IntrinsicVisitor() {}
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index c262cf9..86000e9 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -129,6 +129,7 @@
IntrinsicLocationsBuilderARM::IntrinsicLocationsBuilderARM(CodeGeneratorARM* codegen)
: arena_(codegen->GetGraph()->GetArena()),
+ codegen_(codegen),
assembler_(codegen->GetAssembler()),
features_(codegen->GetInstructionSetFeatures()) {}
@@ -2644,6 +2645,75 @@
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderARM::VisitIntegerValueOf(HInvoke* invoke) {
+ InvokeRuntimeCallingConvention calling_convention;
+ IntrinsicVisitor::ComputeIntegerValueOfLocations(
+ invoke,
+ codegen_,
+ Location::RegisterLocation(R0),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void IntrinsicCodeGeneratorARM::VisitIntegerValueOf(HInvoke* invoke) {
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ LocationSummary* locations = invoke->GetLocations();
+ ArmAssembler* const assembler = GetAssembler();
+
+ Register out = locations->Out().AsRegister<Register>();
+ InvokeRuntimeCallingConvention calling_convention;
+ Register argument = calling_convention.GetRegisterAt(0);
+ if (invoke->InputAt(0)->IsConstant()) {
+ int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+ if (value >= info.low && value <= info.high) {
+ // Just embed the j.l.Integer in the code.
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Object* boxed = info.cache->Get(value + (-info.low));
+ DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
+ __ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address));
+ } else {
+ // Allocate and initialize a new j.l.Integer.
+ // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
+ // JIT object table.
+ uint32_t address =
+ dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ LoadLiteral(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ __ LoadImmediate(IP, value);
+ __ StoreToOffset(kStoreWord, IP, out, info.value_offset);
+ // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
+ // one.
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
+ }
+ } else {
+ Register in = locations->InAt(0).AsRegister<Register>();
+ // Check bounds of our cache.
+ __ AddConstant(out, in, -info.low);
+ __ CmpConstant(out, info.high - info.low + 1);
+ Label allocate, done;
+ __ b(&allocate, HS);
+ // If the value is within the bounds, load the j.l.Integer directly from the array.
+ uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
+ __ LoadLiteral(IP, codegen_->DeduplicateBootImageAddressLiteral(data_offset + address));
+ codegen_->LoadFromShiftedRegOffset(Primitive::kPrimNot, locations->Out(), IP, out);
+ __ MaybeUnpoisonHeapReference(out);
+ __ b(&done);
+ __ Bind(&allocate);
+ // Otherwise allocate and initialize a new j.l.Integer.
+ address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ LoadLiteral(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ __ StoreToOffset(kStoreWord, in, out, info.value_offset);
+ // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
+ // one.
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
+ __ Bind(&done);
+ }
+}
+
UNIMPLEMENTED_INTRINSIC(ARM, MathMinDoubleDouble)
UNIMPLEMENTED_INTRINSIC(ARM, MathMinFloatFloat)
UNIMPLEMENTED_INTRINSIC(ARM, MathMaxDoubleDouble)
diff --git a/compiler/optimizing/intrinsics_arm.h b/compiler/optimizing/intrinsics_arm.h
index 7f20ea4..2840863 100644
--- a/compiler/optimizing/intrinsics_arm.h
+++ b/compiler/optimizing/intrinsics_arm.h
@@ -51,6 +51,7 @@
private:
ArenaAllocator* arena_;
+ CodeGenerator* codegen_;
ArmAssembler* assembler_;
const ArmInstructionSetFeatures& features_;
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 86e5429..6c3938c 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2924,6 +2924,79 @@
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderARM64::VisitIntegerValueOf(HInvoke* invoke) {
+ InvokeRuntimeCallingConvention calling_convention;
+ IntrinsicVisitor::ComputeIntegerValueOfLocations(
+ invoke,
+ codegen_,
+ calling_convention.GetReturnLocation(Primitive::kPrimNot),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
+}
+
+void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) {
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ LocationSummary* locations = invoke->GetLocations();
+ MacroAssembler* masm = GetVIXLAssembler();
+
+ Register out = RegisterFrom(locations->Out(), Primitive::kPrimNot);
+ UseScratchRegisterScope temps(masm);
+ Register temp = temps.AcquireW();
+ InvokeRuntimeCallingConvention calling_convention;
+ Register argument = calling_convention.GetRegisterAt(0);
+ if (invoke->InputAt(0)->IsConstant()) {
+ int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+ if (value >= info.low && value <= info.high) {
+ // Just embed the j.l.Integer in the code.
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Object* boxed = info.cache->Get(value + (-info.low));
+ DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
+ __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
+ } else {
+ // Allocate and initialize a new j.l.Integer.
+ // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
+ // JIT object table.
+ uint32_t address =
+ dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ Ldr(argument.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ __ Mov(temp.W(), value);
+ __ Str(temp.W(), HeapOperand(out.W(), info.value_offset));
+ // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
+ // one.
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
+ }
+ } else {
+ Register in = RegisterFrom(locations->InAt(0), Primitive::kPrimInt);
+ // Check bounds of our cache.
+ __ Add(out.W(), in.W(), -info.low);
+ __ Cmp(out.W(), info.high - info.low + 1);
+ vixl::aarch64::Label allocate, done;
+ __ B(&allocate, hs);
+ // If the value is within the bounds, load the j.l.Integer directly from the array.
+ uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
+ __ Ldr(temp.W(), codegen_->DeduplicateBootImageAddressLiteral(data_offset + address));
+ MemOperand source = HeapOperand(
+ temp, out.X(), LSL, Primitive::ComponentSizeShift(Primitive::kPrimNot));
+ codegen_->Load(Primitive::kPrimNot, out, source);
+ codegen_->GetAssembler()->MaybeUnpoisonHeapReference(out);
+ __ B(&done);
+ __ Bind(&allocate);
+ // Otherwise allocate and initialize a new j.l.Integer.
+ address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ Ldr(argument.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ __ Str(in.W(), HeapOperand(out.W(), info.value_offset));
+ // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
+ // one.
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
+ __ Bind(&done);
+ }
+}
+
UNIMPLEMENTED_INTRINSIC(ARM64, IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM64, LongHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM64, IntegerLowestOneBit)
diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h
index 28e41cb..3c53517 100644
--- a/compiler/optimizing/intrinsics_arm64.h
+++ b/compiler/optimizing/intrinsics_arm64.h
@@ -38,7 +38,8 @@
class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor {
public:
- explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* arena) : arena_(arena) {}
+ explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* arena, CodeGeneratorARM64* codegen)
+ : arena_(arena), codegen_(codegen) {}
// Define visitor methods.
@@ -56,6 +57,7 @@
private:
ArenaAllocator* arena_;
+ CodeGeneratorARM64* codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARM64);
};
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 70a3d38..aa89dea 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -203,6 +203,7 @@
IntrinsicLocationsBuilderARMVIXL::IntrinsicLocationsBuilderARMVIXL(CodeGeneratorARMVIXL* codegen)
: arena_(codegen->GetGraph()->GetArena()),
+ codegen_(codegen),
assembler_(codegen->GetAssembler()),
features_(codegen->GetInstructionSetFeatures()) {}
@@ -2988,6 +2989,77 @@
__ Vrintm(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
}
+void IntrinsicLocationsBuilderARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
+ InvokeRuntimeCallingConventionARMVIXL calling_convention;
+ IntrinsicVisitor::ComputeIntegerValueOfLocations(
+ invoke,
+ codegen_,
+ LocationFrom(r0),
+ LocationFrom(calling_convention.GetRegisterAt(0)));
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ LocationSummary* locations = invoke->GetLocations();
+ ArmVIXLAssembler* const assembler = GetAssembler();
+
+ vixl32::Register out = RegisterFrom(locations->Out());
+ UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ InvokeRuntimeCallingConventionARMVIXL calling_convention;
+ vixl32::Register argument = calling_convention.GetRegisterAt(0);
+ if (invoke->InputAt(0)->IsConstant()) {
+ int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+ if (value >= info.low && value <= info.high) {
+ // Just embed the j.l.Integer in the code.
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Object* boxed = info.cache->Get(value + (-info.low));
+ DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
+ __ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
+ } else {
+ // Allocate and initialize a new j.l.Integer.
+ // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
+ // JIT object table.
+ uint32_t address =
+ dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ Ldr(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ __ Mov(temp, value);
+ assembler->StoreToOffset(kStoreWord, temp, out, info.value_offset);
+ // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
+ // one.
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
+ }
+ } else {
+ vixl32::Register in = RegisterFrom(locations->InAt(0));
+ // Check bounds of our cache.
+ __ Add(out, in, -info.low);
+ __ Cmp(out, info.high - info.low + 1);
+ vixl32::Label allocate, done;
+ __ B(hs, &allocate);
+ // If the value is within the bounds, load the j.l.Integer directly from the array.
+ uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
+ __ Ldr(temp, codegen_->DeduplicateBootImageAddressLiteral(data_offset + address));
+ codegen_->LoadFromShiftedRegOffset(Primitive::kPrimNot, locations->Out(), temp, out);
+ assembler->MaybeUnpoisonHeapReference(out);
+ __ B(&done);
+ __ Bind(&allocate);
+ // Otherwise allocate and initialize a new j.l.Integer.
+ address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ Ldr(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ assembler->StoreToOffset(kStoreWord, in, out, info.value_offset);
+ // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
+ // one.
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
+ __ Bind(&done);
+ }
+}
+
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundDouble) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundFloat) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeCASLong) // High register pressure.
diff --git a/compiler/optimizing/intrinsics_arm_vixl.h b/compiler/optimizing/intrinsics_arm_vixl.h
index 6e79cb7..023cba1 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.h
+++ b/compiler/optimizing/intrinsics_arm_vixl.h
@@ -47,6 +47,7 @@
private:
ArenaAllocator* arena_;
+ CodeGenerator* codegen_;
ArmVIXLAssembler* assembler_;
const ArmInstructionSetFeatures& features_;
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 5999677..ba006ed 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -1572,6 +1572,10 @@
__ Lwr(trg, TMP, 0);
__ Lwl(trg, TMP, 3);
}
+
+ if (type == Primitive::kPrimNot) {
+ __ MaybeUnpoisonHeapReference(trg);
+ }
}
}
@@ -1663,6 +1667,11 @@
if ((type == Primitive::kPrimInt) || (type == Primitive::kPrimNot)) {
Register value = locations->InAt(3).AsRegister<Register>();
+ if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
+ __ PoisonHeapReference(AT, value);
+ value = AT;
+ }
+
if (is_R6) {
__ Sw(value, TMP, 0);
} else {
@@ -1852,13 +1861,23 @@
codegen->MarkGCCard(base, value, value_can_be_null);
}
+ MipsLabel loop_head, exit_loop;
+ __ Addu(TMP, base, offset_lo);
+
+ if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
+ __ PoisonHeapReference(expected);
+ // Do not poison `value`, if it is the same register as
+ // `expected`, which has just been poisoned.
+ if (value != expected) {
+ __ PoisonHeapReference(value);
+ }
+ }
+
// do {
// tmp_value = [tmp_ptr] - expected;
// } while (tmp_value == 0 && failure([tmp_ptr] <- r_new_value));
// result = tmp_value != 0;
- MipsLabel loop_head, exit_loop;
- __ Addu(TMP, base, offset_lo);
__ Sync(0);
__ Bind(&loop_head);
if ((type == Primitive::kPrimInt) || (type == Primitive::kPrimNot)) {
@@ -1868,8 +1887,8 @@
__ LlR2(out, TMP);
}
} else {
- LOG(FATAL) << "Unsupported op size " << type;
- UNREACHABLE();
+ LOG(FATAL) << "Unsupported op size " << type;
+ UNREACHABLE();
}
__ Subu(out, out, expected); // If we didn't get the 'expected'
__ Sltiu(out, out, 1); // value, set 'out' to false, and
@@ -1894,6 +1913,15 @@
// cycle atomically then retry.
__ Bind(&exit_loop);
__ Sync(0);
+
+ if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
+ __ UnpoisonHeapReference(expected);
+ // Do not unpoison `value`, if it is the same register as
+ // `expected`, which has just been unpoisoned.
+ if (value != expected) {
+ __ UnpoisonHeapReference(value);
+ }
+ }
}
// boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x)
@@ -2686,6 +2714,8 @@
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetObject)
+UNIMPLEMENTED_INTRINSIC(MIPS, IntegerValueOf)
+
UNREACHABLE_INTRINSICS(MIPS)
#undef __
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 10da5c2..21c5074 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1187,6 +1187,7 @@
case Primitive::kPrimNot:
__ Lwu(trg, TMP, 0);
+ __ MaybeUnpoisonHeapReference(trg);
break;
case Primitive::kPrimLong:
@@ -1285,7 +1286,12 @@
switch (type) {
case Primitive::kPrimInt:
case Primitive::kPrimNot:
- __ Sw(value, TMP, 0);
+ if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
+ __ PoisonHeapReference(AT, value);
+ __ Sw(AT, TMP, 0);
+ } else {
+ __ Sw(value, TMP, 0);
+ }
break;
case Primitive::kPrimLong:
@@ -1454,13 +1460,23 @@
codegen->MarkGCCard(base, value, value_can_be_null);
}
+ Mips64Label loop_head, exit_loop;
+ __ Daddu(TMP, base, offset);
+
+ if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
+ __ PoisonHeapReference(expected);
+ // Do not poison `value`, if it is the same register as
+ // `expected`, which has just been poisoned.
+ if (value != expected) {
+ __ PoisonHeapReference(value);
+ }
+ }
+
// do {
// tmp_value = [tmp_ptr] - expected;
// } while (tmp_value == 0 && failure([tmp_ptr] <- r_new_value));
// result = tmp_value != 0;
- Mips64Label loop_head, exit_loop;
- __ Daddu(TMP, base, offset);
__ Sync(0);
__ Bind(&loop_head);
if (type == Primitive::kPrimLong) {
@@ -1469,6 +1485,11 @@
// Note: We will need a read barrier here, when read barrier
// support is added to the MIPS64 back end.
__ Ll(out, TMP);
+ if (type == Primitive::kPrimNot) {
+ // The LL instruction sign-extends the 32-bit value, but
+ // 32-bit references must be zero-extended. Zero-extend `out`.
+ __ Dext(out, out, 0, 32);
+ }
}
__ Dsubu(out, out, expected); // If we didn't get the 'expected'
__ Sltiu(out, out, 1); // value, set 'out' to false, and
@@ -1487,6 +1508,15 @@
// cycle atomically then retry.
__ Bind(&exit_loop);
__ Sync(0);
+
+ if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
+ __ UnpoisonHeapReference(expected);
+ // Do not unpoison `value`, if it is the same register as
+ // `expected`, which has just been unpoisoned.
+ if (value != expected) {
+ __ UnpoisonHeapReference(value);
+ }
+ }
}
// boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x)
@@ -2080,6 +2110,8 @@
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetObject)
+UNIMPLEMENTED_INTRINSIC(MIPS64, IntegerValueOf)
+
UNREACHABLE_INTRINSICS(MIPS64)
#undef __
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index e1b7ea5..a671788 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -3335,6 +3335,65 @@
__ Bind(intrinsic_slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderX86::VisitIntegerValueOf(HInvoke* invoke) {
+ InvokeRuntimeCallingConvention calling_convention;
+ IntrinsicVisitor::ComputeIntegerValueOfLocations(
+ invoke,
+ codegen_,
+ Location::RegisterLocation(EAX),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void IntrinsicCodeGeneratorX86::VisitIntegerValueOf(HInvoke* invoke) {
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ LocationSummary* locations = invoke->GetLocations();
+ X86Assembler* assembler = GetAssembler();
+
+ Register out = locations->Out().AsRegister<Register>();
+ InvokeRuntimeCallingConvention calling_convention;
+ if (invoke->InputAt(0)->IsConstant()) {
+ int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+ if (value >= info.low && value <= info.high) {
+ // Just embed the j.l.Integer in the code.
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Object* boxed = info.cache->Get(value + (-info.low));
+ DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
+ __ movl(out, Immediate(address));
+ } else {
+ // Allocate and initialize a new j.l.Integer.
+ // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
+ // JIT object table.
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ movl(calling_convention.GetRegisterAt(0), Immediate(address));
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ __ movl(Address(out, info.value_offset), Immediate(value));
+ }
+ } else {
+ Register in = locations->InAt(0).AsRegister<Register>();
+ // Check bounds of our cache.
+ __ leal(out, Address(in, -info.low));
+ __ cmpl(out, Immediate(info.high - info.low + 1));
+ NearLabel allocate, done;
+ __ j(kAboveEqual, &allocate);
+ // If the value is within the bounds, load the j.l.Integer directly from the array.
+ uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
+ __ movl(out, Address(out, TIMES_4, data_offset + address));
+ __ MaybeUnpoisonHeapReference(out);
+ __ jmp(&done);
+ __ Bind(&allocate);
+ // Otherwise allocate and initialize a new j.l.Integer.
+ address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ movl(calling_convention.GetRegisterAt(0), Immediate(address));
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ __ movl(Address(out, info.value_offset), in);
+ __ Bind(&done);
+ }
+}
+
UNIMPLEMENTED_INTRINSIC(X86, MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(X86, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86, DoubleIsInfinite)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 05d270a..9a6dd98 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -39,7 +39,6 @@
: arena_(codegen->GetGraph()->GetArena()), codegen_(codegen) {
}
-
X86_64Assembler* IntrinsicCodeGeneratorX86_64::GetAssembler() {
return down_cast<X86_64Assembler*>(codegen_->GetAssembler());
}
@@ -2995,6 +2994,65 @@
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderX86_64::VisitIntegerValueOf(HInvoke* invoke) {
+ InvokeRuntimeCallingConvention calling_convention;
+ IntrinsicVisitor::ComputeIntegerValueOfLocations(
+ invoke,
+ codegen_,
+ Location::RegisterLocation(RAX),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitIntegerValueOf(HInvoke* invoke) {
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ LocationSummary* locations = invoke->GetLocations();
+ X86_64Assembler* assembler = GetAssembler();
+
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+ InvokeRuntimeCallingConvention calling_convention;
+ if (invoke->InputAt(0)->IsConstant()) {
+ int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+ if (value >= info.low && value <= info.high) {
+ // Just embed the j.l.Integer in the code.
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Object* boxed = info.cache->Get(value + (-info.low));
+ DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
+ __ movl(out, Immediate(address));
+ } else {
+ // Allocate and initialize a new j.l.Integer.
+ // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
+ // JIT object table.
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(address));
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ __ movl(Address(out, info.value_offset), Immediate(value));
+ }
+ } else {
+ CpuRegister in = locations->InAt(0).AsRegister<CpuRegister>();
+ // Check bounds of our cache.
+ __ leal(out, Address(in, -info.low));
+ __ cmpl(out, Immediate(info.high - info.low + 1));
+ NearLabel allocate, done;
+ __ j(kAboveEqual, &allocate);
+ // If the value is within the bounds, load the j.l.Integer directly from the array.
+ uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
+ __ movl(out, Address(out, TIMES_4, data_offset + address));
+ __ MaybeUnpoisonHeapReference(out);
+ __ jmp(&done);
+ __ Bind(&allocate);
+ // Otherwise allocate and initialize a new j.l.Integer.
+ address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(address));
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ __ movl(Address(out, info.value_offset), in);
+ __ Bind(&done);
+ }
+}
+
UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 8a9e618..c39aed2 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1914,6 +1914,9 @@
virtual bool IsControlFlow() const { return false; }
+ // Can the instruction throw?
+ // TODO: We should rename to CanVisiblyThrow, as some instructions (like HNewInstance),
+ // could throw OOME, but it is still OK to remove them if they are unused.
virtual bool CanThrow() const { return false; }
bool CanThrowIntoCatchBlock() const { return CanThrow() && block_->IsTryBlock(); }
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 5e83e82..2e2231b 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -3475,8 +3475,8 @@
CHECK(dest.IsCoreRegister() && base.AsMips().IsCoreRegister());
LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
base.AsMips().AsCoreRegister(), offs.Int32Value());
- if (kPoisonHeapReferences && unpoison_reference) {
- Subu(dest.AsCoreRegister(), ZERO, dest.AsCoreRegister());
+ if (unpoison_reference) {
+ MaybeUnpoisonHeapReference(dest.AsCoreRegister());
}
}
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 2fca185..47ddf25 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -727,6 +727,38 @@
void Pop(Register rd);
void PopAndReturn(Register rd, Register rt);
+ //
+ // Heap poisoning.
+ //
+
+ // Poison a heap reference contained in `src` and store it in `dst`.
+ void PoisonHeapReference(Register dst, Register src) {
+ // dst = -src.
+ Subu(dst, ZERO, src);
+ }
+ // Poison a heap reference contained in `reg`.
+ void PoisonHeapReference(Register reg) {
+ // reg = -reg.
+ PoisonHeapReference(reg, reg);
+ }
+ // Unpoison a heap reference contained in `reg`.
+ void UnpoisonHeapReference(Register reg) {
+ // reg = -reg.
+ Subu(reg, ZERO, reg);
+ }
+ // Poison a heap reference contained in `reg` if heap poisoning is enabled.
+ void MaybePoisonHeapReference(Register reg) {
+ if (kPoisonHeapReferences) {
+ PoisonHeapReference(reg);
+ }
+ }
+ // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
+ void MaybeUnpoisonHeapReference(Register reg) {
+ if (kPoisonHeapReferences) {
+ UnpoisonHeapReference(reg);
+ }
+ }
+
void Bind(Label* label) OVERRIDE {
Bind(down_cast<MipsLabel*>(label));
}
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 998f2c7..0f86f88 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -488,6 +488,11 @@
EmitI(0xf, rs, rt, imm16);
}
+void Mips64Assembler::Daui(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ CHECK_NE(rs, ZERO);
+ EmitI(0x1d, rs, rt, imm16);
+}
+
void Mips64Assembler::Dahi(GpuRegister rs, uint16_t imm16) {
EmitI(1, rs, static_cast<GpuRegister>(6), imm16);
}
@@ -2367,12 +2372,8 @@
CHECK(dest.IsGpuRegister() && base.AsMips64().IsGpuRegister());
LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(),
base.AsMips64().AsGpuRegister(), offs.Int32Value());
- if (kPoisonHeapReferences && unpoison_reference) {
- // TODO: review
- // Negate the 32-bit ref
- Dsubu(dest.AsGpuRegister(), ZERO, dest.AsGpuRegister());
- // And constrain it to 32 bits (zero-extend into bits 32 through 63) as on Arm64 and x86/64
- Dext(dest.AsGpuRegister(), dest.AsGpuRegister(), 0, 32);
+ if (unpoison_reference) {
+ MaybeUnpoisonHeapReference(dest.AsGpuRegister());
}
}
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index a0a1db6..ee15c6d 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -512,6 +512,7 @@
void Ldpc(GpuRegister rs, uint32_t imm18); // MIPS64
void Lui(GpuRegister rt, uint16_t imm16);
void Aui(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Daui(GpuRegister rt, GpuRegister rs, uint16_t imm16); // MIPS64
void Dahi(GpuRegister rs, uint16_t imm16); // MIPS64
void Dati(GpuRegister rs, uint16_t imm16); // MIPS64
void Sync(uint32_t stype);
@@ -654,6 +655,44 @@
void Addiu32(GpuRegister rt, GpuRegister rs, int32_t value);
void Daddiu64(GpuRegister rt, GpuRegister rs, int64_t value, GpuRegister rtmp = AT); // MIPS64
+ //
+ // Heap poisoning.
+ //
+
+ // Poison a heap reference contained in `src` and store it in `dst`.
+ void PoisonHeapReference(GpuRegister dst, GpuRegister src) {
+ // dst = -src.
+ // Negate the 32-bit ref.
+ Dsubu(dst, ZERO, src);
+ // And constrain it to 32 bits (zero-extend into bits 32 through 63) as on Arm64 and x86/64.
+ Dext(dst, dst, 0, 32);
+ }
+ // Poison a heap reference contained in `reg`.
+ void PoisonHeapReference(GpuRegister reg) {
+ // reg = -reg.
+ PoisonHeapReference(reg, reg);
+ }
+ // Unpoison a heap reference contained in `reg`.
+ void UnpoisonHeapReference(GpuRegister reg) {
+ // reg = -reg.
+ // Negate the 32-bit ref.
+ Dsubu(reg, ZERO, reg);
+ // And constrain it to 32 bits (zero-extend into bits 32 through 63) as on Arm64 and x86/64.
+ Dext(reg, reg, 0, 32);
+ }
+ // Poison a heap reference contained in `reg` if heap poisoning is enabled.
+ void MaybePoisonHeapReference(GpuRegister reg) {
+ if (kPoisonHeapReferences) {
+ PoisonHeapReference(reg);
+ }
+ }
+ // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
+ void MaybeUnpoisonHeapReference(GpuRegister reg) {
+ if (kPoisonHeapReferences) {
+ UnpoisonHeapReference(reg);
+ }
+ }
+
void Bind(Label* label) OVERRIDE {
Bind(down_cast<Mips64Label*>(label));
}
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 74b8f06..96a02c4 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -1269,6 +1269,24 @@
DriverStr(RepeatRIb(&mips64::Mips64Assembler::Lui, 16, "lui ${reg}, {imm}"), "lui");
}
+TEST_F(AssemblerMIPS64Test, Daui) {
+ std::vector<mips64::GpuRegister*> reg1_registers = GetRegisters();
+ std::vector<mips64::GpuRegister*> reg2_registers = GetRegisters();
+ reg2_registers.erase(reg2_registers.begin()); // reg2 can't be ZERO, remove it.
+ std::vector<int64_t> imms = CreateImmediateValuesBits(/* imm_bits */ 16, /* as_uint */ true);
+ WarnOnCombinations(reg1_registers.size() * reg2_registers.size() * imms.size());
+ std::ostringstream expected;
+ for (mips64::GpuRegister* reg1 : reg1_registers) {
+ for (mips64::GpuRegister* reg2 : reg2_registers) {
+ for (int64_t imm : imms) {
+ __ Daui(*reg1, *reg2, imm);
+ expected << "daui $" << *reg1 << ", $" << *reg2 << ", " << imm << "\n";
+ }
+ }
+ }
+ DriverStr(expected.str(), "daui");
+}
+
TEST_F(AssemblerMIPS64Test, Dahi) {
DriverStr(RepeatRIb(&mips64::Mips64Assembler::Dahi, 16, "dahi ${reg}, ${reg}, {imm}"), "dahi");
}
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index be75628..dbae70e 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -2821,6 +2821,9 @@
// When given --host, finish early without stripping.
if (dex2oat.IsHost()) {
+ if (!dex2oat.FlushCloseOutputFiles()) {
+ return EXIT_FAILURE;
+ }
dex2oat.DumpTiming();
return EXIT_SUCCESS;
}
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 2c0b125..b79050e 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -37,6 +37,8 @@
namespace art {
+using android::base::StringPrintf;
+
class Dex2oatTest : public Dex2oatEnvironmentTest {
public:
virtual void TearDown() OVERRIDE {
@@ -52,10 +54,18 @@
const std::string& odex_location,
CompilerFilter::Filter filter,
const std::vector<std::string>& extra_args = {},
- bool expect_success = true) {
+ bool expect_success = true,
+ bool use_fd = false) {
+ std::unique_ptr<File> oat_file;
std::vector<std::string> args;
args.push_back("--dex-file=" + dex_location);
- args.push_back("--oat-file=" + odex_location);
+ if (use_fd) {
+ oat_file.reset(OS::CreateEmptyFile(odex_location.c_str()));
+ CHECK(oat_file != nullptr) << odex_location;
+ args.push_back("--oat-fd=" + std::to_string(oat_file->Fd()));
+ } else {
+ args.push_back("--oat-file=" + odex_location);
+ }
args.push_back("--compiler-filter=" + CompilerFilter::NameOfFilter(filter));
args.push_back("--runtime-arg");
args.push_back("-Xnorelocate");
@@ -64,6 +74,9 @@
std::string error_msg;
bool success = Dex2Oat(args, &error_msg);
+ if (oat_file != nullptr) {
+ ASSERT_EQ(oat_file->FlushClose(), 0) << "Could not flush and close oat file";
+ }
if (expect_success) {
ASSERT_TRUE(success) << error_msg << std::endl << output_;
@@ -582,12 +595,11 @@
ASSERT_TRUE(result);
}
- void RunTest() {
- std::string dex_location = GetScratchDir() + "/DexNoOat.jar";
- std::string profile_location = GetScratchDir() + "/primary.prof";
- std::string odex_location = GetOdexDir() + "/DexOdexNoOat.odex";
-
- Copy(GetDexSrc2(), dex_location);
+ void CompileProfileOdex(const std::string& dex_location,
+ const std::string& odex_location,
+ bool use_fd,
+ const std::vector<std::string>& extra_args = {}) {
+ const std::string profile_location = GetScratchDir() + "/primary.prof";
const char* location = dex_location.c_str();
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
@@ -595,14 +607,61 @@
EXPECT_EQ(dex_files.size(), 1U);
std::unique_ptr<const DexFile>& dex_file = dex_files[0];
GenerateProfile(profile_location, dex_location, dex_file->GetLocationChecksum());
+ std::vector<std::string> copy(extra_args);
+ copy.push_back("--profile-file=" + profile_location);
+ GenerateOdexForTest(dex_location,
+ odex_location,
+ CompilerFilter::kSpeedProfile,
+ copy,
+ /* expect_success */ true,
+ use_fd);
+ }
- const std::vector<std::string>& extra_args = { "--profile-file=" + profile_location };
- GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeedProfile, extra_args);
+ void RunTest() {
+ std::string dex_location = GetScratchDir() + "/DexNoOat.jar";
+ std::string odex_location = GetOdexDir() + "/DexOdexNoOat.odex";
+ Copy(GetDexSrc2(), dex_location);
+
+ CompileProfileOdex(dex_location, odex_location, /* use_fd */ false);
CheckValidity();
ASSERT_TRUE(success_);
CheckResult(dex_location, odex_location);
}
+
+ void RunTestVDex() {
+ std::string dex_location = GetScratchDir() + "/DexNoOat.jar";
+ std::string odex_location = GetOdexDir() + "/DexOdexNoOat.odex";
+ std::string vdex_location = GetOdexDir() + "/DexOdexNoOat.vdex";
+ Copy(GetDexSrc2(), dex_location);
+
+ std::unique_ptr<File> vdex_file1(OS::CreateEmptyFile(vdex_location.c_str()));
+ CHECK(vdex_file1 != nullptr) << vdex_location;
+ ScratchFile vdex_file2;
+ {
+ std::string input_vdex = "--input-vdex-fd=-1";
+ std::string output_vdex = StringPrintf("--output-vdex-fd=%d", vdex_file1->Fd());
+ CompileProfileOdex(dex_location,
+ odex_location,
+ /* use_fd */ true,
+ { input_vdex, output_vdex });
+ EXPECT_GT(vdex_file1->GetLength(), 0u);
+ }
+ {
+ std::string input_vdex = StringPrintf("--input-vdex-fd=%d", vdex_file1->Fd());
+ std::string output_vdex = StringPrintf("--output-vdex-fd=%d", vdex_file2.GetFd());
+ CompileProfileOdex(dex_location,
+ odex_location,
+ /* use_fd */ true,
+ { input_vdex, output_vdex });
+ EXPECT_GT(vdex_file2.GetFile()->GetLength(), 0u);
+ }
+ ASSERT_EQ(vdex_file1->FlushCloseOrErase(), 0) << "Could not flush and close vdex file";
+ CheckValidity();
+ ASSERT_TRUE(success_);
+ CheckResult(dex_location, odex_location);
+ }
+
void CheckResult(const std::string& dex_location, const std::string& odex_location) {
// Host/target independent checks.
std::string error_msg;
@@ -641,29 +700,33 @@
EXPECT_EQ(odex_file->GetCompilerFilter(), CompilerFilter::kSpeedProfile);
}
- // Check whether the dex2oat run was really successful.
- void CheckValidity() {
- if (kIsTargetBuild) {
- CheckTargetValidity();
- } else {
- CheckHostValidity();
- }
+ // Check whether the dex2oat run was really successful.
+ void CheckValidity() {
+ if (kIsTargetBuild) {
+ CheckTargetValidity();
+ } else {
+ CheckHostValidity();
}
+ }
- void CheckTargetValidity() {
- // TODO: Ignore for now.
- }
+ void CheckTargetValidity() {
+ // TODO: Ignore for now.
+ }
- // On the host, we can get the dex2oat output. Here, look for "dex2oat took."
- void CheckHostValidity() {
- EXPECT_NE(output_.find("dex2oat took"), std::string::npos) << output_;
- }
- };
+ // On the host, we can get the dex2oat output. Here, look for "dex2oat took."
+ void CheckHostValidity() {
+ EXPECT_NE(output_.find("dex2oat took"), std::string::npos) << output_;
+ }
+};
TEST_F(Dex2oatLayoutTest, TestLayout) {
RunTest();
}
+TEST_F(Dex2oatLayoutTest, TestVdexLayout) {
+ RunTestVDex();
+}
+
class Dex2oatWatchdogTest : public Dex2oatTest {
protected:
void RunTest(bool expect_success, const std::vector<std::string>& extra_args = {}) {
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
index 131f4b9..a694099 100644
--- a/dexlayout/dex_ir.cc
+++ b/dexlayout/dex_ir.cc
@@ -649,7 +649,7 @@
}
}
int32_t size = DecodeSignedLeb128(&handlers_data);
- bool has_catch_all = size < 0;
+ bool has_catch_all = size <= 0;
if (has_catch_all) {
size = -size;
}
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index 22619b9..4aa8b82 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -1529,10 +1529,18 @@
// NOTE: If the section following the code items is byte aligned, the last code item is left in
// place to preserve alignment. Layout needs an overhaul to handle movement of other sections.
int32_t DexLayout::LayoutCodeItems(std::vector<dex_ir::ClassData*> new_class_data_order) {
+ // Do not move code items if class data section precedes code item section.
+ // ULEB encoding is variable length, causing problems determining the offset of the code items.
+ // TODO: We should swap the order of these sections in the future to avoid this issue.
+ uint32_t class_data_offset = header_->GetCollections().ClassDatasOffset();
+ uint32_t code_item_offset = header_->GetCollections().CodeItemsOffset();
+ if (class_data_offset < code_item_offset) {
+ return 0;
+ }
+
// Find the last code item so we can leave it in place if the next section is not 4 byte aligned.
std::unordered_set<dex_ir::CodeItem*> visited_code_items;
- uint32_t offset = header_->GetCollections().CodeItemsOffset();
- bool is_code_item_aligned = IsNextSectionCodeItemAligned(offset);
+ bool is_code_item_aligned = IsNextSectionCodeItemAligned(code_item_offset);
if (!is_code_item_aligned) {
dex_ir::CodeItem* last_code_item = nullptr;
for (auto& code_item_pair : header_->GetCollections().CodeItems()) {
@@ -1552,18 +1560,18 @@
dex_ir::CodeItem* code_item = method->GetCodeItem();
if (code_item != nullptr && visited_code_items.find(code_item) == visited_code_items.end()) {
visited_code_items.insert(code_item);
- diff += UnsignedLeb128Size(offset) - UnsignedLeb128Size(code_item->GetOffset());
- code_item->SetOffset(offset);
- offset += RoundUp(code_item->GetSize(), kDexCodeItemAlignment);
+ diff += UnsignedLeb128Size(code_item_offset) - UnsignedLeb128Size(code_item->GetOffset());
+ code_item->SetOffset(code_item_offset);
+ code_item_offset += RoundUp(code_item->GetSize(), kDexCodeItemAlignment);
}
}
for (auto& method : *class_data->VirtualMethods()) {
dex_ir::CodeItem* code_item = method->GetCodeItem();
if (code_item != nullptr && visited_code_items.find(code_item) == visited_code_items.end()) {
visited_code_items.insert(code_item);
- diff += UnsignedLeb128Size(offset) - UnsignedLeb128Size(code_item->GetOffset());
- code_item->SetOffset(offset);
- offset += RoundUp(code_item->GetSize(), kDexCodeItemAlignment);
+ diff += UnsignedLeb128Size(code_item_offset) - UnsignedLeb128Size(code_item->GetOffset());
+ code_item->SetOffset(code_item_offset);
+ code_item_offset += RoundUp(code_item->GetSize(), kDexCodeItemAlignment);
}
}
}
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index 9f0593a..2d084c1 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -75,6 +75,26 @@
"AAAEAQAABgAAAAEAAAAkAQAAASAAAAIAAABEAQAAARAAAAIAAADIAQAAAiAAABIAAADWAQAAAyAA"
"AAIAAAC1AgAAACAAAAEAAADIAgAAABAAAAEAAADYAgAA";
+// Dex file with 0-size (catch all only) catch handler unreferenced by try blocks.
+// Constructed by building a dex file with try/catch blocks and hex editing.
+static const char kUnreferenced0SizeCatchHandlerInputDex[] =
+ "ZGV4CjAzNQCEbEEvMstSNpQpjPdfMEfUBS48cis2QRJoAwAAcAAAAHhWNBIAAAAAAAAAAMgCAAAR"
+ "AAAAcAAAAAcAAAC0AAAAAwAAANAAAAABAAAA9AAAAAQAAAD8AAAAAQAAABwBAAAsAgAAPAEAAOoB"
+ "AADyAQAABAIAABMCAAAqAgAAPgIAAFICAABmAgAAaQIAAG0CAACCAgAAhgIAAIoCAACQAgAAlQIA"
+ "AJ4CAACiAgAAAgAAAAMAAAAEAAAABQAAAAYAAAAHAAAACQAAAAcAAAAFAAAAAAAAAAgAAAAFAAAA"
+ "3AEAAAgAAAAFAAAA5AEAAAQAAQANAAAAAAAAAAAAAAAAAAIADAAAAAEAAQAOAAAAAgAAAAAAAAAA"
+ "AAAAAQAAAAIAAAAAAAAAAQAAAAAAAAC5AgAAAAAAAAEAAQABAAAApgIAAAQAAABwEAMAAAAOAAQA"
+ "AQACAAIAqwIAAC8AAABiAAAAGgEPAG4gAgAQAGIAAAAaAQoAbiACABAAYgAAABoBEABuIAIAEABi"
+ "AAAAGgELAG4gAgAQAA4ADQBiAQAAGgIKAG4gAgAhACcADQBiAQAAGgILAG4gAgAhACcAAAAAAAAA"
+ "BwABAA4AAAAHAAEAAgAdACYAAAABAAAAAwAAAAEAAAAGAAY8aW5pdD4AEEhhbmRsZXJUZXN0Lmph"
+ "dmEADUxIYW5kbGVyVGVzdDsAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwASTGphdmEvbGFuZy9PYmpl"
+ "Y3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAEkxqYXZhL2xhbmcvU3lzdGVtOwABVgACVkwAE1tMamF2"
+ "YS9sYW5nL1N0cmluZzsAAmYxAAJmMgAEbWFpbgADb3V0AAdwcmludGxuAAJ0MQACdDIAAQAHDgAE"
+ "AQAHDnl7eXkCeB2bAAAAAgAAgYAEvAIBCdQCAA0AAAAAAAAAAQAAAAAAAAABAAAAEQAAAHAAAAAC"
+ "AAAABwAAALQAAAADAAAAAwAAANAAAAAEAAAAAQAAAPQAAAAFAAAABAAAAPwAAAAGAAAAAQAAABwB"
+ "AAABIAAAAgAAADwBAAABEAAAAgAAANwBAAACIAAAEQAAAOoBAAADIAAAAgAAAKYCAAAAIAAAAQAA"
+ "ALkCAAAAEAAAAQAAAMgCAAA=";
+
// Dex file with multiple code items that have the same debug_info_off_. Constructed by a modified
// dexlayout on XandY.
static const char kDexFileDuplicateOffset[] =
@@ -145,6 +165,21 @@
"AAEAAAC4AAAAASAAAAIAAADYAAAAAiAAAAYAAAACAQAAAyAAAAIAAAAxAQAAACAAAAEAAAA7AQAA"
"ABAAAAEAAABMAQAA";
+// Dex file with class data section preceding code items.
+// Constructed by passing dex file through dexmerger tool and hex editing.
+static const char kClassDataBeforeCodeInputDex[] =
+ "ZGV4CjAzNQCZKmCu3XXn4zvxCh5VH0gZNNobEAcsc49EAgAAcAAAAHhWNBIAAAAAAAAAAAQBAAAJ"
+ "AAAAcAAAAAQAAACUAAAAAgAAAKQAAAAAAAAAAAAAAAUAAAC8AAAAAQAAAOQAAABAAQAABAEAAPgB"
+ "AAAAAgAACAIAAAsCAAAQAgAAJAIAACcCAAAqAgAALQIAAAIAAAADAAAABAAAAAUAAAACAAAAAAAA"
+ "AAAAAAAFAAAAAwAAAAAAAAABAAEAAAAAAAEAAAAGAAAAAQAAAAcAAAABAAAACAAAAAIAAQAAAAAA"
+ "AQAAAAEAAAACAAAAAAAAAAEAAAAAAAAAjAEAAAAAAAALAAAAAAAAAAEAAAAAAAAAAQAAAAkAAABw"
+ "AAAAAgAAAAQAAACUAAAAAwAAAAIAAACkAAAABQAAAAUAAAC8AAAABgAAAAEAAADkAAAAABAAAAEA"
+ "AAAEAQAAACAAAAEAAACMAQAAASAAAAQAAACkAQAAAiAAAAkAAAD4AQAAAyAAAAQAAAAwAgAAAAAB"
+ "AwCBgASkAwEBvAMBAdADAQHkAwAAAQABAAEAAAAwAgAABAAAAHAQBAAAAA4AAgABAAAAAAA1AgAA"
+ "AgAAABIQDwACAAEAAAAAADoCAAACAAAAEiAPAAIAAQAAAAAAPwIAAAIAAAASMA8ABjxpbml0PgAG"
+ "QS5qYXZhAAFJAANMQTsAEkxqYXZhL2xhbmcvT2JqZWN0OwABVgABYQABYgABYwABAAcOAAMABw4A"
+ "BgAHDgAJAAcOAA==";
+
static void WriteBase64ToFile(const char* base64, File* file) {
// Decode base64.
CHECK(base64 != nullptr);
@@ -282,8 +317,8 @@
return true;
}
- // Runs UnreferencedCatchHandlerTest.
- bool UnreferencedCatchHandlerExec(std::string* error_msg) {
+ // Runs UnreferencedCatchHandlerTest & Unreferenced0SizeCatchHandlerTest.
+ bool UnreferencedCatchHandlerExec(std::string* error_msg, const char* filename) {
ScratchFile tmp_file;
std::string tmp_name = tmp_file.GetFilename();
size_t tmp_last_slash = tmp_name.rfind("/");
@@ -291,7 +326,7 @@
// Write inputs and expected outputs.
std::string input_dex = tmp_dir + "classes.dex";
- WriteFileBase64(kUnreferencedCatchHandlerInputDex, input_dex.c_str());
+ WriteFileBase64(filename, input_dex.c_str());
std::string output_dex = tmp_dir + "classes.dex.new";
std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
@@ -343,8 +378,18 @@
// Disable test on target.
TEST_DISABLED_FOR_TARGET();
std::string error_msg;
- ASSERT_TRUE(UnreferencedCatchHandlerExec(&error_msg)) << error_msg;
+ ASSERT_TRUE(UnreferencedCatchHandlerExec(&error_msg,
+ kUnreferencedCatchHandlerInputDex)) << error_msg;
}
+
+TEST_F(DexLayoutTest, Unreferenced0SizeCatchHandler) {
+ // Disable test on target.
+ TEST_DISABLED_FOR_TARGET();
+ std::string error_msg;
+ ASSERT_TRUE(UnreferencedCatchHandlerExec(&error_msg,
+ kUnreferenced0SizeCatchHandlerInputDex)) << error_msg;
+}
+
TEST_F(DexLayoutTest, DuplicateOffset) {
ScratchFile temp;
WriteBase64ToFile(kDexFileDuplicateOffset, temp.GetFile());
@@ -418,4 +463,22 @@
}
}
+TEST_F(DexLayoutTest, ClassDataBeforeCode) {
+ ScratchFile temp;
+ WriteBase64ToFile(kClassDataBeforeCodeInputDex, temp.GetFile());
+ ScratchFile temp2;
+ WriteBase64ToFile(kDexFileLayoutInputProfile, temp2.GetFile());
+ EXPECT_EQ(temp.GetFile()->Flush(), 0);
+ std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
+ EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
+ std::vector<std::string> dexlayout_exec_argv =
+ { dexlayout, "-p", temp2.GetFilename(), "-o", "/dev/null", temp.GetFilename() };
+ std::string error_msg;
+ const bool result = ::art::Exec(dexlayout_exec_argv, &error_msg);
+ EXPECT_TRUE(result);
+ if (!result) {
+ LOG(ERROR) << "Error " << error_msg;
+ }
+}
+
} // namespace art
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index ec8ae85..4f7b495 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -2048,11 +2048,12 @@
lw $t0, MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
#endif
slt $t1, $a2, $zero # if fromIndex < 0
-#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6)
+#if defined(_MIPS_ARCH_MIPS32R6)
seleqz $a2, $a2, $t1 # fromIndex = 0;
#else
movn $a2, $zero, $t1 # fromIndex = 0;
#endif
+
#if (STRING_COMPRESSION_FEATURE)
srl $t0, $a3, 1 # $a3 holds count (with flag) and $t0 holds actual length
#endif
diff --git a/runtime/arch/mips64/asm_support_mips64.S b/runtime/arch/mips64/asm_support_mips64.S
index 35f20fb..ef82bd2 100644
--- a/runtime/arch/mips64/asm_support_mips64.S
+++ b/runtime/arch/mips64/asm_support_mips64.S
@@ -70,14 +70,16 @@
// Macros to poison (negate) the reference for heap poisoning.
.macro POISON_HEAP_REF rRef
#ifdef USE_HEAP_POISONING
- subu \rRef, $zero, \rRef
+ dsubu \rRef, $zero, \rRef
+ dext \rRef, \rRef, 0, 32
#endif // USE_HEAP_POISONING
.endm
// Macros to unpoison (negate) the reference for heap poisoning.
.macro UNPOISON_HEAP_REF rRef
#ifdef USE_HEAP_POISONING
- subu \rRef, $zero, \rRef
+ dsubu \rRef, $zero, \rRef
+ dext \rRef, \rRef, 0, 32
#endif // USE_HEAP_POISONING
.endm
diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc
index ff2dd1b..03fc959 100644
--- a/runtime/base/unix_file/fd_file.cc
+++ b/runtime/base/unix_file/fd_file.cc
@@ -73,7 +73,7 @@
}
if (auto_close_ && fd_ != -1) {
if (Close() != 0) {
- PLOG(WARNING) << "Failed to close file " << file_path_;
+ PLOG(WARNING) << "Failed to close file with fd=" << fd_ << " path=" << file_path_;
}
}
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index eaa35fe..cac5449 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1025,7 +1025,8 @@
class_loader->GetClass();
}
-static mirror::String* GetDexPathListElementName(ObjPtr<mirror::Object> element)
+static bool GetDexPathListElementName(ObjPtr<mirror::Object> element,
+ ObjPtr<mirror::String>* out_name)
REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* const dex_file_field =
jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
@@ -1037,17 +1038,20 @@
CHECK_EQ(dex_file_field->GetDeclaringClass(), element->GetClass()) << element->PrettyTypeOf();
ObjPtr<mirror::Object> dex_file = dex_file_field->GetObject(element);
if (dex_file == nullptr) {
- return nullptr;
+ // Null dex file means it was probably a jar with no dex files, return a null string.
+ *out_name = nullptr;
+ return true;
}
ObjPtr<mirror::Object> name_object = dex_file_name_field->GetObject(dex_file);
if (name_object != nullptr) {
- return name_object->AsString();
+ *out_name = name_object->AsString();
+ return true;
}
- return nullptr;
+ return false;
}
static bool FlattenPathClassLoader(ObjPtr<mirror::ClassLoader> class_loader,
- std::list<mirror::String*>* out_dex_file_names,
+ std::list<ObjPtr<mirror::String>>* out_dex_file_names,
std::string* error_msg)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(out_dex_file_names != nullptr);
@@ -1083,12 +1087,14 @@
*error_msg = StringPrintf("Null dex element at index %d", i);
return false;
}
- ObjPtr<mirror::String> const name = GetDexPathListElementName(element);
- if (name == nullptr) {
- *error_msg = StringPrintf("Null name for dex element at index %d", i);
+ ObjPtr<mirror::String> name;
+ if (!GetDexPathListElementName(element, &name)) {
+ *error_msg = StringPrintf("Invalid dex path list element at index %d", i);
return false;
}
- out_dex_file_names->push_front(name.Ptr());
+ if (name != nullptr) {
+ out_dex_file_names->push_front(name.Ptr());
+ }
}
}
}
@@ -1769,14 +1775,14 @@
*error_msg = "Unexpected BootClassLoader in app image";
return false;
}
- std::list<mirror::String*> image_dex_file_names;
+ std::list<ObjPtr<mirror::String>> image_dex_file_names;
std::string temp_error_msg;
if (!FlattenPathClassLoader(image_class_loader.Get(), &image_dex_file_names, &temp_error_msg)) {
*error_msg = StringPrintf("Failed to flatten image class loader hierarchy '%s'",
temp_error_msg.c_str());
return false;
}
- std::list<mirror::String*> loader_dex_file_names;
+ std::list<ObjPtr<mirror::String>> loader_dex_file_names;
if (!FlattenPathClassLoader(class_loader.Get(), &loader_dex_file_names, &temp_error_msg)) {
*error_msg = StringPrintf("Failed to flatten class loader hierarchy '%s'",
temp_error_msg.c_str());
@@ -1788,7 +1794,10 @@
ObjPtr<mirror::Object> element = elements->GetWithoutChecks(i);
if (element != nullptr) {
// If we are somewhere in the middle of the array, there may be nulls at the end.
- loader_dex_file_names.push_back(GetDexPathListElementName(element));
+ ObjPtr<mirror::String> name;
+ if (GetDexPathListElementName(element, &name) && name != nullptr) {
+ loader_dex_file_names.push_back(name);
+ }
}
}
// Ignore the number of image dex files since we are adding those to the class loader anyways.
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index b6a2e09..35e9d5d 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -179,6 +179,14 @@
std::string* error_msg) {
ScopedTrace trace(std::string("Open dex file from mapped-memory ") + location);
CHECK(map.get() != nullptr);
+
+ if (map->Size() < sizeof(DexFile::Header)) {
+ *error_msg = StringPrintf(
+ "DexFile: failed to open dex file '%s' that is too short to have a header",
+ location.c_str());
+ return nullptr;
+ }
+
std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
map->Size(),
location,
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index 86266e2..e77a5b8 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -80,7 +80,7 @@
}
// This should match RegionSpace::kRegionSize. static_assert'ed in concurrent_copying.h.
- static constexpr size_t kRegionSize = 1 * MB;
+ static constexpr size_t kRegionSize = 256 * KB;
private:
static constexpr uint64_t kHeapCapacity = 4ULL * GB; // low 4gb.
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 8f9c187..aea9708 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1644,10 +1644,10 @@
// Record freed objects.
TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
// Don't include thread-locals that are in the to-space.
- uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
- uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
- uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
- uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
+ const uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
+ const uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
+ const uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
+ const uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
cumulative_bytes_moved_.FetchAndAddRelaxed(to_bytes);
uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
@@ -1658,8 +1658,18 @@
}
CHECK_LE(to_objects, from_objects);
CHECK_LE(to_bytes, from_bytes);
- int64_t freed_bytes = from_bytes - to_bytes;
- int64_t freed_objects = from_objects - to_objects;
+ // cleared_bytes and cleared_objects may be greater than the from space equivalents since
+ // ClearFromSpace may clear empty unevac regions.
+ uint64_t cleared_bytes;
+ uint64_t cleared_objects;
+ {
+ TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
+ region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects);
+ CHECK_GE(cleared_bytes, from_bytes);
+ CHECK_GE(cleared_objects, from_objects);
+ }
+ int64_t freed_bytes = cleared_bytes - to_bytes;
+ int64_t freed_objects = cleared_objects - to_objects;
if (kVerboseMode) {
LOG(INFO) << "RecordFree:"
<< " from_bytes=" << from_bytes << " from_objects=" << from_objects
@@ -1678,11 +1688,6 @@
}
{
- TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
- region_space_->ClearFromSpace();
- }
-
- {
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
Sweep(false);
SwapBitmaps();
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 12b9701..b857ea3 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3559,11 +3559,8 @@
collector::GcType gc_type = collector_ran->GetGcType();
const double multiplier = HeapGrowthMultiplier(); // Use the multiplier to grow more for
// foreground.
- // Ensure at least 2.5 MB to temporarily fix excessive GC caused by TLAB ergonomics.
- const uint64_t adjusted_min_free = std::max(static_cast<uint64_t>(min_free_ * multiplier),
- static_cast<uint64_t>(5 * MB / 2));
- const uint64_t adjusted_max_free = std::max(static_cast<uint64_t>(max_free_ * multiplier),
- static_cast<uint64_t>(5 * MB / 2));
+ const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
+ const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
if (gc_type != collector::kGcTypeSticky) {
// Grow the heap for non sticky GC.
ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 5d282f1..5809027 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -78,7 +78,7 @@
for (size_t i = 0; i < num_regions_; ++i) {
Region* r = ®ions_[i];
if (r->IsFree()) {
- r->Unfree(time_);
+ r->Unfree(this, time_);
r->SetNewlyAllocated();
++num_non_free_regions_;
obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
@@ -91,7 +91,7 @@
for (size_t i = 0; i < num_regions_; ++i) {
Region* r = ®ions_[i];
if (r->IsFree()) {
- r->Unfree(time_);
+ r->Unfree(this, time_);
++num_non_free_regions_;
obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
CHECK(obj != nullptr);
@@ -233,14 +233,12 @@
continue;
}
if (r->IsLarge()) {
- if (r->LiveBytes() > 0) {
- // Avoid visiting dead large objects since they may contain dangling pointers to the
- // from-space.
- DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
- DCHECK(obj->GetClass() != nullptr);
- callback(obj, arg);
- }
+ // Avoid visiting dead large objects since they may contain dangling pointers to the
+ // from-space.
+ DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
+ DCHECK(obj->GetClass() != nullptr);
+ callback(obj, arg);
} else if (r->IsLargeTail()) {
// Do nothing.
} else {
@@ -314,13 +312,13 @@
DCHECK_EQ(left + num_regs, right);
Region* first_reg = ®ions_[left];
DCHECK(first_reg->IsFree());
- first_reg->UnfreeLarge(time_);
+ first_reg->UnfreeLarge(this, time_);
++num_non_free_regions_;
first_reg->SetTop(first_reg->Begin() + num_bytes);
for (size_t p = left + 1; p < right; ++p) {
DCHECK_LT(p, num_regions_);
DCHECK(regions_[p].IsFree());
- regions_[p].UnfreeLargeTail(time_);
+ regions_[p].UnfreeLargeTail(this, time_);
++num_non_free_regions_;
}
*bytes_allocated = num_bytes;
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 321524c..1ad4843 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -86,6 +86,7 @@
num_regions_ = mem_map_size / kRegionSize;
num_non_free_regions_ = 0U;
DCHECK_GT(num_regions_, 0U);
+ non_free_region_index_limit_ = 0U;
regions_.reset(new Region[num_regions_]);
uint8_t* region_addr = mem_map->Begin();
for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
@@ -192,7 +193,11 @@
MutexLock mu(Thread::Current(), region_lock_);
size_t num_expected_large_tails = 0;
bool prev_large_evacuated = false;
- for (size_t i = 0; i < num_regions_; ++i) {
+ VerifyNonFreeRegionLimit();
+ const size_t iter_limit = kUseTableLookupReadBarrier
+ ? num_regions_
+ : std::min(num_regions_, non_free_region_index_limit_);
+ for (size_t i = 0; i < iter_limit; ++i) {
Region* r = ®ions_[i];
RegionState state = r->State();
RegionType type = r->Type();
@@ -236,18 +241,50 @@
}
}
}
+ DCHECK_EQ(num_expected_large_tails, 0U);
current_region_ = &full_region_;
evac_region_ = &full_region_;
}
-void RegionSpace::ClearFromSpace() {
+void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) {
+ DCHECK(cleared_bytes != nullptr);
+ DCHECK(cleared_objects != nullptr);
+ *cleared_bytes = 0;
+ *cleared_objects = 0;
MutexLock mu(Thread::Current(), region_lock_);
- for (size_t i = 0; i < num_regions_; ++i) {
+ VerifyNonFreeRegionLimit();
+ size_t new_non_free_region_index_limit = 0;
+ for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
Region* r = ®ions_[i];
if (r->IsInFromSpace()) {
- r->Clear();
+ *cleared_bytes += r->BytesAllocated();
+ *cleared_objects += r->ObjectsAllocated();
--num_non_free_regions_;
+ r->Clear();
} else if (r->IsInUnevacFromSpace()) {
+ if (r->LiveBytes() == 0) {
+ // Special case for 0 live bytes, this means all of the objects in the region are dead and
+ // we can clear it. This is important for large objects since we must not visit dead ones in
+ // RegionSpace::Walk because they may contain dangling references to invalid objects.
+ // It is also better to clear these regions now instead of at the end of the next GC to
+ // save RAM. If we don't clear the regions here, they will be cleared next GC by the normal
+ // live percent evacuation logic.
+ size_t free_regions = 1;
+ // Also release RAM for large tails.
+ while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) {
+ DCHECK(r->IsLarge());
+ regions_[i + free_regions].Clear();
+ ++free_regions;
+ }
+ *cleared_bytes += r->BytesAllocated();
+ *cleared_objects += r->ObjectsAllocated();
+ num_non_free_regions_ -= free_regions;
+ r->Clear();
+ GetLiveBitmap()->ClearRange(
+ reinterpret_cast<mirror::Object*>(r->Begin()),
+ reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
+ continue;
+ }
size_t full_count = 0;
while (r->IsInUnevacFromSpace()) {
Region* const cur = ®ions_[i + full_count];
@@ -255,6 +292,7 @@
cur->LiveBytes() != static_cast<size_t>(cur->Top() - cur->Begin())) {
break;
}
+ DCHECK(cur->IsInUnevacFromSpace());
if (full_count != 0) {
cur->SetUnevacFromSpaceAsToSpace();
}
@@ -271,7 +309,15 @@
i += full_count - 1;
}
}
+ // Note r != last_checked_region if r->IsInUnevacFromSpace() was true above.
+ Region* last_checked_region = ®ions_[i];
+ if (!last_checked_region->IsFree()) {
+ new_non_free_region_index_limit = std::max(new_non_free_region_index_limit,
+ last_checked_region->Idx() + 1);
+ }
}
+ // Update non_free_region_index_limit_.
+ SetNonFreeRegionLimit(new_non_free_region_index_limit);
evac_region_ = nullptr;
}
@@ -324,6 +370,7 @@
}
r->Clear();
}
+ SetNonFreeRegionLimit(0);
current_region_ = &full_region_;
evac_region_ = &full_region_;
}
@@ -390,7 +437,7 @@
for (size_t i = 0; i < num_regions_; ++i) {
Region* r = ®ions_[i];
if (r->IsFree()) {
- r->Unfree(time_);
+ r->Unfree(this, time_);
++num_non_free_regions_;
r->SetNewlyAllocated();
r->SetTop(r->End());
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index da36f5c..2537929 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -167,7 +167,7 @@
// Object alignment within the space.
static constexpr size_t kAlignment = kObjectAlignment;
// The region size.
- static constexpr size_t kRegionSize = 1 * MB;
+ static constexpr size_t kRegionSize = 256 * KB;
bool IsInFromSpace(mirror::Object* ref) {
if (HasAddress(ref)) {
@@ -215,7 +215,7 @@
size_t FromSpaceSize() REQUIRES(!region_lock_);
size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
size_t ToSpaceSize() REQUIRES(!region_lock_);
- void ClearFromSpace() REQUIRES(!region_lock_);
+ void ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) REQUIRES(!region_lock_);
void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
Region* reg = RefToRegionUnlocked(ref);
@@ -308,25 +308,31 @@
}
// Given a free region, declare it non-free (allocated).
- void Unfree(uint32_t alloc_time) {
+ void Unfree(RegionSpace* region_space, uint32_t alloc_time)
+ REQUIRES(region_space->region_lock_) {
DCHECK(IsFree());
state_ = RegionState::kRegionStateAllocated;
type_ = RegionType::kRegionTypeToSpace;
alloc_time_ = alloc_time;
+ region_space->AdjustNonFreeRegionLimit(idx_);
}
- void UnfreeLarge(uint32_t alloc_time) {
+ void UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time)
+ REQUIRES(region_space->region_lock_) {
DCHECK(IsFree());
state_ = RegionState::kRegionStateLarge;
type_ = RegionType::kRegionTypeToSpace;
alloc_time_ = alloc_time;
+ region_space->AdjustNonFreeRegionLimit(idx_);
}
- void UnfreeLargeTail(uint32_t alloc_time) {
+ void UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time)
+ REQUIRES(region_space->region_lock_) {
DCHECK(IsFree());
state_ = RegionState::kRegionStateLargeTail;
type_ = RegionType::kRegionTypeToSpace;
alloc_time_ = alloc_time;
+ region_space->AdjustNonFreeRegionLimit(idx_);
}
void SetNewlyAllocated() {
@@ -342,7 +348,7 @@
bool IsLarge() const {
bool is_large = state_ == RegionState::kRegionStateLarge;
if (is_large) {
- DCHECK_LT(begin_ + 1 * MB, Top());
+ DCHECK_LT(begin_ + kRegionSize, Top());
}
return is_large;
}
@@ -429,7 +435,7 @@
size_t ObjectsAllocated() const {
if (IsLarge()) {
- DCHECK_LT(begin_ + 1 * MB, Top());
+ DCHECK_LT(begin_ + kRegionSize, Top());
DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
return 1;
} else if (IsLargeTail()) {
@@ -520,6 +526,27 @@
mirror::Object* GetNextObject(mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_);
+ void AdjustNonFreeRegionLimit(size_t new_non_free_region_index) REQUIRES(region_lock_) {
+ DCHECK_LT(new_non_free_region_index, num_regions_);
+ non_free_region_index_limit_ = std::max(non_free_region_index_limit_,
+ new_non_free_region_index + 1);
+ VerifyNonFreeRegionLimit();
+ }
+
+ void SetNonFreeRegionLimit(size_t new_non_free_region_index_limit) REQUIRES(region_lock_) {
+ DCHECK_LE(new_non_free_region_index_limit, num_regions_);
+ non_free_region_index_limit_ = new_non_free_region_index_limit;
+ VerifyNonFreeRegionLimit();
+ }
+
+ void VerifyNonFreeRegionLimit() REQUIRES(region_lock_) {
+ if (kIsDebugBuild && non_free_region_index_limit_ < num_regions_) {
+ for (size_t i = non_free_region_index_limit_; i < num_regions_; ++i) {
+ CHECK(regions_[i].IsFree());
+ }
+ }
+ }
+
Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
uint32_t time_; // The time as the number of collections since the startup.
@@ -527,6 +554,10 @@
size_t num_non_free_regions_; // The number of non-free regions in this space.
std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);
// The pointer to the region array.
+ // The upper-bound index of the non-free regions. Used to avoid scanning all regions in
+ // SetFromSpace(). Invariant: for all i >= non_free_region_index_limit_, regions_[i].IsFree() is
+ // true.
+ size_t non_free_region_index_limit_ GUARDED_BY(region_lock_);
Region* current_region_; // The region that's being allocated currently.
Region* evac_region_; // The region that's being evacuated to currently.
Region full_region_; // The dummy/sentinel region that looks full.
diff --git a/runtime/image.cc b/runtime/image.cc
index 243051e..88f28f3 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -25,7 +25,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '9', '\0' }; // Enable string compression.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '4', '0', '\0' }; // Integer.valueOf intrinsic
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/interpreter/mterp/arm/op_sget.S b/runtime/interpreter/mterp/arm/op_sget.S
index 2b81f50..3c813ef 100644
--- a/runtime/interpreter/mterp/arm/op_sget.S
+++ b/runtime/interpreter/mterp/arm/op_sget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32StaticFromCode" }
+%default { "is_object":"0", "helper":"MterpGet32Static" }
/*
* General SGET handler wrapper.
*
diff --git a/runtime/interpreter/mterp/arm/op_sget_boolean.S b/runtime/interpreter/mterp/arm/op_sget_boolean.S
index ebfb44c..eb06aa8 100644
--- a/runtime/interpreter/mterp/arm/op_sget_boolean.S
+++ b/runtime/interpreter/mterp/arm/op_sget_boolean.S
@@ -1 +1 @@
-%include "arm/op_sget.S" {"helper":"artGetBooleanStaticFromCode"}
+%include "arm/op_sget.S" {"helper":"MterpGetBooleanStatic"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_byte.S b/runtime/interpreter/mterp/arm/op_sget_byte.S
index d76862e..9f4c904 100644
--- a/runtime/interpreter/mterp/arm/op_sget_byte.S
+++ b/runtime/interpreter/mterp/arm/op_sget_byte.S
@@ -1 +1 @@
-%include "arm/op_sget.S" {"helper":"artGetByteStaticFromCode"}
+%include "arm/op_sget.S" {"helper":"MterpGetByteStatic"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_char.S b/runtime/interpreter/mterp/arm/op_sget_char.S
index b7fcfc2..dd8c991 100644
--- a/runtime/interpreter/mterp/arm/op_sget_char.S
+++ b/runtime/interpreter/mterp/arm/op_sget_char.S
@@ -1 +1 @@
-%include "arm/op_sget.S" {"helper":"artGetCharStaticFromCode"}
+%include "arm/op_sget.S" {"helper":"MterpGetCharStatic"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_object.S b/runtime/interpreter/mterp/arm/op_sget_object.S
index 8e7d075..e1d9eae 100644
--- a/runtime/interpreter/mterp/arm/op_sget_object.S
+++ b/runtime/interpreter/mterp/arm/op_sget_object.S
@@ -1 +1 @@
-%include "arm/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"}
+%include "arm/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_short.S b/runtime/interpreter/mterp/arm/op_sget_short.S
index 3e80f0d..c0d61c4 100644
--- a/runtime/interpreter/mterp/arm/op_sget_short.S
+++ b/runtime/interpreter/mterp/arm/op_sget_short.S
@@ -1 +1 @@
-%include "arm/op_sget.S" {"helper":"artGetShortStaticFromCode"}
+%include "arm/op_sget.S" {"helper":"MterpGetShortStatic"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_wide.S b/runtime/interpreter/mterp/arm/op_sget_wide.S
index 4f2f89d..aeee016 100644
--- a/runtime/interpreter/mterp/arm/op_sget_wide.S
+++ b/runtime/interpreter/mterp/arm/op_sget_wide.S
@@ -4,12 +4,12 @@
*/
/* sget-wide vAA, field@BBBB */
- .extern artGet64StaticFromCode
+ .extern MterpGet64Static
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl artGet64StaticFromCode
+ bl MterpGet64Static
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r9, rINST, lsr #8 @ r9<- AA
VREG_INDEX_TO_ADDR lr, r9 @ r9<- &fp[AA]
diff --git a/runtime/interpreter/mterp/arm/op_sput.S b/runtime/interpreter/mterp/arm/op_sput.S
index 7e0c1a6..494df8a 100644
--- a/runtime/interpreter/mterp/arm/op_sput.S
+++ b/runtime/interpreter/mterp/arm/op_sput.S
@@ -1,4 +1,4 @@
-%default { "helper":"artSet32StaticFromCode"}
+%default { "helper":"MterpSet32Static"}
/*
* General SPUT handler wrapper.
*
diff --git a/runtime/interpreter/mterp/arm/op_sput_boolean.S b/runtime/interpreter/mterp/arm/op_sput_boolean.S
index e3bbf2b..47bed0a 100644
--- a/runtime/interpreter/mterp/arm/op_sput_boolean.S
+++ b/runtime/interpreter/mterp/arm/op_sput_boolean.S
@@ -1 +1 @@
-%include "arm/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "arm/op_sput.S" {"helper":"MterpSetBooleanStatic"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_byte.S b/runtime/interpreter/mterp/arm/op_sput_byte.S
index e3bbf2b..b4d22b4 100644
--- a/runtime/interpreter/mterp/arm/op_sput_byte.S
+++ b/runtime/interpreter/mterp/arm/op_sput_byte.S
@@ -1 +1 @@
-%include "arm/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "arm/op_sput.S" {"helper":"MterpSetByteStatic"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_char.S b/runtime/interpreter/mterp/arm/op_sput_char.S
index d8d65cb..58a957d 100644
--- a/runtime/interpreter/mterp/arm/op_sput_char.S
+++ b/runtime/interpreter/mterp/arm/op_sput_char.S
@@ -1 +1 @@
-%include "arm/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "arm/op_sput.S" {"helper":"MterpSetCharStatic"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_short.S b/runtime/interpreter/mterp/arm/op_sput_short.S
index d8d65cb..88c3211 100644
--- a/runtime/interpreter/mterp/arm/op_sput_short.S
+++ b/runtime/interpreter/mterp/arm/op_sput_short.S
@@ -1 +1 @@
-%include "arm/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "arm/op_sput.S" {"helper":"MterpSetShortStatic"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_wide.S b/runtime/interpreter/mterp/arm/op_sput_wide.S
index 8d8ed8c..1e8fcc9 100644
--- a/runtime/interpreter/mterp/arm/op_sput_wide.S
+++ b/runtime/interpreter/mterp/arm/op_sput_wide.S
@@ -3,15 +3,15 @@
*
*/
/* sput-wide vAA, field@BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
- ldr r1, [rFP, #OFF_FP_METHOD]
- mov r2, rINST, lsr #8 @ r3<- AA
- VREG_INDEX_TO_ADDR r2, r2
+ mov r1, rINST, lsr #8 @ r1<- AA
+ VREG_INDEX_TO_ADDR r1, r1
+ ldr r2, [rFP, #OFF_FP_METHOD]
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl artSet64IndirectStaticFromMterp
+ bl MterpSet64Static
cmp r0, #0 @ 0 on success, -1 on failure
bne MterpException
ADVANCE 2 @ Past exception point - now advance rPC
diff --git a/runtime/interpreter/mterp/arm64/op_sget.S b/runtime/interpreter/mterp/arm64/op_sget.S
index 6352ce0..84e71ac 100644
--- a/runtime/interpreter/mterp/arm64/op_sget.S
+++ b/runtime/interpreter/mterp/arm64/op_sget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32StaticFromCode", "extend":"" }
+%default { "is_object":"0", "helper":"MterpGet32Static", "extend":"" }
/*
* General SGET handler wrapper.
*
diff --git a/runtime/interpreter/mterp/arm64/op_sget_boolean.S b/runtime/interpreter/mterp/arm64/op_sget_boolean.S
index c40dbdd..868f41c 100644
--- a/runtime/interpreter/mterp/arm64/op_sget_boolean.S
+++ b/runtime/interpreter/mterp/arm64/op_sget_boolean.S
@@ -1 +1 @@
-%include "arm64/op_sget.S" {"helper":"artGetBooleanStaticFromCode", "extend":"uxtb w0, w0"}
+%include "arm64/op_sget.S" {"helper":"MterpGetBooleanStatic", "extend":"uxtb w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_byte.S b/runtime/interpreter/mterp/arm64/op_sget_byte.S
index 6cf69a3..e135aa7 100644
--- a/runtime/interpreter/mterp/arm64/op_sget_byte.S
+++ b/runtime/interpreter/mterp/arm64/op_sget_byte.S
@@ -1 +1 @@
-%include "arm64/op_sget.S" {"helper":"artGetByteStaticFromCode", "extend":"sxtb w0, w0"}
+%include "arm64/op_sget.S" {"helper":"MterpGetByteStatic", "extend":"sxtb w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_char.S b/runtime/interpreter/mterp/arm64/op_sget_char.S
index 8924a34..05d57ac 100644
--- a/runtime/interpreter/mterp/arm64/op_sget_char.S
+++ b/runtime/interpreter/mterp/arm64/op_sget_char.S
@@ -1 +1 @@
-%include "arm64/op_sget.S" {"helper":"artGetCharStaticFromCode", "extend":"uxth w0, w0"}
+%include "arm64/op_sget.S" {"helper":"MterpGetCharStatic", "extend":"uxth w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_object.S b/runtime/interpreter/mterp/arm64/op_sget_object.S
index 620b0ba..1faaf6e 100644
--- a/runtime/interpreter/mterp/arm64/op_sget_object.S
+++ b/runtime/interpreter/mterp/arm64/op_sget_object.S
@@ -1 +1 @@
-%include "arm64/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"}
+%include "arm64/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_short.S b/runtime/interpreter/mterp/arm64/op_sget_short.S
index 19dbba6..5900231 100644
--- a/runtime/interpreter/mterp/arm64/op_sget_short.S
+++ b/runtime/interpreter/mterp/arm64/op_sget_short.S
@@ -1 +1 @@
-%include "arm64/op_sget.S" {"helper":"artGetShortStaticFromCode", "extend":"sxth w0, w0"}
+%include "arm64/op_sget.S" {"helper":"MterpGetShortStatic", "extend":"sxth w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_wide.S b/runtime/interpreter/mterp/arm64/op_sget_wide.S
index 287f66d..92f3f7d 100644
--- a/runtime/interpreter/mterp/arm64/op_sget_wide.S
+++ b/runtime/interpreter/mterp/arm64/op_sget_wide.S
@@ -4,12 +4,12 @@
*/
/* sget-wide vAA, field//BBBB */
- .extern artGet64StaticFromCode
+ .extern MterpGet64Static
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl artGet64StaticFromCode
+ bl MterpGet64Static
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w4, wINST, #8 // w4<- AA
cbnz x3, MterpException // bail out
diff --git a/runtime/interpreter/mterp/arm64/op_sput.S b/runtime/interpreter/mterp/arm64/op_sput.S
index 75f27ab..e322af0 100644
--- a/runtime/interpreter/mterp/arm64/op_sput.S
+++ b/runtime/interpreter/mterp/arm64/op_sput.S
@@ -1,4 +1,4 @@
-%default { "helper":"artSet32StaticFromCode"}
+%default { "helper":"MterpSet32Static"}
/*
* General SPUT handler wrapper.
*
diff --git a/runtime/interpreter/mterp/arm64/op_sput_boolean.S b/runtime/interpreter/mterp/arm64/op_sput_boolean.S
index 11c55e5..9928f31 100644
--- a/runtime/interpreter/mterp/arm64/op_sput_boolean.S
+++ b/runtime/interpreter/mterp/arm64/op_sput_boolean.S
@@ -1 +1 @@
-%include "arm64/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "arm64/op_sput.S" {"helper":"MterpSetBooleanStatic"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_byte.S b/runtime/interpreter/mterp/arm64/op_sput_byte.S
index 11c55e5..16d6ba9 100644
--- a/runtime/interpreter/mterp/arm64/op_sput_byte.S
+++ b/runtime/interpreter/mterp/arm64/op_sput_byte.S
@@ -1 +1 @@
-%include "arm64/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "arm64/op_sput.S" {"helper":"MterpSetByteStatic"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_char.S b/runtime/interpreter/mterp/arm64/op_sput_char.S
index b4dd5aa..ab5e815 100644
--- a/runtime/interpreter/mterp/arm64/op_sput_char.S
+++ b/runtime/interpreter/mterp/arm64/op_sput_char.S
@@ -1 +1 @@
-%include "arm64/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "arm64/op_sput.S" {"helper":"MterpSetCharStatic"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_short.S b/runtime/interpreter/mterp/arm64/op_sput_short.S
index b4dd5aa..b54f88a 100644
--- a/runtime/interpreter/mterp/arm64/op_sput_short.S
+++ b/runtime/interpreter/mterp/arm64/op_sput_short.S
@@ -1 +1 @@
-%include "arm64/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "arm64/op_sput.S" {"helper":"MterpSetShortStatic"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_wide.S b/runtime/interpreter/mterp/arm64/op_sput_wide.S
index a79b1a6..4aeb8ff 100644
--- a/runtime/interpreter/mterp/arm64/op_sput_wide.S
+++ b/runtime/interpreter/mterp/arm64/op_sput_wide.S
@@ -3,15 +3,15 @@
*
*/
/* sput-wide vAA, field//BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
- ldr x1, [xFP, #OFF_FP_METHOD]
- lsr w2, wINST, #8 // w3<- AA
- VREG_INDEX_TO_ADDR x2, w2
+ lsr w1, wINST, #8 // w1<- AA
+ VREG_INDEX_TO_ADDR x1, w1
+ ldr x2, [xFP, #OFF_FP_METHOD]
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl artSet64IndirectStaticFromMterp
+ bl MterpSet64Static
cbnz w0, MterpException // 0 on success, -1 on failure
ADVANCE 2 // Past exception point - now advance rPC
GET_INST_OPCODE ip // extract opcode from wINST
diff --git a/runtime/interpreter/mterp/mips/op_sget.S b/runtime/interpreter/mterp/mips/op_sget.S
index 64ece1e..635df8a 100644
--- a/runtime/interpreter/mterp/mips/op_sget.S
+++ b/runtime/interpreter/mterp/mips/op_sget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32StaticFromCode" }
+%default { "is_object":"0", "helper":"MterpGet32Static" }
/*
* General SGET handler.
*
diff --git a/runtime/interpreter/mterp/mips/op_sget_boolean.S b/runtime/interpreter/mterp/mips/op_sget_boolean.S
index 45a5a70..7829970 100644
--- a/runtime/interpreter/mterp/mips/op_sget_boolean.S
+++ b/runtime/interpreter/mterp/mips/op_sget_boolean.S
@@ -1 +1 @@
-%include "mips/op_sget.S" {"helper":"artGetBooleanStaticFromCode"}
+%include "mips/op_sget.S" {"helper":"MterpGetBooleanStatic"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_byte.S b/runtime/interpreter/mterp/mips/op_sget_byte.S
index 319122c..ee08342 100644
--- a/runtime/interpreter/mterp/mips/op_sget_byte.S
+++ b/runtime/interpreter/mterp/mips/op_sget_byte.S
@@ -1 +1 @@
-%include "mips/op_sget.S" {"helper":"artGetByteStaticFromCode"}
+%include "mips/op_sget.S" {"helper":"MterpGetByteStatic"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_char.S b/runtime/interpreter/mterp/mips/op_sget_char.S
index 7103847..d8b477a 100644
--- a/runtime/interpreter/mterp/mips/op_sget_char.S
+++ b/runtime/interpreter/mterp/mips/op_sget_char.S
@@ -1 +1 @@
-%include "mips/op_sget.S" {"helper":"artGetCharStaticFromCode"}
+%include "mips/op_sget.S" {"helper":"MterpGetCharStatic"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_object.S b/runtime/interpreter/mterp/mips/op_sget_object.S
index b205f51..2dc00c3 100644
--- a/runtime/interpreter/mterp/mips/op_sget_object.S
+++ b/runtime/interpreter/mterp/mips/op_sget_object.S
@@ -1 +1 @@
-%include "mips/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"}
+%include "mips/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_short.S b/runtime/interpreter/mterp/mips/op_sget_short.S
index 3301823..ab55d93 100644
--- a/runtime/interpreter/mterp/mips/op_sget_short.S
+++ b/runtime/interpreter/mterp/mips/op_sget_short.S
@@ -1 +1 @@
-%include "mips/op_sget.S" {"helper":"artGetShortStaticFromCode"}
+%include "mips/op_sget.S" {"helper":"MterpGetShortStatic"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_wide.S b/runtime/interpreter/mterp/mips/op_sget_wide.S
index c729250..ec4295a 100644
--- a/runtime/interpreter/mterp/mips/op_sget_wide.S
+++ b/runtime/interpreter/mterp/mips/op_sget_wide.S
@@ -2,12 +2,12 @@
* 64-bit SGET handler.
*/
/* sget-wide vAA, field@BBBB */
- .extern artGet64StaticFromCode
+ .extern MterpGet64Static
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(artGet64StaticFromCode)
+ JAL(MterpGet64Static)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
bnez a3, MterpException
GET_OPA(a1) # a1 <- AA
diff --git a/runtime/interpreter/mterp/mips/op_sput.S b/runtime/interpreter/mterp/mips/op_sput.S
index 7034a0e..37f8687 100644
--- a/runtime/interpreter/mterp/mips/op_sput.S
+++ b/runtime/interpreter/mterp/mips/op_sput.S
@@ -1,4 +1,4 @@
-%default { "helper":"artSet32StaticFromCode"}
+%default { "helper":"MterpSet32Static"}
/*
* General SPUT handler.
*
diff --git a/runtime/interpreter/mterp/mips/op_sput_boolean.S b/runtime/interpreter/mterp/mips/op_sput_boolean.S
index 7909ef5..6426cd4 100644
--- a/runtime/interpreter/mterp/mips/op_sput_boolean.S
+++ b/runtime/interpreter/mterp/mips/op_sput_boolean.S
@@ -1 +1 @@
-%include "mips/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "mips/op_sput.S" {"helper":"MterpSetBooleanStatic"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_byte.S b/runtime/interpreter/mterp/mips/op_sput_byte.S
index 7909ef5..c68d18f 100644
--- a/runtime/interpreter/mterp/mips/op_sput_byte.S
+++ b/runtime/interpreter/mterp/mips/op_sput_byte.S
@@ -1 +1 @@
-%include "mips/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "mips/op_sput.S" {"helper":"MterpSetByteStatic"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_char.S b/runtime/interpreter/mterp/mips/op_sput_char.S
index 188195c..9b8983e 100644
--- a/runtime/interpreter/mterp/mips/op_sput_char.S
+++ b/runtime/interpreter/mterp/mips/op_sput_char.S
@@ -1 +1 @@
-%include "mips/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "mips/op_sput.S" {"helper":"MterpSetCharStatic"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_short.S b/runtime/interpreter/mterp/mips/op_sput_short.S
index 188195c..5a57ed9 100644
--- a/runtime/interpreter/mterp/mips/op_sput_short.S
+++ b/runtime/interpreter/mterp/mips/op_sput_short.S
@@ -1 +1 @@
-%include "mips/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "mips/op_sput.S" {"helper":"MterpSetShortStatic"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_wide.S b/runtime/interpreter/mterp/mips/op_sput_wide.S
index 3b347fc..c090007 100644
--- a/runtime/interpreter/mterp/mips/op_sput_wide.S
+++ b/runtime/interpreter/mterp/mips/op_sput_wide.S
@@ -2,15 +2,15 @@
* 64-bit SPUT handler.
*/
/* sput-wide vAA, field@BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
- lw a1, OFF_FP_METHOD(rFP) # a1 <- method
- GET_OPA(a2) # a2 <- AA
- EAS2(a2, rFP, a2) # a2 <- &fp[AA]
+ GET_OPA(a1) # a1 <- AA
+ EAS2(a1, rFP, a1) # a1 <- &fp[AA]
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- method
move a3, rSELF # a3 <- self
PREFETCH_INST(2) # load rINST
- JAL(artSet64IndirectStaticFromMterp)
+ JAL(MterpSet64Static)
bnez v0, MterpException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
diff --git a/runtime/interpreter/mterp/mips64/op_sget.S b/runtime/interpreter/mterp/mips64/op_sget.S
index bd2cfe3..71046db 100644
--- a/runtime/interpreter/mterp/mips64/op_sget.S
+++ b/runtime/interpreter/mterp/mips64/op_sget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32StaticFromCode", "extend":"" }
+%default { "is_object":"0", "helper":"MterpGet32Static", "extend":"" }
/*
* General SGET handler wrapper.
*
diff --git a/runtime/interpreter/mterp/mips64/op_sget_boolean.S b/runtime/interpreter/mterp/mips64/op_sget_boolean.S
index e7b1844..ec1ce9e 100644
--- a/runtime/interpreter/mterp/mips64/op_sget_boolean.S
+++ b/runtime/interpreter/mterp/mips64/op_sget_boolean.S
@@ -1 +1 @@
-%include "mips64/op_sget.S" {"helper":"artGetBooleanStaticFromCode", "extend":"and v0, v0, 0xff"}
+%include "mips64/op_sget.S" {"helper":"MterpGetBooleanStatic", "extend":"and v0, v0, 0xff"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_byte.S b/runtime/interpreter/mterp/mips64/op_sget_byte.S
index 52a2e4a..6a802f6 100644
--- a/runtime/interpreter/mterp/mips64/op_sget_byte.S
+++ b/runtime/interpreter/mterp/mips64/op_sget_byte.S
@@ -1 +1 @@
-%include "mips64/op_sget.S" {"helper":"artGetByteStaticFromCode", "extend":"seb v0, v0"}
+%include "mips64/op_sget.S" {"helper":"MterpGetByteStatic", "extend":"seb v0, v0"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_char.S b/runtime/interpreter/mterp/mips64/op_sget_char.S
index 873d82a..483d085 100644
--- a/runtime/interpreter/mterp/mips64/op_sget_char.S
+++ b/runtime/interpreter/mterp/mips64/op_sget_char.S
@@ -1 +1 @@
-%include "mips64/op_sget.S" {"helper":"artGetCharStaticFromCode", "extend":"and v0, v0, 0xffff"}
+%include "mips64/op_sget.S" {"helper":"MterpGetCharStatic", "extend":"and v0, v0, 0xffff"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_object.S b/runtime/interpreter/mterp/mips64/op_sget_object.S
index 3108417..2250696 100644
--- a/runtime/interpreter/mterp/mips64/op_sget_object.S
+++ b/runtime/interpreter/mterp/mips64/op_sget_object.S
@@ -1 +1 @@
-%include "mips64/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"}
+%include "mips64/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_short.S b/runtime/interpreter/mterp/mips64/op_sget_short.S
index fed4e76..b257bbb 100644
--- a/runtime/interpreter/mterp/mips64/op_sget_short.S
+++ b/runtime/interpreter/mterp/mips64/op_sget_short.S
@@ -1 +1 @@
-%include "mips64/op_sget.S" {"helper":"artGetShortStaticFromCode", "extend":"seh v0, v0"}
+%include "mips64/op_sget.S" {"helper":"MterpGetShortStatic", "extend":"seh v0, v0"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_wide.S b/runtime/interpreter/mterp/mips64/op_sget_wide.S
index 77124d1..ace64f8 100644
--- a/runtime/interpreter/mterp/mips64/op_sget_wide.S
+++ b/runtime/interpreter/mterp/mips64/op_sget_wide.S
@@ -3,12 +3,12 @@
*
*/
/* sget-wide vAA, field//BBBB */
- .extern artGet64StaticFromCode
+ .extern MterpGet64Static
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal artGet64StaticFromCode
+ jal MterpGet64Static
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a4, rINST, 8 # a4 <- AA
bnez a3, MterpException # bail out
diff --git a/runtime/interpreter/mterp/mips64/op_sput.S b/runtime/interpreter/mterp/mips64/op_sput.S
index 142f18f..466f333 100644
--- a/runtime/interpreter/mterp/mips64/op_sput.S
+++ b/runtime/interpreter/mterp/mips64/op_sput.S
@@ -1,4 +1,4 @@
-%default { "helper":"artSet32StaticFromCode" }
+%default { "helper":"MterpSet32Static" }
/*
* General SPUT handler wrapper.
*
diff --git a/runtime/interpreter/mterp/mips64/op_sput_boolean.S b/runtime/interpreter/mterp/mips64/op_sput_boolean.S
index f5b8dbf..eba58f7 100644
--- a/runtime/interpreter/mterp/mips64/op_sput_boolean.S
+++ b/runtime/interpreter/mterp/mips64/op_sput_boolean.S
@@ -1 +1 @@
-%include "mips64/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "mips64/op_sput.S" {"helper":"MterpSetBooleanStatic"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_byte.S b/runtime/interpreter/mterp/mips64/op_sput_byte.S
index f5b8dbf..80a26c0 100644
--- a/runtime/interpreter/mterp/mips64/op_sput_byte.S
+++ b/runtime/interpreter/mterp/mips64/op_sput_byte.S
@@ -1 +1 @@
-%include "mips64/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "mips64/op_sput.S" {"helper":"MterpSetByteStatic"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_char.S b/runtime/interpreter/mterp/mips64/op_sput_char.S
index c4d195c..c0d5bf3 100644
--- a/runtime/interpreter/mterp/mips64/op_sput_char.S
+++ b/runtime/interpreter/mterp/mips64/op_sput_char.S
@@ -1 +1 @@
-%include "mips64/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "mips64/op_sput.S" {"helper":"MterpSetCharStatic"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_short.S b/runtime/interpreter/mterp/mips64/op_sput_short.S
index c4d195c..b001832 100644
--- a/runtime/interpreter/mterp/mips64/op_sput_short.S
+++ b/runtime/interpreter/mterp/mips64/op_sput_short.S
@@ -1 +1 @@
-%include "mips64/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "mips64/op_sput.S" {"helper":"MterpSetShortStatic"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_wide.S b/runtime/interpreter/mterp/mips64/op_sput_wide.S
index 828ddc1..aa3d5b4 100644
--- a/runtime/interpreter/mterp/mips64/op_sput_wide.S
+++ b/runtime/interpreter/mterp/mips64/op_sput_wide.S
@@ -3,15 +3,15 @@
*
*/
/* sput-wide vAA, field//BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
- ld a1, OFF_FP_METHOD(rFP)
- srl a2, rINST, 8 # a2 <- AA
- dlsa a2, a2, rFP, 2
+ srl a1, rINST, 8 # a1 <- AA
+ dlsa a1, a1, rFP, 2
+ ld a2, OFF_FP_METHOD(rFP)
move a3, rSELF
PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal artSet64IndirectStaticFromMterp
+ jal MterpSet64Static
bnezc v0, MterpException # 0 on success, -1 on failure
ADVANCE 2 # Past exception point - now advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 75ab91a..8bf094e 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -587,27 +587,6 @@
return MterpShouldSwitchInterpreters();
}
-extern "C" ssize_t artSet64IndirectStaticFromMterp(uint32_t field_idx,
- ArtMethod* referrer,
- uint64_t* new_value,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t));
- if (LIKELY(field != nullptr)) {
- // Compiled code can't use transactional mode.
- field->Set64<false>(field->GetDeclaringClass(), *new_value);
- return 0; // success
- }
- field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int64_t));
- if (LIKELY(field != nullptr)) {
- // Compiled code can't use transactional mode.
- field->Set64<false>(field->GetDeclaringClass(), *new_value);
- return 0; // success
- }
- return -1; // failure
-}
-
extern "C" ssize_t artSet8InstanceFromMterp(uint32_t field_idx,
mirror::Object* obj,
uint8_t new_value,
@@ -689,7 +668,187 @@
return -1; // failure
}
-extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr, int32_t index)
+template <typename return_type, Primitive::Type primitive_type>
+ALWAYS_INLINE return_type MterpGetStatic(uint32_t field_idx,
+ ArtMethod* referrer,
+ Thread* self,
+ return_type (ArtField::*func)(ObjPtr<mirror::Object>))
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return_type res = 0; // On exception, the result will be ignored.
+ ArtField* f =
+ FindFieldFromCode<StaticPrimitiveRead, false>(field_idx,
+ referrer,
+ self,
+ primitive_type);
+ if (LIKELY(f != nullptr)) {
+ ObjPtr<mirror::Object> obj = f->GetDeclaringClass();
+ res = (f->*func)(obj);
+ }
+ return res;
+}
+
+extern "C" int32_t MterpGetBooleanStatic(uint32_t field_idx,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpGetStatic<uint8_t, Primitive::kPrimBoolean>(field_idx,
+ referrer,
+ self,
+ &ArtField::GetBoolean);
+}
+
+extern "C" int32_t MterpGetByteStatic(uint32_t field_idx,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpGetStatic<int8_t, Primitive::kPrimByte>(field_idx,
+ referrer,
+ self,
+ &ArtField::GetByte);
+}
+
+extern "C" uint32_t MterpGetCharStatic(uint32_t field_idx,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpGetStatic<uint16_t, Primitive::kPrimChar>(field_idx,
+ referrer,
+ self,
+ &ArtField::GetChar);
+}
+
+extern "C" int32_t MterpGetShortStatic(uint32_t field_idx,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpGetStatic<int16_t, Primitive::kPrimShort>(field_idx,
+ referrer,
+ self,
+ &ArtField::GetShort);
+}
+
+extern "C" mirror::Object* MterpGetObjStatic(uint32_t field_idx,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpGetStatic<ObjPtr<mirror::Object>, Primitive::kPrimNot>(field_idx,
+ referrer,
+ self,
+ &ArtField::GetObject).Ptr();
+}
+
+extern "C" int32_t MterpGet32Static(uint32_t field_idx,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpGetStatic<int32_t, Primitive::kPrimInt>(field_idx,
+ referrer,
+ self,
+ &ArtField::GetInt);
+}
+
+extern "C" int64_t MterpGet64Static(uint32_t field_idx, ArtMethod* referrer, Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpGetStatic<int64_t, Primitive::kPrimLong>(field_idx,
+ referrer,
+ self,
+ &ArtField::GetLong);
+}
+
+
+template <typename field_type, Primitive::Type primitive_type>
+int MterpSetStatic(uint32_t field_idx,
+ field_type new_value,
+ ArtMethod* referrer,
+ Thread* self,
+ void (ArtField::*func)(ObjPtr<mirror::Object>, field_type val))
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ int res = 0; // Assume success (following quick_field_entrypoints conventions)
+ ArtField* f =
+ FindFieldFromCode<StaticPrimitiveWrite, false>(field_idx, referrer, self, primitive_type);
+ if (LIKELY(f != nullptr)) {
+ ObjPtr<mirror::Object> obj = f->GetDeclaringClass();
+ (f->*func)(obj, new_value);
+ } else {
+ res = -1; // Failure
+ }
+ return res;
+}
+
+extern "C" int MterpSetBooleanStatic(uint32_t field_idx,
+ uint8_t new_value,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpSetStatic<uint8_t, Primitive::kPrimBoolean>(field_idx,
+ new_value,
+ referrer,
+ self,
+ &ArtField::SetBoolean<false>);
+}
+
+extern "C" int MterpSetByteStatic(uint32_t field_idx,
+ int8_t new_value,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpSetStatic<int8_t, Primitive::kPrimByte>(field_idx,
+ new_value,
+ referrer,
+ self,
+ &ArtField::SetByte<false>);
+}
+
+extern "C" int MterpSetCharStatic(uint32_t field_idx,
+ uint16_t new_value,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpSetStatic<uint16_t, Primitive::kPrimChar>(field_idx,
+ new_value,
+ referrer,
+ self,
+ &ArtField::SetChar<false>);
+}
+
+extern "C" int MterpSetShortStatic(uint32_t field_idx,
+ int16_t new_value,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpSetStatic<int16_t, Primitive::kPrimShort>(field_idx,
+ new_value,
+ referrer,
+ self,
+ &ArtField::SetShort<false>);
+}
+
+extern "C" int MterpSet32Static(uint32_t field_idx,
+ int32_t new_value,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpSetStatic<int32_t, Primitive::kPrimInt>(field_idx,
+ new_value,
+ referrer,
+ self,
+ &ArtField::SetInt<false>);
+}
+
+extern "C" int MterpSet64Static(uint32_t field_idx,
+ int64_t* new_value,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpSetStatic<int64_t, Primitive::kPrimLong>(field_idx,
+ *new_value,
+ referrer,
+ self,
+ &ArtField::SetLong<false>);
+}
+
+extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr,
+ int32_t index)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(arr == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
@@ -703,7 +862,8 @@
}
}
-extern "C" mirror::Object* artIGetObjectFromMterp(mirror::Object* obj, uint32_t field_offset)
+extern "C" mirror::Object* artIGetObjectFromMterp(mirror::Object* obj,
+ uint32_t field_offset)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
index 576020f..e2b693f 100644
--- a/runtime/interpreter/mterp/out/mterp_arm.S
+++ b/runtime/interpreter/mterp/out/mterp_arm.S
@@ -2631,12 +2631,12 @@
*/
/* op vAA, field@BBBB */
- .extern artGet32StaticFromCode
+ .extern MterpGet32Static
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl artGet32StaticFromCode
+ bl MterpGet32Static
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r2, rINST, lsr #8 @ r2<- AA
PREFETCH_INST 2
@@ -2661,12 +2661,12 @@
*/
/* sget-wide vAA, field@BBBB */
- .extern artGet64StaticFromCode
+ .extern MterpGet64Static
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl artGet64StaticFromCode
+ bl MterpGet64Static
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r9, rINST, lsr #8 @ r9<- AA
VREG_INDEX_TO_ADDR lr, r9 @ r9<- &fp[AA]
@@ -2690,12 +2690,12 @@
*/
/* op vAA, field@BBBB */
- .extern artGetObjStaticFromCode
+ .extern MterpGetObjStatic
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl artGetObjStaticFromCode
+ bl MterpGetObjStatic
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r2, rINST, lsr #8 @ r2<- AA
PREFETCH_INST 2
@@ -2723,12 +2723,12 @@
*/
/* op vAA, field@BBBB */
- .extern artGetBooleanStaticFromCode
+ .extern MterpGetBooleanStatic
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl artGetBooleanStaticFromCode
+ bl MterpGetBooleanStatic
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r2, rINST, lsr #8 @ r2<- AA
PREFETCH_INST 2
@@ -2756,12 +2756,12 @@
*/
/* op vAA, field@BBBB */
- .extern artGetByteStaticFromCode
+ .extern MterpGetByteStatic
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl artGetByteStaticFromCode
+ bl MterpGetByteStatic
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r2, rINST, lsr #8 @ r2<- AA
PREFETCH_INST 2
@@ -2789,12 +2789,12 @@
*/
/* op vAA, field@BBBB */
- .extern artGetCharStaticFromCode
+ .extern MterpGetCharStatic
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl artGetCharStaticFromCode
+ bl MterpGetCharStatic
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r2, rINST, lsr #8 @ r2<- AA
PREFETCH_INST 2
@@ -2822,12 +2822,12 @@
*/
/* op vAA, field@BBBB */
- .extern artGetShortStaticFromCode
+ .extern MterpGetShortStatic
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl artGetShortStaticFromCode
+ bl MterpGetShortStatic
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r2, rINST, lsr #8 @ r2<- AA
PREFETCH_INST 2
@@ -2860,7 +2860,7 @@
ldr r2, [rFP, #OFF_FP_METHOD]
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl artSet32StaticFromCode
+ bl MterpSet32Static
cmp r0, #0 @ 0 on success, -1 on failure
bne MterpException
ADVANCE 2 @ Past exception point - now advance rPC
@@ -2876,15 +2876,15 @@
*
*/
/* sput-wide vAA, field@BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
- ldr r1, [rFP, #OFF_FP_METHOD]
- mov r2, rINST, lsr #8 @ r3<- AA
- VREG_INDEX_TO_ADDR r2, r2
+ mov r1, rINST, lsr #8 @ r1<- AA
+ VREG_INDEX_TO_ADDR r1, r1
+ ldr r2, [rFP, #OFF_FP_METHOD]
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl artSet64IndirectStaticFromMterp
+ bl MterpSet64Static
cmp r0, #0 @ 0 on success, -1 on failure
bne MterpException
ADVANCE 2 @ Past exception point - now advance rPC
@@ -2925,7 +2925,7 @@
ldr r2, [rFP, #OFF_FP_METHOD]
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl artSet8StaticFromCode
+ bl MterpSetBooleanStatic
cmp r0, #0 @ 0 on success, -1 on failure
bne MterpException
ADVANCE 2 @ Past exception point - now advance rPC
@@ -2951,7 +2951,7 @@
ldr r2, [rFP, #OFF_FP_METHOD]
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl artSet8StaticFromCode
+ bl MterpSetByteStatic
cmp r0, #0 @ 0 on success, -1 on failure
bne MterpException
ADVANCE 2 @ Past exception point - now advance rPC
@@ -2977,7 +2977,7 @@
ldr r2, [rFP, #OFF_FP_METHOD]
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl artSet16StaticFromCode
+ bl MterpSetCharStatic
cmp r0, #0 @ 0 on success, -1 on failure
bne MterpException
ADVANCE 2 @ Past exception point - now advance rPC
@@ -3003,7 +3003,7 @@
ldr r2, [rFP, #OFF_FP_METHOD]
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl artSet16StaticFromCode
+ bl MterpSetShortStatic
cmp r0, #0 @ 0 on success, -1 on failure
bne MterpException
ADVANCE 2 @ Past exception point - now advance rPC
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
index a7b5587..ef5a4da 100644
--- a/runtime/interpreter/mterp/out/mterp_arm64.S
+++ b/runtime/interpreter/mterp/out/mterp_arm64.S
@@ -2543,12 +2543,12 @@
*/
/* op vAA, field//BBBB */
- .extern artGet32StaticFromCode
+ .extern MterpGet32Static
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl artGet32StaticFromCode
+ bl MterpGet32Static
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w2, wINST, #8 // w2<- AA
@@ -2573,12 +2573,12 @@
*/
/* sget-wide vAA, field//BBBB */
- .extern artGet64StaticFromCode
+ .extern MterpGet64Static
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl artGet64StaticFromCode
+ bl MterpGet64Static
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w4, wINST, #8 // w4<- AA
cbnz x3, MterpException // bail out
@@ -2599,12 +2599,12 @@
*/
/* op vAA, field//BBBB */
- .extern artGetObjStaticFromCode
+ .extern MterpGetObjStatic
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl artGetObjStaticFromCode
+ bl MterpGetObjStatic
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w2, wINST, #8 // w2<- AA
@@ -2632,12 +2632,12 @@
*/
/* op vAA, field//BBBB */
- .extern artGetBooleanStaticFromCode
+ .extern MterpGetBooleanStatic
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl artGetBooleanStaticFromCode
+ bl MterpGetBooleanStatic
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w2, wINST, #8 // w2<- AA
uxtb w0, w0
@@ -2665,12 +2665,12 @@
*/
/* op vAA, field//BBBB */
- .extern artGetByteStaticFromCode
+ .extern MterpGetByteStatic
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl artGetByteStaticFromCode
+ bl MterpGetByteStatic
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w2, wINST, #8 // w2<- AA
sxtb w0, w0
@@ -2698,12 +2698,12 @@
*/
/* op vAA, field//BBBB */
- .extern artGetCharStaticFromCode
+ .extern MterpGetCharStatic
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl artGetCharStaticFromCode
+ bl MterpGetCharStatic
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w2, wINST, #8 // w2<- AA
uxth w0, w0
@@ -2731,12 +2731,12 @@
*/
/* op vAA, field//BBBB */
- .extern artGetShortStaticFromCode
+ .extern MterpGetShortStatic
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl artGetShortStaticFromCode
+ bl MterpGetShortStatic
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w2, wINST, #8 // w2<- AA
sxth w0, w0
@@ -2769,7 +2769,7 @@
ldr x2, [xFP, #OFF_FP_METHOD]
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl artSet32StaticFromCode
+ bl MterpSet32Static
cbnz w0, MterpException // 0 on success
ADVANCE 2 // Past exception point - now advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
@@ -2784,15 +2784,15 @@
*
*/
/* sput-wide vAA, field//BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
- ldr x1, [xFP, #OFF_FP_METHOD]
- lsr w2, wINST, #8 // w3<- AA
- VREG_INDEX_TO_ADDR x2, w2
+ lsr w1, wINST, #8 // w1<- AA
+ VREG_INDEX_TO_ADDR x1, w1
+ ldr x2, [xFP, #OFF_FP_METHOD]
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl artSet64IndirectStaticFromMterp
+ bl MterpSet64Static
cbnz w0, MterpException // 0 on success, -1 on failure
ADVANCE 2 // Past exception point - now advance rPC
GET_INST_OPCODE ip // extract opcode from wINST
@@ -2831,7 +2831,7 @@
ldr x2, [xFP, #OFF_FP_METHOD]
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl artSet8StaticFromCode
+ bl MterpSetBooleanStatic
cbnz w0, MterpException // 0 on success
ADVANCE 2 // Past exception point - now advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
@@ -2856,7 +2856,7 @@
ldr x2, [xFP, #OFF_FP_METHOD]
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl artSet8StaticFromCode
+ bl MterpSetByteStatic
cbnz w0, MterpException // 0 on success
ADVANCE 2 // Past exception point - now advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
@@ -2881,7 +2881,7 @@
ldr x2, [xFP, #OFF_FP_METHOD]
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl artSet16StaticFromCode
+ bl MterpSetCharStatic
cbnz w0, MterpException // 0 on success
ADVANCE 2 // Past exception point - now advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
@@ -2906,7 +2906,7 @@
ldr x2, [xFP, #OFF_FP_METHOD]
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl artSet16StaticFromCode
+ bl MterpSetShortStatic
cbnz w0, MterpException // 0 on success
ADVANCE 2 // Past exception point - now advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
index b47c019..579afc2 100644
--- a/runtime/interpreter/mterp/out/mterp_mips.S
+++ b/runtime/interpreter/mterp/out/mterp_mips.S
@@ -3038,12 +3038,12 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGet32StaticFromCode
+ .extern MterpGet32Static
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(artGet32StaticFromCode)
+ JAL(MterpGet32Static)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
@@ -3064,12 +3064,12 @@
* 64-bit SGET handler.
*/
/* sget-wide vAA, field@BBBB */
- .extern artGet64StaticFromCode
+ .extern MterpGet64Static
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(artGet64StaticFromCode)
+ JAL(MterpGet64Static)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
bnez a3, MterpException
GET_OPA(a1) # a1 <- AA
@@ -3088,12 +3088,12 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGetObjStaticFromCode
+ .extern MterpGetObjStatic
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(artGetObjStaticFromCode)
+ JAL(MterpGetObjStatic)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
@@ -3118,12 +3118,12 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGetBooleanStaticFromCode
+ .extern MterpGetBooleanStatic
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(artGetBooleanStaticFromCode)
+ JAL(MterpGetBooleanStatic)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
@@ -3148,12 +3148,12 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGetByteStaticFromCode
+ .extern MterpGetByteStatic
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(artGetByteStaticFromCode)
+ JAL(MterpGetByteStatic)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
@@ -3178,12 +3178,12 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGetCharStaticFromCode
+ .extern MterpGetCharStatic
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(artGetCharStaticFromCode)
+ JAL(MterpGetCharStatic)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
@@ -3208,12 +3208,12 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGetShortStaticFromCode
+ .extern MterpGetShortStatic
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(artGetShortStaticFromCode)
+ JAL(MterpGetShortStatic)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
@@ -3244,7 +3244,7 @@
lw a2, OFF_FP_METHOD(rFP) # a2 <- method
move a3, rSELF # a3 <- self
PREFETCH_INST(2) # load rINST
- JAL(artSet32StaticFromCode)
+ JAL(MterpSet32Static)
bnez v0, MterpException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -3258,15 +3258,15 @@
* 64-bit SPUT handler.
*/
/* sput-wide vAA, field@BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
- lw a1, OFF_FP_METHOD(rFP) # a1 <- method
- GET_OPA(a2) # a2 <- AA
- EAS2(a2, rFP, a2) # a2 <- &fp[AA]
+ GET_OPA(a1) # a1 <- AA
+ EAS2(a1, rFP, a1) # a1 <- &fp[AA]
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- method
move a3, rSELF # a3 <- self
PREFETCH_INST(2) # load rINST
- JAL(artSet64IndirectStaticFromMterp)
+ JAL(MterpSet64Static)
bnez v0, MterpException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -3311,7 +3311,7 @@
lw a2, OFF_FP_METHOD(rFP) # a2 <- method
move a3, rSELF # a3 <- self
PREFETCH_INST(2) # load rINST
- JAL(artSet8StaticFromCode)
+ JAL(MterpSetBooleanStatic)
bnez v0, MterpException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -3336,7 +3336,7 @@
lw a2, OFF_FP_METHOD(rFP) # a2 <- method
move a3, rSELF # a3 <- self
PREFETCH_INST(2) # load rINST
- JAL(artSet8StaticFromCode)
+ JAL(MterpSetByteStatic)
bnez v0, MterpException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -3361,7 +3361,7 @@
lw a2, OFF_FP_METHOD(rFP) # a2 <- method
move a3, rSELF # a3 <- self
PREFETCH_INST(2) # load rINST
- JAL(artSet16StaticFromCode)
+ JAL(MterpSetCharStatic)
bnez v0, MterpException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -3386,7 +3386,7 @@
lw a2, OFF_FP_METHOD(rFP) # a2 <- method
move a3, rSELF # a3 <- self
PREFETCH_INST(2) # load rINST
- JAL(artSet16StaticFromCode)
+ JAL(MterpSetShortStatic)
bnez v0, MterpException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
index e1867d0..3656df9 100644
--- a/runtime/interpreter/mterp/out/mterp_mips64.S
+++ b/runtime/interpreter/mterp/out/mterp_mips64.S
@@ -2585,12 +2585,12 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field//BBBB */
- .extern artGet32StaticFromCode
+ .extern MterpGet32Static
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal artGet32StaticFromCode
+ jal MterpGet32Static
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a2, rINST, 8 # a2 <- AA
@@ -2614,12 +2614,12 @@
*
*/
/* sget-wide vAA, field//BBBB */
- .extern artGet64StaticFromCode
+ .extern MterpGet64Static
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal artGet64StaticFromCode
+ jal MterpGet64Static
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a4, rINST, 8 # a4 <- AA
bnez a3, MterpException # bail out
@@ -2639,12 +2639,12 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field//BBBB */
- .extern artGetObjStaticFromCode
+ .extern MterpGetObjStatic
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal artGetObjStaticFromCode
+ jal MterpGetObjStatic
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a2, rINST, 8 # a2 <- AA
@@ -2671,12 +2671,12 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field//BBBB */
- .extern artGetBooleanStaticFromCode
+ .extern MterpGetBooleanStatic
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal artGetBooleanStaticFromCode
+ jal MterpGetBooleanStatic
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a2, rINST, 8 # a2 <- AA
and v0, v0, 0xff
@@ -2703,12 +2703,12 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field//BBBB */
- .extern artGetByteStaticFromCode
+ .extern MterpGetByteStatic
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal artGetByteStaticFromCode
+ jal MterpGetByteStatic
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a2, rINST, 8 # a2 <- AA
seb v0, v0
@@ -2735,12 +2735,12 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field//BBBB */
- .extern artGetCharStaticFromCode
+ .extern MterpGetCharStatic
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal artGetCharStaticFromCode
+ jal MterpGetCharStatic
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a2, rINST, 8 # a2 <- AA
and v0, v0, 0xffff
@@ -2767,12 +2767,12 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field//BBBB */
- .extern artGetShortStaticFromCode
+ .extern MterpGetShortStatic
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal artGetShortStaticFromCode
+ jal MterpGetShortStatic
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a2, rINST, 8 # a2 <- AA
seh v0, v0
@@ -2798,7 +2798,7 @@
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field//BBBB */
- .extern artSet32StaticFromCode
+ .extern MterpSet32Static
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
srl a3, rINST, 8 # a3 <- AA
@@ -2806,7 +2806,7 @@
ld a2, OFF_FP_METHOD(rFP)
move a3, rSELF
PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal artSet32StaticFromCode
+ jal MterpSet32Static
bnezc v0, MterpException # 0 on success
ADVANCE 2 # Past exception point - now advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2821,15 +2821,15 @@
*
*/
/* sput-wide vAA, field//BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
- ld a1, OFF_FP_METHOD(rFP)
- srl a2, rINST, 8 # a2 <- AA
- dlsa a2, a2, rFP, 2
+ srl a1, rINST, 8 # a1 <- AA
+ dlsa a1, a1, rFP, 2
+ ld a2, OFF_FP_METHOD(rFP)
move a3, rSELF
PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal artSet64IndirectStaticFromMterp
+ jal MterpSet64Static
bnezc v0, MterpException # 0 on success, -1 on failure
ADVANCE 2 # Past exception point - now advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2862,7 +2862,7 @@
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field//BBBB */
- .extern artSet8StaticFromCode
+ .extern MterpSetBooleanStatic
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
srl a3, rINST, 8 # a3 <- AA
@@ -2870,7 +2870,7 @@
ld a2, OFF_FP_METHOD(rFP)
move a3, rSELF
PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal artSet8StaticFromCode
+ jal MterpSetBooleanStatic
bnezc v0, MterpException # 0 on success
ADVANCE 2 # Past exception point - now advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2888,7 +2888,7 @@
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field//BBBB */
- .extern artSet8StaticFromCode
+ .extern MterpSetByteStatic
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
srl a3, rINST, 8 # a3 <- AA
@@ -2896,7 +2896,7 @@
ld a2, OFF_FP_METHOD(rFP)
move a3, rSELF
PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal artSet8StaticFromCode
+ jal MterpSetByteStatic
bnezc v0, MterpException # 0 on success
ADVANCE 2 # Past exception point - now advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2914,7 +2914,7 @@
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field//BBBB */
- .extern artSet16StaticFromCode
+ .extern MterpSetCharStatic
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
srl a3, rINST, 8 # a3 <- AA
@@ -2922,7 +2922,7 @@
ld a2, OFF_FP_METHOD(rFP)
move a3, rSELF
PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal artSet16StaticFromCode
+ jal MterpSetCharStatic
bnezc v0, MterpException # 0 on success
ADVANCE 2 # Past exception point - now advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2940,7 +2940,7 @@
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field//BBBB */
- .extern artSet16StaticFromCode
+ .extern MterpSetShortStatic
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
srl a3, rINST, 8 # a3 <- AA
@@ -2948,7 +2948,7 @@
ld a2, OFF_FP_METHOD(rFP)
move a3, rSELF
PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal artSet16StaticFromCode
+ jal MterpSetShortStatic
bnezc v0, MterpException # 0 on success
ADVANCE 2 # Past exception point - now advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S
index aab20f5..21d9671 100644
--- a/runtime/interpreter/mterp/out/mterp_x86.S
+++ b/runtime/interpreter/mterp/out/mterp_x86.S
@@ -2535,7 +2535,7 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGet32StaticFromCode
+ .extern MterpGet32Static
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -2543,7 +2543,7 @@
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(artGet32StaticFromCode)
+ call SYMBOL(MterpGet32Static)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2564,7 +2564,7 @@
*
*/
/* sget-wide vAA, field@BBBB */
- .extern artGet64StaticFromCode
+ .extern MterpGet64Static
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -2572,7 +2572,7 @@
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(artGet64StaticFromCode)
+ call SYMBOL(MterpGet64Static)
movl rSELF, %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException
@@ -2592,7 +2592,7 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGetObjStaticFromCode
+ .extern MterpGetObjStatic
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -2600,7 +2600,7 @@
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(artGetObjStaticFromCode)
+ call SYMBOL(MterpGetObjStatic)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2624,7 +2624,7 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGetBooleanStaticFromCode
+ .extern MterpGetBooleanStatic
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -2632,7 +2632,7 @@
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(artGetBooleanStaticFromCode)
+ call SYMBOL(MterpGetBooleanStatic)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2656,7 +2656,7 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGetByteStaticFromCode
+ .extern MterpGetByteStatic
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -2664,7 +2664,7 @@
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(artGetByteStaticFromCode)
+ call SYMBOL(MterpGetByteStatic)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2688,7 +2688,7 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGetCharStaticFromCode
+ .extern MterpGetCharStatic
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -2696,7 +2696,7 @@
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(artGetCharStaticFromCode)
+ call SYMBOL(MterpGetCharStatic)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2720,7 +2720,7 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGetShortStaticFromCode
+ .extern MterpGetShortStatic
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -2728,7 +2728,7 @@
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(artGetShortStaticFromCode)
+ call SYMBOL(MterpGetShortStatic)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2751,7 +2751,7 @@
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern artSet32StaticFromCode
+ .extern MterpSet32Static
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2761,7 +2761,7 @@
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artSet32StaticFromCode)
+ call SYMBOL(MterpSet32Static)
testb %al, %al
jnz MterpException
RESTORE_IBASE
@@ -2776,17 +2776,17 @@
*
*/
/* sput-wide vAA, field@BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG1(%esp) # referrer
leal VREG_ADDRESS(rINST), %eax
- movl %eax, OUT_ARG2(%esp) # &fp[AA]
+ movl %eax, OUT_ARG1(%esp) # &fp[AA]
+ movl OFF_FP_METHOD(rFP), %eax
+ movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artSet64IndirectStaticFromMterp)
+ call SYMBOL(MterpSet64Static)
testb %al, %al
jnz MterpException
RESTORE_IBASE
@@ -2821,7 +2821,7 @@
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern artSet8StaticFromCode
+ .extern MterpSetBooleanStatic
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2831,7 +2831,7 @@
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artSet8StaticFromCode)
+ call SYMBOL(MterpSetBooleanStatic)
testb %al, %al
jnz MterpException
RESTORE_IBASE
@@ -2849,7 +2849,7 @@
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern artSet8StaticFromCode
+ .extern MterpSetByteStatic
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2859,7 +2859,7 @@
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artSet8StaticFromCode)
+ call SYMBOL(MterpSetByteStatic)
testb %al, %al
jnz MterpException
RESTORE_IBASE
@@ -2877,7 +2877,7 @@
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern artSet16StaticFromCode
+ .extern MterpSetCharStatic
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2887,7 +2887,7 @@
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artSet16StaticFromCode)
+ call SYMBOL(MterpSetCharStatic)
testb %al, %al
jnz MterpException
RESTORE_IBASE
@@ -2905,7 +2905,7 @@
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern artSet16StaticFromCode
+ .extern MterpSetShortStatic
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2915,7 +2915,7 @@
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artSet16StaticFromCode)
+ call SYMBOL(MterpSetShortStatic)
testb %al, %al
jnz MterpException
RESTORE_IBASE
diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S
index eb57066..b5a5ae5 100644
--- a/runtime/interpreter/mterp/out/mterp_x86_64.S
+++ b/runtime/interpreter/mterp/out/mterp_x86_64.S
@@ -2445,12 +2445,12 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
*/
/* op vAA, field@BBBB */
- .extern artGet32StaticFromCode
+ .extern MterpGet32Static
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref CCCC
movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
movq rSELF, OUT_ARG2 # self
- call SYMBOL(artGet32StaticFromCode)
+ call SYMBOL(MterpGet32Static)
movq rSELF, %rcx
cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException
@@ -2476,12 +2476,12 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
*/
/* op vAA, field@BBBB */
- .extern artGet64StaticFromCode
+ .extern MterpGet64Static
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref CCCC
movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
movq rSELF, OUT_ARG2 # self
- call SYMBOL(artGet64StaticFromCode)
+ call SYMBOL(MterpGet64Static)
movq rSELF, %rcx
cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException
@@ -2508,12 +2508,12 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
*/
/* op vAA, field@BBBB */
- .extern artGetObjStaticFromCode
+ .extern MterpGetObjStatic
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref CCCC
movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
movq rSELF, OUT_ARG2 # self
- call SYMBOL(artGetObjStaticFromCode)
+ call SYMBOL(MterpGetObjStatic)
movq rSELF, %rcx
cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException
@@ -2540,12 +2540,12 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
*/
/* op vAA, field@BBBB */
- .extern artGetBooleanStaticFromCode
+ .extern MterpGetBooleanStatic
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref CCCC
movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
movq rSELF, OUT_ARG2 # self
- call SYMBOL(artGetBooleanStaticFromCode)
+ call SYMBOL(MterpGetBooleanStatic)
movq rSELF, %rcx
cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException
@@ -2572,12 +2572,12 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
*/
/* op vAA, field@BBBB */
- .extern artGetByteStaticFromCode
+ .extern MterpGetByteStatic
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref CCCC
movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
movq rSELF, OUT_ARG2 # self
- call SYMBOL(artGetByteStaticFromCode)
+ call SYMBOL(MterpGetByteStatic)
movq rSELF, %rcx
cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException
@@ -2604,12 +2604,12 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
*/
/* op vAA, field@BBBB */
- .extern artGetCharStaticFromCode
+ .extern MterpGetCharStatic
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref CCCC
movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
movq rSELF, OUT_ARG2 # self
- call SYMBOL(artGetCharStaticFromCode)
+ call SYMBOL(MterpGetCharStatic)
movq rSELF, %rcx
cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException
@@ -2636,12 +2636,12 @@
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
*/
/* op vAA, field@BBBB */
- .extern artGetShortStaticFromCode
+ .extern MterpGetShortStatic
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref CCCC
movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
movq rSELF, OUT_ARG2 # self
- call SYMBOL(artGetShortStaticFromCode)
+ call SYMBOL(MterpGetShortStatic)
movq rSELF, %rcx
cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException
@@ -2667,13 +2667,13 @@
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern artSet32StaticFromCode
+ .extern MterpSet32Static
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3 # self
- call SYMBOL(artSet32StaticFromCode)
+ call SYMBOL(MterpSet32Static)
testb %al, %al
jnz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2687,13 +2687,13 @@
*
*/
/* sput-wide vAA, field@BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
- movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
- leaq VREG_ADDRESS(rINSTq), OUT_ARG2 # &fp[AA]
+ leaq VREG_ADDRESS(rINSTq), OUT_ARG1 # &fp[AA]
+ movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3 # self
- call SYMBOL(artSet64IndirectStaticFromMterp)
+ call SYMBOL(MterpSet64Static)
testb %al, %al
jnz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2724,13 +2724,13 @@
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern artSet8StaticFromCode
+ .extern MterpSetBooleanStatic
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3 # self
- call SYMBOL(artSet8StaticFromCode)
+ call SYMBOL(MterpSetBooleanStatic)
testb %al, %al
jnz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2747,13 +2747,13 @@
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern artSet8StaticFromCode
+ .extern MterpSetByteStatic
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3 # self
- call SYMBOL(artSet8StaticFromCode)
+ call SYMBOL(MterpSetByteStatic)
testb %al, %al
jnz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2770,13 +2770,13 @@
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern artSet16StaticFromCode
+ .extern MterpSetCharStatic
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3 # self
- call SYMBOL(artSet16StaticFromCode)
+ call SYMBOL(MterpSetCharStatic)
testb %al, %al
jnz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2793,13 +2793,13 @@
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern artSet16StaticFromCode
+ .extern MterpSetShortStatic
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3 # self
- call SYMBOL(artSet16StaticFromCode)
+ call SYMBOL(MterpSetShortStatic)
testb %al, %al
jnz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_sget.S b/runtime/interpreter/mterp/x86/op_sget.S
index 0e9a3d8..6e42d32 100644
--- a/runtime/interpreter/mterp/x86/op_sget.S
+++ b/runtime/interpreter/mterp/x86/op_sget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32StaticFromCode" }
+%default { "is_object":"0", "helper":"MterpGet32Static" }
/*
* General SGET handler wrapper.
*
diff --git a/runtime/interpreter/mterp/x86/op_sget_boolean.S b/runtime/interpreter/mterp/x86/op_sget_boolean.S
index f058dd8..5fa2bf0 100644
--- a/runtime/interpreter/mterp/x86/op_sget_boolean.S
+++ b/runtime/interpreter/mterp/x86/op_sget_boolean.S
@@ -1 +1 @@
-%include "x86/op_sget.S" {"helper":"artGetBooleanStaticFromCode"}
+%include "x86/op_sget.S" {"helper":"MterpGetBooleanStatic"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_byte.S b/runtime/interpreter/mterp/x86/op_sget_byte.S
index c952f40..ef812f1 100644
--- a/runtime/interpreter/mterp/x86/op_sget_byte.S
+++ b/runtime/interpreter/mterp/x86/op_sget_byte.S
@@ -1 +1 @@
-%include "x86/op_sget.S" {"helper":"artGetByteStaticFromCode"}
+%include "x86/op_sget.S" {"helper":"MterpGetByteStatic"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_char.S b/runtime/interpreter/mterp/x86/op_sget_char.S
index d7bd410..3bc34ef 100644
--- a/runtime/interpreter/mterp/x86/op_sget_char.S
+++ b/runtime/interpreter/mterp/x86/op_sget_char.S
@@ -1 +1 @@
-%include "x86/op_sget.S" {"helper":"artGetCharStaticFromCode"}
+%include "x86/op_sget.S" {"helper":"MterpGetCharStatic"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_object.S b/runtime/interpreter/mterp/x86/op_sget_object.S
index 1c95f9a..b829e75 100644
--- a/runtime/interpreter/mterp/x86/op_sget_object.S
+++ b/runtime/interpreter/mterp/x86/op_sget_object.S
@@ -1 +1 @@
-%include "x86/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"}
+%include "x86/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_short.S b/runtime/interpreter/mterp/x86/op_sget_short.S
index 6475306..449cf6f 100644
--- a/runtime/interpreter/mterp/x86/op_sget_short.S
+++ b/runtime/interpreter/mterp/x86/op_sget_short.S
@@ -1 +1 @@
-%include "x86/op_sget.S" {"helper":"artGetShortStaticFromCode"}
+%include "x86/op_sget.S" {"helper":"MterpGetShortStatic"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_wide.S b/runtime/interpreter/mterp/x86/op_sget_wide.S
index 2b60303..a605bcf 100644
--- a/runtime/interpreter/mterp/x86/op_sget_wide.S
+++ b/runtime/interpreter/mterp/x86/op_sget_wide.S
@@ -3,7 +3,7 @@
*
*/
/* sget-wide vAA, field@BBBB */
- .extern artGet64StaticFromCode
+ .extern MterpGet64Static
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -11,7 +11,7 @@
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(artGet64StaticFromCode)
+ call SYMBOL(MterpGet64Static)
movl rSELF, %ecx
cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException
diff --git a/runtime/interpreter/mterp/x86/op_sput.S b/runtime/interpreter/mterp/x86/op_sput.S
index 0b5de09..99f6088 100644
--- a/runtime/interpreter/mterp/x86/op_sput.S
+++ b/runtime/interpreter/mterp/x86/op_sput.S
@@ -1,4 +1,4 @@
-%default { "helper":"artSet32StaticFromCode"}
+%default { "helper":"MterpSet32Static"}
/*
* General SPUT handler wrapper.
*
diff --git a/runtime/interpreter/mterp/x86/op_sput_boolean.S b/runtime/interpreter/mterp/x86/op_sput_boolean.S
index 63601bd..a7fffda 100644
--- a/runtime/interpreter/mterp/x86/op_sput_boolean.S
+++ b/runtime/interpreter/mterp/x86/op_sput_boolean.S
@@ -1 +1 @@
-%include "x86/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "x86/op_sput.S" {"helper":"MterpSetBooleanStatic"}
diff --git a/runtime/interpreter/mterp/x86/op_sput_byte.S b/runtime/interpreter/mterp/x86/op_sput_byte.S
index 63601bd..3a5ff92 100644
--- a/runtime/interpreter/mterp/x86/op_sput_byte.S
+++ b/runtime/interpreter/mterp/x86/op_sput_byte.S
@@ -1 +1 @@
-%include "x86/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "x86/op_sput.S" {"helper":"MterpSetByteStatic"}
diff --git a/runtime/interpreter/mterp/x86/op_sput_char.S b/runtime/interpreter/mterp/x86/op_sput_char.S
index 1749f7c..565cc2a 100644
--- a/runtime/interpreter/mterp/x86/op_sput_char.S
+++ b/runtime/interpreter/mterp/x86/op_sput_char.S
@@ -1 +1 @@
-%include "x86/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "x86/op_sput.S" {"helper":"MterpSetCharStatic"}
diff --git a/runtime/interpreter/mterp/x86/op_sput_short.S b/runtime/interpreter/mterp/x86/op_sput_short.S
index 1749f7c..85c3441 100644
--- a/runtime/interpreter/mterp/x86/op_sput_short.S
+++ b/runtime/interpreter/mterp/x86/op_sput_short.S
@@ -1 +1 @@
-%include "x86/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "x86/op_sput.S" {"helper":"MterpSetShortStatic"}
diff --git a/runtime/interpreter/mterp/x86/op_sput_wide.S b/runtime/interpreter/mterp/x86/op_sput_wide.S
index 19cff0d..8cc7e28 100644
--- a/runtime/interpreter/mterp/x86/op_sput_wide.S
+++ b/runtime/interpreter/mterp/x86/op_sput_wide.S
@@ -3,17 +3,17 @@
*
*/
/* sput-wide vAA, field@BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG1(%esp) # referrer
leal VREG_ADDRESS(rINST), %eax
- movl %eax, OUT_ARG2(%esp) # &fp[AA]
+ movl %eax, OUT_ARG1(%esp) # &fp[AA]
+ movl OFF_FP_METHOD(rFP), %eax
+ movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artSet64IndirectStaticFromMterp)
+ call SYMBOL(MterpSet64Static)
testb %al, %al
jnz MterpException
RESTORE_IBASE
diff --git a/runtime/interpreter/mterp/x86_64/op_sget.S b/runtime/interpreter/mterp/x86_64/op_sget.S
index d39e6c4..e996c77 100644
--- a/runtime/interpreter/mterp/x86_64/op_sget.S
+++ b/runtime/interpreter/mterp/x86_64/op_sget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32StaticFromCode", "wide":"0" }
+%default { "is_object":"0", "helper":"MterpGet32Static", "wide":"0" }
/*
* General SGET handler wrapper.
*
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_boolean.S b/runtime/interpreter/mterp/x86_64/op_sget_boolean.S
index 7d358da..ee772ad 100644
--- a/runtime/interpreter/mterp/x86_64/op_sget_boolean.S
+++ b/runtime/interpreter/mterp/x86_64/op_sget_boolean.S
@@ -1 +1 @@
-%include "x86_64/op_sget.S" {"helper":"artGetBooleanStaticFromCode"}
+%include "x86_64/op_sget.S" {"helper":"MterpGetBooleanStatic"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_byte.S b/runtime/interpreter/mterp/x86_64/op_sget_byte.S
index 79d9ff4..f65ea49 100644
--- a/runtime/interpreter/mterp/x86_64/op_sget_byte.S
+++ b/runtime/interpreter/mterp/x86_64/op_sget_byte.S
@@ -1 +1 @@
-%include "x86_64/op_sget.S" {"helper":"artGetByteStaticFromCode"}
+%include "x86_64/op_sget.S" {"helper":"MterpGetByteStatic"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_char.S b/runtime/interpreter/mterp/x86_64/op_sget_char.S
index 4488610..3972551 100644
--- a/runtime/interpreter/mterp/x86_64/op_sget_char.S
+++ b/runtime/interpreter/mterp/x86_64/op_sget_char.S
@@ -1 +1 @@
-%include "x86_64/op_sget.S" {"helper":"artGetCharStaticFromCode"}
+%include "x86_64/op_sget.S" {"helper":"MterpGetCharStatic"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_object.S b/runtime/interpreter/mterp/x86_64/op_sget_object.S
index 09b627e..a0bbfd8 100644
--- a/runtime/interpreter/mterp/x86_64/op_sget_object.S
+++ b/runtime/interpreter/mterp/x86_64/op_sget_object.S
@@ -1 +1 @@
-%include "x86_64/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"}
+%include "x86_64/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_short.S b/runtime/interpreter/mterp/x86_64/op_sget_short.S
index 47ac238..df212dc 100644
--- a/runtime/interpreter/mterp/x86_64/op_sget_short.S
+++ b/runtime/interpreter/mterp/x86_64/op_sget_short.S
@@ -1 +1 @@
-%include "x86_64/op_sget.S" {"helper":"artGetShortStaticFromCode"}
+%include "x86_64/op_sget.S" {"helper":"MterpGetShortStatic"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_wide.S b/runtime/interpreter/mterp/x86_64/op_sget_wide.S
index aa22343..1e98e28 100644
--- a/runtime/interpreter/mterp/x86_64/op_sget_wide.S
+++ b/runtime/interpreter/mterp/x86_64/op_sget_wide.S
@@ -1 +1 @@
-%include "x86_64/op_sget.S" {"helper":"artGet64StaticFromCode", "wide":"1"}
+%include "x86_64/op_sget.S" {"helper":"MterpGet64Static", "wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput.S b/runtime/interpreter/mterp/x86_64/op_sput.S
index e92b032..9705619 100644
--- a/runtime/interpreter/mterp/x86_64/op_sput.S
+++ b/runtime/interpreter/mterp/x86_64/op_sput.S
@@ -1,4 +1,4 @@
-%default { "helper":"artSet32StaticFromCode"}
+%default { "helper":"MterpSet32Static"}
/*
* General SPUT handler wrapper.
*
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_boolean.S b/runtime/interpreter/mterp/x86_64/op_sput_boolean.S
index 8718915..8bf4a62 100644
--- a/runtime/interpreter/mterp/x86_64/op_sput_boolean.S
+++ b/runtime/interpreter/mterp/x86_64/op_sput_boolean.S
@@ -1 +1 @@
-%include "x86_64/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "x86_64/op_sput.S" {"helper":"MterpSetBooleanStatic"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_byte.S b/runtime/interpreter/mterp/x86_64/op_sput_byte.S
index 8718915..5bb26eb 100644
--- a/runtime/interpreter/mterp/x86_64/op_sput_byte.S
+++ b/runtime/interpreter/mterp/x86_64/op_sput_byte.S
@@ -1 +1 @@
-%include "x86_64/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "x86_64/op_sput.S" {"helper":"MterpSetByteStatic"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_char.S b/runtime/interpreter/mterp/x86_64/op_sput_char.S
index 2fe9d14..42b244e 100644
--- a/runtime/interpreter/mterp/x86_64/op_sput_char.S
+++ b/runtime/interpreter/mterp/x86_64/op_sput_char.S
@@ -1 +1 @@
-%include "x86_64/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "x86_64/op_sput.S" {"helper":"MterpSetCharStatic"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_short.S b/runtime/interpreter/mterp/x86_64/op_sput_short.S
index 2fe9d14..9670092 100644
--- a/runtime/interpreter/mterp/x86_64/op_sput_short.S
+++ b/runtime/interpreter/mterp/x86_64/op_sput_short.S
@@ -1 +1 @@
-%include "x86_64/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "x86_64/op_sput.S" {"helper":"MterpSetShortStatic"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_wide.S b/runtime/interpreter/mterp/x86_64/op_sput_wide.S
index c4bc269..a21bcb5 100644
--- a/runtime/interpreter/mterp/x86_64/op_sput_wide.S
+++ b/runtime/interpreter/mterp/x86_64/op_sput_wide.S
@@ -3,13 +3,13 @@
*
*/
/* sput-wide vAA, field@BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
- movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
- leaq VREG_ADDRESS(rINSTq), OUT_ARG2 # &fp[AA]
+ leaq VREG_ADDRESS(rINSTq), OUT_ARG1 # &fp[AA]
+ movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3 # self
- call SYMBOL(artSet64IndirectStaticFromMterp)
+ call SYMBOL(MterpSet64Static)
testb %al, %al
jnz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 5ae2fc5..48bf1e7 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -430,8 +430,7 @@
// starts up.
LOG(WARNING) << "Dex location " << dex_location_ << " does not seem to include dex file. "
<< "Allow oat file use. This is potentially dangerous.";
- } else if (file.GetOatHeader().GetImageFileLocationOatChecksum()
- != GetCombinedImageChecksum()) {
+ } else if (file.GetOatHeader().GetImageFileLocationOatChecksum() != image_info->oat_checksum) {
VLOG(oat) << "Oat image checksum does not match image checksum.";
return kOatBootImageOutOfDate;
}
@@ -726,68 +725,81 @@
return required_dex_checksums_found_ ? &cached_required_dex_checksums_ : nullptr;
}
+// TODO: Use something better than xor for the combined image checksum.
+std::unique_ptr<OatFileAssistant::ImageInfo>
+OatFileAssistant::ImageInfo::GetRuntimeImageInfo(InstructionSet isa, std::string* error_msg) {
+ CHECK(error_msg != nullptr);
+
+ // Use the currently loaded image to determine the image locations for all
+ // the image spaces, regardless of the isa requested. Otherwise we would
+ // need to read from the boot image's oat file to determine the rest of the
+ // image locations in the case of multi-image.
+ Runtime* runtime = Runtime::Current();
+ std::vector<gc::space::ImageSpace*> image_spaces = runtime->GetHeap()->GetBootImageSpaces();
+ if (image_spaces.empty()) {
+ *error_msg = "There are no boot image spaces";
+ return nullptr;
+ }
+
+ std::unique_ptr<ImageInfo> info(new ImageInfo());
+ info->location = image_spaces[0]->GetImageLocation();
+
+ // TODO: Special casing on isa == kRuntimeISA is presumably motivated by
+ // performance: 'it's faster to use an already loaded image header than read
+ // the image header from disk'. But the loaded image is not necessarily the
+ // same as kRuntimeISA, so this behavior is suspect (b/35659889).
+ if (isa == kRuntimeISA) {
+ const ImageHeader& image_header = image_spaces[0]->GetImageHeader();
+ info->oat_data_begin = reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin());
+ info->patch_delta = image_header.GetPatchDelta();
+
+ info->oat_checksum = 0;
+ for (gc::space::ImageSpace* image_space : image_spaces) {
+ info->oat_checksum ^= image_space->GetImageHeader().GetOatChecksum();
+ }
+ } else {
+ std::unique_ptr<ImageHeader> image_header(
+ gc::space::ImageSpace::ReadImageHeader(info->location.c_str(), isa, error_msg));
+ if (image_header == nullptr) {
+ return nullptr;
+ }
+ info->oat_data_begin = reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin());
+ info->patch_delta = image_header->GetPatchDelta();
+
+ info->oat_checksum = 0;
+ for (gc::space::ImageSpace* image_space : image_spaces) {
+ std::string location = image_space->GetImageLocation();
+ image_header.reset(
+ gc::space::ImageSpace::ReadImageHeader(location.c_str(), isa, error_msg));
+ if (image_header == nullptr) {
+ return nullptr;
+ }
+ info->oat_checksum ^= image_header->GetOatChecksum();
+ }
+ }
+ return info;
+}
+
const OatFileAssistant::ImageInfo* OatFileAssistant::GetImageInfo() {
if (!image_info_load_attempted_) {
image_info_load_attempted_ = true;
-
- Runtime* runtime = Runtime::Current();
- std::vector<gc::space::ImageSpace*> image_spaces = runtime->GetHeap()->GetBootImageSpaces();
- if (!image_spaces.empty()) {
- cached_image_info_.location = image_spaces[0]->GetImageLocation();
-
- if (isa_ == kRuntimeISA) {
- const ImageHeader& image_header = image_spaces[0]->GetImageHeader();
- cached_image_info_.oat_checksum = image_header.GetOatChecksum();
- cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(
- image_header.GetOatDataBegin());
- cached_image_info_.patch_delta = image_header.GetPatchDelta();
- } else {
- std::string error_msg;
- std::unique_ptr<ImageHeader> image_header(
- gc::space::ImageSpace::ReadImageHeader(cached_image_info_.location.c_str(),
- isa_,
- &error_msg));
- CHECK(image_header != nullptr) << error_msg;
- cached_image_info_.oat_checksum = image_header->GetOatChecksum();
- cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(
- image_header->GetOatDataBegin());
- cached_image_info_.patch_delta = image_header->GetPatchDelta();
- }
+ std::string error_msg;
+ cached_image_info_ = ImageInfo::GetRuntimeImageInfo(isa_, &error_msg);
+ if (cached_image_info_ == nullptr) {
+ LOG(WARNING) << "Unable to get runtime image info: " << error_msg;
}
- image_info_load_succeeded_ = (!image_spaces.empty());
-
- combined_image_checksum_ = CalculateCombinedImageChecksum(isa_);
}
- return image_info_load_succeeded_ ? &cached_image_info_ : nullptr;
+ return cached_image_info_.get();
}
-// TODO: Use something better than xor.
uint32_t OatFileAssistant::CalculateCombinedImageChecksum(InstructionSet isa) {
- uint32_t checksum = 0;
- std::vector<gc::space::ImageSpace*> image_spaces =
- Runtime::Current()->GetHeap()->GetBootImageSpaces();
- if (isa == kRuntimeISA) {
- for (gc::space::ImageSpace* image_space : image_spaces) {
- checksum ^= image_space->GetImageHeader().GetOatChecksum();
- }
- } else {
- for (gc::space::ImageSpace* image_space : image_spaces) {
- std::string location = image_space->GetImageLocation();
- std::string error_msg;
- std::unique_ptr<ImageHeader> image_header(
- gc::space::ImageSpace::ReadImageHeader(location.c_str(), isa, &error_msg));
- CHECK(image_header != nullptr) << error_msg;
- checksum ^= image_header->GetOatChecksum();
- }
+ std::string error_msg;
+ std::unique_ptr<ImageInfo> info = ImageInfo::GetRuntimeImageInfo(isa, &error_msg);
+ if (info == nullptr) {
+ LOG(WARNING) << "Unable to get runtime image info for checksum: " << error_msg;
+ return 0;
}
- return checksum;
-}
-
-uint32_t OatFileAssistant::GetCombinedImageChecksum() {
- if (!image_info_load_attempted_) {
- GetImageInfo();
- }
- return combined_image_checksum_;
+ return info->oat_checksum;
}
OatFileAssistant::OatFileInfo& OatFileAssistant::GetBestInfo() {
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 3ede29f..eec87f0 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -284,6 +284,9 @@
uintptr_t oat_data_begin = 0;
int32_t patch_delta = 0;
std::string location;
+
+ static std::unique_ptr<ImageInfo> GetRuntimeImageInfo(InstructionSet isa,
+ std::string* error_msg);
};
class OatFileInfo {
@@ -414,8 +417,6 @@
// The caller shouldn't clean up or free the returned pointer.
const ImageInfo* GetImageInfo();
- uint32_t GetCombinedImageChecksum();
-
// To implement Lock(), we lock a dummy file where the oat file would go
// (adding ".flock" to the target file name) and retain the lock for the
// remaining lifetime of the OatFileAssistant object.
@@ -445,9 +446,7 @@
// TODO: The image info should probably be moved out of the oat file
// assistant to an image file manager.
bool image_info_load_attempted_ = false;
- bool image_info_load_succeeded_ = false;
- ImageInfo cached_image_info_;
- uint32_t combined_image_checksum_ = 0;
+ std::unique_ptr<ImageInfo> cached_image_info_;
DISALLOW_COPY_AND_ASSIGN(OatFileAssistant);
};
diff --git a/runtime/openjdkjvmti/ti_heap.cc b/runtime/openjdkjvmti/ti_heap.cc
index c7294a9..d52f0ea 100644
--- a/runtime/openjdkjvmti/ti_heap.cc
+++ b/runtime/openjdkjvmti/ti_heap.cc
@@ -162,6 +162,424 @@
return 0;
}
+template <typename UserData>
+bool VisitorFalse(art::ObjPtr<art::mirror::Object> obj ATTRIBUTE_UNUSED,
+ art::ObjPtr<art::mirror::Class> klass ATTRIBUTE_UNUSED,
+ art::ArtField& field ATTRIBUTE_UNUSED,
+ size_t field_index ATTRIBUTE_UNUSED,
+ UserData* user_data ATTRIBUTE_UNUSED) {
+ return false;
+}
+
+template <typename UserData, bool kCallVisitorOnRecursion>
+class FieldVisitor {
+ public:
+ // Report the contents of a primitive fields of the given object, if a callback is set.
+ template <typename StaticPrimitiveVisitor,
+ typename StaticReferenceVisitor,
+ typename InstancePrimitiveVisitor,
+ typename InstanceReferenceVisitor>
+ static bool ReportFields(art::ObjPtr<art::mirror::Object> obj,
+ UserData* user_data,
+ StaticPrimitiveVisitor& static_prim_visitor,
+ StaticReferenceVisitor& static_ref_visitor,
+ InstancePrimitiveVisitor& instance_prim_visitor,
+ InstanceReferenceVisitor& instance_ref_visitor)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ FieldVisitor fv(user_data);
+
+ if (obj->IsClass()) {
+ // When visiting a class, we only visit the static fields of the given class. No field of
+ // superclasses is visited.
+ art::ObjPtr<art::mirror::Class> klass = obj->AsClass();
+ // Only report fields on resolved classes. We need valid field data.
+ if (!klass->IsResolved()) {
+ return false;
+ }
+ return fv.ReportFieldsImpl(nullptr,
+ obj->AsClass(),
+ obj->AsClass()->IsInterface(),
+ static_prim_visitor,
+ static_ref_visitor,
+ instance_prim_visitor,
+ instance_ref_visitor);
+ } else {
+ // See comment above. Just double-checking here, but an instance *should* mean the class was
+ // resolved.
+ DCHECK(obj->GetClass()->IsResolved() || obj->GetClass()->IsErroneousResolved());
+ return fv.ReportFieldsImpl(obj,
+ obj->GetClass(),
+ false,
+ static_prim_visitor,
+ static_ref_visitor,
+ instance_prim_visitor,
+ instance_ref_visitor);
+ }
+ }
+
+ private:
+ explicit FieldVisitor(UserData* user_data) : user_data_(user_data) {}
+
+ // Report the contents of fields of the given object. If obj is null, report the static fields,
+ // otherwise the instance fields.
+ template <typename StaticPrimitiveVisitor,
+ typename StaticReferenceVisitor,
+ typename InstancePrimitiveVisitor,
+ typename InstanceReferenceVisitor>
+ bool ReportFieldsImpl(art::ObjPtr<art::mirror::Object> obj,
+ art::ObjPtr<art::mirror::Class> klass,
+ bool skip_java_lang_object,
+ StaticPrimitiveVisitor& static_prim_visitor,
+ StaticReferenceVisitor& static_ref_visitor,
+ InstancePrimitiveVisitor& instance_prim_visitor,
+ InstanceReferenceVisitor& instance_ref_visitor)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ // Compute the offset of field indices.
+ size_t interface_field_count = CountInterfaceFields(klass);
+
+ size_t tmp;
+ bool aborted = ReportFieldsRecursive(obj,
+ klass,
+ interface_field_count,
+ skip_java_lang_object,
+ static_prim_visitor,
+ static_ref_visitor,
+ instance_prim_visitor,
+ instance_ref_visitor,
+ &tmp);
+ return aborted;
+ }
+
+ // Visit primitive fields in an object (instance). Return true if the visit was aborted.
+ template <typename StaticPrimitiveVisitor,
+ typename StaticReferenceVisitor,
+ typename InstancePrimitiveVisitor,
+ typename InstanceReferenceVisitor>
+ bool ReportFieldsRecursive(art::ObjPtr<art::mirror::Object> obj,
+ art::ObjPtr<art::mirror::Class> klass,
+ size_t interface_fields,
+ bool skip_java_lang_object,
+ StaticPrimitiveVisitor& static_prim_visitor,
+ StaticReferenceVisitor& static_ref_visitor,
+ InstancePrimitiveVisitor& instance_prim_visitor,
+ InstanceReferenceVisitor& instance_ref_visitor,
+ size_t* field_index_out)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ DCHECK(klass != nullptr);
+ size_t field_index;
+ if (klass->GetSuperClass() == nullptr) {
+ // j.l.Object. Start with the fields from interfaces.
+ field_index = interface_fields;
+ if (skip_java_lang_object) {
+ *field_index_out = field_index;
+ return false;
+ }
+ } else {
+ // Report superclass fields.
+ if (kCallVisitorOnRecursion) {
+ if (ReportFieldsRecursive(obj,
+ klass->GetSuperClass(),
+ interface_fields,
+ skip_java_lang_object,
+ static_prim_visitor,
+ static_ref_visitor,
+ instance_prim_visitor,
+ instance_ref_visitor,
+ &field_index)) {
+ return true;
+ }
+ } else {
+ // Still call, but with empty visitor. This is required for correct counting.
+ ReportFieldsRecursive(obj,
+ klass->GetSuperClass(),
+ interface_fields,
+ skip_java_lang_object,
+ VisitorFalse<UserData>,
+ VisitorFalse<UserData>,
+ VisitorFalse<UserData>,
+ VisitorFalse<UserData>,
+ &field_index);
+ }
+ }
+
+ // Now visit fields for the current klass.
+
+ for (auto& static_field : klass->GetSFields()) {
+ if (static_field.IsPrimitiveType()) {
+ if (static_prim_visitor(obj,
+ klass,
+ static_field,
+ field_index,
+ user_data_)) {
+ return true;
+ }
+ } else {
+ if (static_ref_visitor(obj,
+ klass,
+ static_field,
+ field_index,
+ user_data_)) {
+ return true;
+ }
+ }
+ field_index++;
+ }
+
+ for (auto& instance_field : klass->GetIFields()) {
+ if (instance_field.IsPrimitiveType()) {
+ if (instance_prim_visitor(obj,
+ klass,
+ instance_field,
+ field_index,
+ user_data_)) {
+ return true;
+ }
+ } else {
+ if (instance_ref_visitor(obj,
+ klass,
+ instance_field,
+ field_index,
+ user_data_)) {
+ return true;
+ }
+ }
+ field_index++;
+ }
+
+ *field_index_out = field_index;
+ return false;
+ }
+
+ // Implements a visit of the implemented interfaces of a given class.
+ template <typename T>
+ struct RecursiveInterfaceVisit {
+ static void VisitStatic(art::Thread* self, art::ObjPtr<art::mirror::Class> klass, T& visitor)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ RecursiveInterfaceVisit rv;
+ rv.Visit(self, klass, visitor);
+ }
+
+ void Visit(art::Thread* self, art::ObjPtr<art::mirror::Class> klass, T& visitor)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ // First visit the parent, to get the order right.
+ // (We do this in preparation for actual visiting of interface fields.)
+ if (klass->GetSuperClass() != nullptr) {
+ Visit(self, klass->GetSuperClass(), visitor);
+ }
+ for (uint32_t i = 0; i != klass->NumDirectInterfaces(); ++i) {
+ art::ObjPtr<art::mirror::Class> inf_klass =
+ art::mirror::Class::GetDirectInterface(self, klass, i);
+ DCHECK(inf_klass != nullptr);
+ VisitInterface(self, inf_klass, visitor);
+ }
+ }
+
+ void VisitInterface(art::Thread* self, art::ObjPtr<art::mirror::Class> inf_klass, T& visitor)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ auto it = visited_interfaces.find(inf_klass.Ptr());
+ if (it != visited_interfaces.end()) {
+ return;
+ }
+ visited_interfaces.insert(inf_klass.Ptr());
+
+ // Let the visitor know about this one. Note that this order is acceptable, as the ordering
+ // of these fields never matters for known visitors.
+ visitor(inf_klass);
+
+ // Now visit the superinterfaces.
+ for (uint32_t i = 0; i != inf_klass->NumDirectInterfaces(); ++i) {
+ art::ObjPtr<art::mirror::Class> super_inf_klass =
+ art::mirror::Class::GetDirectInterface(self, inf_klass, i);
+ DCHECK(super_inf_klass != nullptr);
+ VisitInterface(self, super_inf_klass, visitor);
+ }
+ }
+
+ std::unordered_set<art::mirror::Class*> visited_interfaces;
+ };
+
+ // Counting interface fields. Note that we cannot use the interface table, as that only contains
+ // "non-marker" interfaces (= interfaces with methods).
+ static size_t CountInterfaceFields(art::ObjPtr<art::mirror::Class> klass)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ size_t count = 0;
+ auto visitor = [&count](art::ObjPtr<art::mirror::Class> inf_klass)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ DCHECK(inf_klass->IsInterface());
+ DCHECK_EQ(0u, inf_klass->NumInstanceFields());
+ count += inf_klass->NumStaticFields();
+ };
+ RecursiveInterfaceVisit<decltype(visitor)>::VisitStatic(art::Thread::Current(), klass, visitor);
+ return count;
+
+ // TODO: Implement caching.
+ }
+
+ UserData* user_data_;
+};
+
+// Debug helper. Prints the structure of an object.
+template <bool kStatic, bool kRef>
+struct DumpVisitor {
+ static bool Callback(art::ObjPtr<art::mirror::Object> obj ATTRIBUTE_UNUSED,
+ art::ObjPtr<art::mirror::Class> klass ATTRIBUTE_UNUSED,
+ art::ArtField& field,
+ size_t field_index,
+ void* user_data ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ LOG(ERROR) << (kStatic ? "static " : "instance ")
+ << (kRef ? "ref " : "primitive ")
+ << field.PrettyField()
+ << " @ "
+ << field_index;
+ return false;
+ }
+};
+ATTRIBUTE_UNUSED
+void DumpObjectFields(art::ObjPtr<art::mirror::Object> obj)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (obj->IsClass()) {
+ FieldVisitor<void, false>:: ReportFields(obj,
+ nullptr,
+ DumpVisitor<true, false>::Callback,
+ DumpVisitor<true, true>::Callback,
+ DumpVisitor<false, false>::Callback,
+ DumpVisitor<false, true>::Callback);
+ } else {
+ FieldVisitor<void, true>::ReportFields(obj,
+ nullptr,
+ DumpVisitor<true, false>::Callback,
+ DumpVisitor<true, true>::Callback,
+ DumpVisitor<false, false>::Callback,
+ DumpVisitor<false, true>::Callback);
+ }
+}
+
+class ReportPrimitiveField {
+ public:
+ static bool Report(art::ObjPtr<art::mirror::Object> obj,
+ ObjectTagTable* tag_table,
+ const jvmtiHeapCallbacks* cb,
+ const void* user_data)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (UNLIKELY(cb->primitive_field_callback != nullptr)) {
+ jlong class_tag = tag_table->GetTagOrZero(obj->GetClass());
+ ReportPrimitiveField rpf(tag_table, class_tag, cb, user_data);
+ if (obj->IsClass()) {
+ return FieldVisitor<ReportPrimitiveField, false>::ReportFields(
+ obj,
+ &rpf,
+ ReportPrimitiveFieldCallback<true>,
+ VisitorFalse<ReportPrimitiveField>,
+ VisitorFalse<ReportPrimitiveField>,
+ VisitorFalse<ReportPrimitiveField>);
+ } else {
+ return FieldVisitor<ReportPrimitiveField, true>::ReportFields(
+ obj,
+ &rpf,
+ VisitorFalse<ReportPrimitiveField>,
+ VisitorFalse<ReportPrimitiveField>,
+ ReportPrimitiveFieldCallback<false>,
+ VisitorFalse<ReportPrimitiveField>);
+ }
+ }
+ return false;
+ }
+
+
+ private:
+ ReportPrimitiveField(ObjectTagTable* tag_table,
+ jlong class_tag,
+ const jvmtiHeapCallbacks* cb,
+ const void* user_data)
+ : tag_table_(tag_table), class_tag_(class_tag), cb_(cb), user_data_(user_data) {}
+
+ template <bool kReportStatic>
+ static bool ReportPrimitiveFieldCallback(art::ObjPtr<art::mirror::Object> obj,
+ art::ObjPtr<art::mirror::Class> klass,
+ art::ArtField& field,
+ size_t field_index,
+ ReportPrimitiveField* user_data)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::Primitive::Type art_prim_type = field.GetTypeAsPrimitiveType();
+ jvmtiPrimitiveType prim_type =
+ static_cast<jvmtiPrimitiveType>(art::Primitive::Descriptor(art_prim_type)[0]);
+ DCHECK(prim_type == JVMTI_PRIMITIVE_TYPE_BOOLEAN ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_BYTE ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_CHAR ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_SHORT ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_INT ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_LONG ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_FLOAT ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_DOUBLE);
+ jvmtiHeapReferenceInfo info;
+ info.field.index = field_index;
+
+ jvalue value;
+ memset(&value, 0, sizeof(jvalue));
+ art::ObjPtr<art::mirror::Object> src = kReportStatic ? klass : obj;
+ switch (art_prim_type) {
+ case art::Primitive::Type::kPrimBoolean:
+ value.z = field.GetBoolean(src) == 0 ? JNI_FALSE : JNI_TRUE;
+ break;
+ case art::Primitive::Type::kPrimByte:
+ value.b = field.GetByte(src);
+ break;
+ case art::Primitive::Type::kPrimChar:
+ value.c = field.GetChar(src);
+ break;
+ case art::Primitive::Type::kPrimShort:
+ value.s = field.GetShort(src);
+ break;
+ case art::Primitive::Type::kPrimInt:
+ value.i = field.GetInt(src);
+ break;
+ case art::Primitive::Type::kPrimLong:
+ value.j = field.GetLong(src);
+ break;
+ case art::Primitive::Type::kPrimFloat:
+ value.f = field.GetFloat(src);
+ break;
+ case art::Primitive::Type::kPrimDouble:
+ value.d = field.GetDouble(src);
+ break;
+ case art::Primitive::Type::kPrimVoid:
+ case art::Primitive::Type::kPrimNot: {
+ LOG(FATAL) << "Should not reach here";
+ UNREACHABLE();
+ }
+ }
+
+ jlong obj_tag = user_data->tag_table_->GetTagOrZero(src.Ptr());
+ const jlong saved_obj_tag = obj_tag;
+
+ jint ret = user_data->cb_->primitive_field_callback(kReportStatic
+ ? JVMTI_HEAP_REFERENCE_STATIC_FIELD
+ : JVMTI_HEAP_REFERENCE_FIELD,
+ &info,
+ user_data->class_tag_,
+ &obj_tag,
+ value,
+ prim_type,
+ const_cast<void*>(user_data->user_data_));
+
+ if (saved_obj_tag != obj_tag) {
+ user_data->tag_table_->Set(src.Ptr(), obj_tag);
+ }
+
+ if ((ret & JVMTI_VISIT_ABORT) != 0) {
+ return true;
+ }
+
+ return false;
+ }
+
+ ObjectTagTable* tag_table_;
+ jlong class_tag_;
+ const jvmtiHeapCallbacks* cb_;
+ const void* user_data_;
+};
+
struct HeapFilter {
explicit HeapFilter(jint heap_filter)
: filter_out_tagged((heap_filter & JVMTI_HEAP_FILTER_TAGGED) != 0),
@@ -292,7 +710,12 @@
ithd->stop_reports = (array_ret & JVMTI_VISIT_ABORT) != 0;
}
- // TODO Implement primitive field callback.
+ if (!ithd->stop_reports) {
+ ithd->stop_reports = ReportPrimitiveField::Report(obj,
+ ithd->heap_util->GetTags(),
+ ithd->callbacks,
+ ithd->user_data);
+ }
}
jvmtiError HeapUtil::IterateThroughHeap(jvmtiEnv* env,
@@ -568,64 +991,50 @@
return;
}
- // TODO: We'll probably have to rewrite this completely with our own visiting logic, if we
- // want to have a chance of getting the field indices computed halfway efficiently. For
- // now, ignore them altogether.
-
- struct InstanceReferenceVisitor {
- explicit InstanceReferenceVisitor(FollowReferencesHelper* helper_)
- : helper(helper_), stop_reports(false) {}
-
- void operator()(art::mirror::Object* src,
- art::MemberOffset field_offset,
- bool is_static ATTRIBUTE_UNUSED) const
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(!*helper->tag_table_->GetAllowDisallowLock()) {
- if (stop_reports) {
- return;
- }
-
- art::mirror::Object* trg = src->GetFieldObjectReferenceAddr(field_offset)->AsMirrorPtr();
+ // All instance fields.
+ auto report_instance_field = [&](art::ObjPtr<art::mirror::Object> src,
+ art::ObjPtr<art::mirror::Class> obj_klass ATTRIBUTE_UNUSED,
+ art::ArtField& field,
+ size_t field_index,
+ void* user_data ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!*tag_table_->GetAllowDisallowLock()) {
+ art::ObjPtr<art::mirror::Object> field_value = field.GetObject(src);
+ if (field_value != nullptr) {
jvmtiHeapReferenceInfo reference_info;
memset(&reference_info, 0, sizeof(reference_info));
// TODO: Implement spec-compliant numbering.
- reference_info.field.index = field_offset.Int32Value();
+ reference_info.field.index = field_index;
jvmtiHeapReferenceKind kind =
- field_offset.Int32Value() == art::mirror::Object::ClassOffset().Int32Value()
+ field.GetOffset().Int32Value() == art::mirror::Object::ClassOffset().Int32Value()
? JVMTI_HEAP_REFERENCE_CLASS
: JVMTI_HEAP_REFERENCE_FIELD;
const jvmtiHeapReferenceInfo* reference_info_ptr =
kind == JVMTI_HEAP_REFERENCE_CLASS ? nullptr : &reference_info;
- stop_reports = !helper->ReportReferenceMaybeEnqueue(kind, reference_info_ptr, src, trg);
+ return !ReportReferenceMaybeEnqueue(kind, reference_info_ptr, src.Ptr(), field_value.Ptr());
}
-
- void VisitRoot(art::mirror::CompressedReference<art::mirror::Object>* root ATTRIBUTE_UNUSED)
- const {
- LOG(FATAL) << "Unreachable";
- }
- void VisitRootIfNonNull(
- art::mirror::CompressedReference<art::mirror::Object>* root ATTRIBUTE_UNUSED) const {
- LOG(FATAL) << "Unreachable";
- }
-
- // "mutable" required by the visitor API.
- mutable FollowReferencesHelper* helper;
- mutable bool stop_reports;
+ return false;
};
-
- InstanceReferenceVisitor visitor(this);
- // Visit references, not native roots.
- obj->VisitReferences<false>(visitor, art::VoidFunctor());
-
- stop_reports_ = visitor.stop_reports;
-
- if (!stop_reports_) {
- jint string_ret = ReportString(obj, env, tag_table_, callbacks_, user_data_);
- stop_reports_ = (string_ret & JVMTI_VISIT_ABORT) != 0;
+ stop_reports_ = FieldVisitor<void, true>::ReportFields(obj,
+ nullptr,
+ VisitorFalse<void>,
+ VisitorFalse<void>,
+ VisitorFalse<void>,
+ report_instance_field);
+ if (stop_reports_) {
+ return;
}
+
+ jint string_ret = ReportString(obj, env, tag_table_, callbacks_, user_data_);
+ stop_reports_ = (string_ret & JVMTI_VISIT_ABORT) != 0;
+ if (stop_reports_) {
+ return;
+ }
+
+ stop_reports_ = ReportPrimitiveField::Report(obj, tag_table_, callbacks_, user_data_);
}
void VisitArray(art::mirror::Object* array)
@@ -719,26 +1128,38 @@
DCHECK_EQ(h_klass.Get(), klass);
// Declared static fields.
- for (auto& field : klass->GetSFields()) {
- if (!field.IsPrimitiveType()) {
- art::ObjPtr<art::mirror::Object> field_value = field.GetObject(klass);
- if (field_value != nullptr) {
- jvmtiHeapReferenceInfo reference_info;
- memset(&reference_info, 0, sizeof(reference_info));
+ auto report_static_field = [&](art::ObjPtr<art::mirror::Object> obj ATTRIBUTE_UNUSED,
+ art::ObjPtr<art::mirror::Class> obj_klass,
+ art::ArtField& field,
+ size_t field_index,
+ void* user_data ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!*tag_table_->GetAllowDisallowLock()) {
+ art::ObjPtr<art::mirror::Object> field_value = field.GetObject(obj_klass);
+ if (field_value != nullptr) {
+ jvmtiHeapReferenceInfo reference_info;
+ memset(&reference_info, 0, sizeof(reference_info));
- // TODO: Implement spec-compliant numbering.
- reference_info.field.index = field.GetOffset().Int32Value();
+ reference_info.field.index = static_cast<jint>(field_index);
- stop_reports_ = !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_STATIC_FIELD,
- &reference_info,
- klass,
- field_value.Ptr());
- if (stop_reports_) {
- return;
- }
- }
+ return !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_STATIC_FIELD,
+ &reference_info,
+ obj_klass.Ptr(),
+ field_value.Ptr());
}
+ return false;
+ };
+ stop_reports_ = FieldVisitor<void, false>::ReportFields(klass,
+ nullptr,
+ VisitorFalse<void>,
+ report_static_field,
+ VisitorFalse<void>,
+ VisitorFalse<void>);
+ if (stop_reports_) {
+ return;
}
+
+ stop_reports_ = ReportPrimitiveField::Report(klass, tag_table_, callbacks_, user_data_);
}
void MaybeEnqueue(art::mirror::Object* obj) REQUIRES_SHARED(art::Locks::mutator_lock_) {
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index c4d20c0..7a69078 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -779,6 +779,8 @@
CheckSameMethods();
}
+class RedefinitionDataIter;
+
// A wrapper that lets us hold onto the arbitrary sized data needed for redefinitions in a
// reasonably sane way. This adds no fields to the normal ObjectArray. By doing this we can avoid
// having to deal with the fact that we need to hold an arbitrary number of references live.
@@ -802,13 +804,15 @@
RedefinitionDataHolder(art::StackHandleScope<1>* hs,
art::Runtime* runtime,
art::Thread* self,
- int32_t num_redefinitions) REQUIRES_SHARED(art::Locks::mutator_lock_) :
+ std::vector<Redefiner::ClassRedefinition>* redefinitions)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) :
arr_(
hs->NewHandle(
art::mirror::ObjectArray<art::mirror::Object>::Alloc(
self,
runtime->GetClassLinker()->GetClassRoot(art::ClassLinker::kObjectArrayClass),
- num_redefinitions * kNumSlots))) {}
+ redefinitions->size() * kNumSlots))),
+ redefinitions_(redefinitions) {}
bool IsNull() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
return arr_.IsNull();
@@ -870,8 +874,27 @@
return arr_->GetLength() / kNumSlots;
}
+ std::vector<Redefiner::ClassRedefinition>* GetRedefinitions()
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return redefinitions_;
+ }
+
+ bool operator==(const RedefinitionDataHolder& other) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return arr_.Get() == other.arr_.Get();
+ }
+
+ bool operator!=(const RedefinitionDataHolder& other) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return !(*this == other);
+ }
+
+ RedefinitionDataIter begin() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ RedefinitionDataIter end() REQUIRES_SHARED(art::Locks::mutator_lock_);
+
private:
mutable art::Handle<art::mirror::ObjectArray<art::mirror::Object>> arr_;
+ std::vector<Redefiner::ClassRedefinition>* redefinitions_;
art::mirror::Object* GetSlot(jint klass_index,
DataSlot slot) const REQUIRES_SHARED(art::Locks::mutator_lock_) {
@@ -890,8 +913,115 @@
DISALLOW_COPY_AND_ASSIGN(RedefinitionDataHolder);
};
-bool Redefiner::ClassRedefinition::CheckVerification(int32_t klass_index,
- const RedefinitionDataHolder& holder) {
+class RedefinitionDataIter {
+ public:
+ RedefinitionDataIter(int32_t idx, RedefinitionDataHolder& holder) : idx_(idx), holder_(holder) {}
+
+ RedefinitionDataIter(const RedefinitionDataIter&) = default;
+ RedefinitionDataIter(RedefinitionDataIter&&) = default;
+ RedefinitionDataIter& operator=(const RedefinitionDataIter&) = default;
+ RedefinitionDataIter& operator=(RedefinitionDataIter&&) = default;
+
+ bool operator==(const RedefinitionDataIter& other) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return idx_ == other.idx_ && holder_ == other.holder_;
+ }
+
+ bool operator!=(const RedefinitionDataIter& other) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return !(*this == other);
+ }
+
+ RedefinitionDataIter operator++() { // Value after modification.
+ idx_++;
+ return *this;
+ }
+
+ RedefinitionDataIter operator++(int) {
+ RedefinitionDataIter temp = *this;
+ idx_++;
+ return temp;
+ }
+
+ RedefinitionDataIter operator+(ssize_t delta) const {
+ RedefinitionDataIter temp = *this;
+ temp += delta;
+ return temp;
+ }
+
+ RedefinitionDataIter& operator+=(ssize_t delta) {
+ idx_ += delta;
+ return *this;
+ }
+
+ Redefiner::ClassRedefinition& GetRedefinition() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return (*holder_.GetRedefinitions())[idx_];
+ }
+
+ RedefinitionDataHolder& GetHolder() {
+ return holder_;
+ }
+
+ art::mirror::ClassLoader* GetSourceClassLoader() const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetSourceClassLoader(idx_);
+ }
+ art::mirror::Object* GetJavaDexFile() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetJavaDexFile(idx_);
+ }
+ art::mirror::LongArray* GetNewDexFileCookie() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetNewDexFileCookie(idx_);
+ }
+ art::mirror::DexCache* GetNewDexCache() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetNewDexCache(idx_);
+ }
+ art::mirror::Class* GetMirrorClass() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetMirrorClass(idx_);
+ }
+ art::mirror::ByteArray* GetOriginalDexFileBytes() const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetOriginalDexFileBytes(idx_);
+ }
+ int32_t GetIndex() const {
+ return idx_;
+ }
+
+ void SetSourceClassLoader(art::mirror::ClassLoader* loader)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetSourceClassLoader(idx_, loader);
+ }
+ void SetJavaDexFile(art::mirror::Object* dexfile) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetJavaDexFile(idx_, dexfile);
+ }
+ void SetNewDexFileCookie(art::mirror::LongArray* cookie)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetNewDexFileCookie(idx_, cookie);
+ }
+ void SetNewDexCache(art::mirror::DexCache* cache) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetNewDexCache(idx_, cache);
+ }
+ void SetMirrorClass(art::mirror::Class* klass) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetMirrorClass(idx_, klass);
+ }
+ void SetOriginalDexFileBytes(art::mirror::ByteArray* bytes)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetOriginalDexFileBytes(idx_, bytes);
+ }
+
+ private:
+ int32_t idx_;
+ RedefinitionDataHolder& holder_;
+};
+
+RedefinitionDataIter RedefinitionDataHolder::begin() {
+ return RedefinitionDataIter(0, *this);
+}
+
+RedefinitionDataIter RedefinitionDataHolder::end() {
+ return RedefinitionDataIter(Length(), *this);
+}
+
+bool Redefiner::ClassRedefinition::CheckVerification(const RedefinitionDataIter& iter) {
DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
art::StackHandleScope<2> hs(driver_->self_);
std::string error;
@@ -899,7 +1029,7 @@
art::verifier::MethodVerifier::FailureKind failure =
art::verifier::MethodVerifier::VerifyClass(driver_->self_,
dex_file_.get(),
- hs.NewHandle(holder.GetNewDexCache(klass_index)),
+ hs.NewHandle(iter.GetNewDexCache()),
hs.NewHandle(GetClassLoader()),
dex_file_->GetClassDef(0), /*class_def*/
nullptr, /*compiler_callbacks*/
@@ -918,21 +1048,20 @@
// dexfile. This is so that even if multiple classes with the same classloader are redefined at
// once they are all added to the classloader.
bool Redefiner::ClassRedefinition::AllocateAndRememberNewDexFileCookie(
- int32_t klass_index,
art::Handle<art::mirror::ClassLoader> source_class_loader,
art::Handle<art::mirror::Object> dex_file_obj,
- /*out*/RedefinitionDataHolder* holder) {
+ /*out*/RedefinitionDataIter* cur_data) {
art::StackHandleScope<2> hs(driver_->self_);
art::MutableHandle<art::mirror::LongArray> old_cookie(
hs.NewHandle<art::mirror::LongArray>(nullptr));
bool has_older_cookie = false;
// See if we already have a cookie that a previous redefinition got from the same classloader.
- for (int32_t i = 0; i < klass_index; i++) {
- if (holder->GetSourceClassLoader(i) == source_class_loader.Get()) {
+ for (auto old_data = cur_data->GetHolder().begin(); old_data != *cur_data; ++old_data) {
+ if (old_data.GetSourceClassLoader() == source_class_loader.Get()) {
// Since every instance of this classloader should have the same cookie associated with it we
// can stop looking here.
has_older_cookie = true;
- old_cookie.Assign(holder->GetNewDexFileCookie(i));
+ old_cookie.Assign(old_data.GetNewDexFileCookie());
break;
}
}
@@ -953,14 +1082,14 @@
}
// Save the cookie.
- holder->SetNewDexFileCookie(klass_index, new_cookie.Get());
+ cur_data->SetNewDexFileCookie(new_cookie.Get());
// If there are other copies of this same classloader we need to make sure that we all have the
// same cookie.
if (has_older_cookie) {
- for (int32_t i = 0; i < klass_index; i++) {
+ for (auto old_data = cur_data->GetHolder().begin(); old_data != *cur_data; ++old_data) {
// We will let the GC take care of the cookie we allocated for this one.
- if (holder->GetSourceClassLoader(i) == source_class_loader.Get()) {
- holder->SetNewDexFileCookie(i, new_cookie.Get());
+ if (old_data.GetSourceClassLoader() == source_class_loader.Get()) {
+ old_data.SetNewDexFileCookie(new_cookie.Get());
}
}
}
@@ -969,32 +1098,32 @@
}
bool Redefiner::ClassRedefinition::FinishRemainingAllocations(
- int32_t klass_index, /*out*/RedefinitionDataHolder* holder) {
+ /*out*/RedefinitionDataIter* cur_data) {
art::ScopedObjectAccessUnchecked soa(driver_->self_);
art::StackHandleScope<2> hs(driver_->self_);
- holder->SetMirrorClass(klass_index, GetMirrorClass());
+ cur_data->SetMirrorClass(GetMirrorClass());
// This shouldn't allocate
art::Handle<art::mirror::ClassLoader> loader(hs.NewHandle(GetClassLoader()));
// The bootclasspath is handled specially so it doesn't have a j.l.DexFile.
if (!art::ClassLinker::IsBootClassLoader(soa, loader.Get())) {
- holder->SetSourceClassLoader(klass_index, loader.Get());
+ cur_data->SetSourceClassLoader(loader.Get());
art::Handle<art::mirror::Object> dex_file_obj(hs.NewHandle(
ClassLoaderHelper::FindSourceDexFileObject(driver_->self_, loader)));
- holder->SetJavaDexFile(klass_index, dex_file_obj.Get());
+ cur_data->SetJavaDexFile(dex_file_obj.Get());
if (dex_file_obj == nullptr) {
RecordFailure(ERR(INTERNAL), "Unable to find dex file!");
return false;
}
// Allocate the new dex file cookie.
- if (!AllocateAndRememberNewDexFileCookie(klass_index, loader, dex_file_obj, holder)) {
+ if (!AllocateAndRememberNewDexFileCookie(loader, dex_file_obj, cur_data)) {
driver_->self_->AssertPendingOOMException();
driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate dex file array for class loader");
return false;
}
}
- holder->SetNewDexCache(klass_index, CreateNewDexCache(loader));
- if (holder->GetNewDexCache(klass_index) == nullptr) {
+ cur_data->SetNewDexCache(CreateNewDexCache(loader));
+ if (cur_data->GetNewDexCache() == nullptr) {
driver_->self_->AssertPendingException();
driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate DexCache");
@@ -1002,8 +1131,8 @@
}
// We won't always need to set this field.
- holder->SetOriginalDexFileBytes(klass_index, AllocateOrGetOriginalDexFileBytes());
- if (holder->GetOriginalDexFileBytes(klass_index) == nullptr) {
+ cur_data->SetOriginalDexFileBytes(AllocateOrGetOriginalDexFileBytes());
+ if (cur_data->GetOriginalDexFileBytes() == nullptr) {
driver_->self_->AssertPendingOOMException();
driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate array for original dex file");
@@ -1048,13 +1177,11 @@
}
// Walks every redefinition slot and lets its ClassRedefinition perform the
// allocations it still needs (class loader bookkeeping, dex cache, cookie,
// original-dex-file bytes). Stops at the first failure; the failing
// redefinition has already recorded the error for the caller to report.
bool Redefiner::FinishAllRemainingAllocations(RedefinitionDataHolder& holder) {
  RedefinitionDataIter it = holder.begin();
  while (it != holder.end()) {
    // Allocate the data this redefinition requires.
    bool ok = it.GetRedefinition().FinishRemainingAllocations(&it);
    if (!ok) {
      return false;
    }
    ++it;
  }
  return true;
}
@@ -1069,22 +1196,39 @@
}
}
-bool Redefiner::CheckAllClassesAreVerified(const RedefinitionDataHolder& holder) {
- int32_t cnt = 0;
- for (Redefiner::ClassRedefinition& redef : redefinitions_) {
- if (!redef.CheckVerification(cnt, holder)) {
+bool Redefiner::CheckAllClassesAreVerified(RedefinitionDataHolder& holder) {
+ for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
+ if (!data.GetRedefinition().CheckVerification(data)) {
return false;
}
- cnt++;
}
return true;
}
+class ScopedDisableConcurrentAndMovingGc {
+ public:
+ ScopedDisableConcurrentAndMovingGc(art::gc::Heap* heap, art::Thread* self)
+ : heap_(heap), self_(self) {
+ if (heap_->IsGcConcurrentAndMoving()) {
+ heap_->IncrementDisableMovingGC(self_);
+ }
+ }
+
+ ~ScopedDisableConcurrentAndMovingGc() {
+ if (heap_->IsGcConcurrentAndMoving()) {
+ heap_->DecrementDisableMovingGC(self_);
+ }
+ }
+ private:
+ art::gc::Heap* heap_;
+ art::Thread* self_;
+};
+
jvmtiError Redefiner::Run() {
art::StackHandleScope<1> hs(self_);
// Allocate an array to hold onto all java temporary objects associated with this redefinition.
// We will let this be collected after the end of this function.
- RedefinitionDataHolder holder(&hs, runtime_, self_, redefinitions_.size());
+ RedefinitionDataHolder holder(&hs, runtime_, self_, &redefinitions_);
if (holder.IsNull()) {
self_->AssertPendingOOMException();
self_->ClearException();
@@ -1107,57 +1251,43 @@
// cleaned up by the GC eventually.
return result_;
}
+
// At this point we can no longer fail without corrupting the runtime state.
- int32_t counter = 0;
- for (Redefiner::ClassRedefinition& redef : redefinitions_) {
- if (holder.GetSourceClassLoader(counter) == nullptr) {
- runtime_->GetClassLinker()->AppendToBootClassPath(self_, redef.GetDexFile());
+ for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
+ if (data.GetSourceClassLoader() == nullptr) {
+ runtime_->GetClassLinker()->AppendToBootClassPath(self_, data.GetRedefinition().GetDexFile());
}
- counter++;
}
UnregisterAllBreakpoints();
+
// Disable GC and wait for it to be done if we are a moving GC. This is fine since we are done
// allocating so no deadlocks.
- art::gc::Heap* heap = runtime_->GetHeap();
- if (heap->IsGcConcurrentAndMoving()) {
- // GC moving objects can cause deadlocks as we are deoptimizing the stack.
- heap->IncrementDisableMovingGC(self_);
- }
+ ScopedDisableConcurrentAndMovingGc sdcamgc(runtime_->GetHeap(), self_);
+
// Do transition to final suspension
// TODO We might want to give this its own suspended state!
// TODO This isn't right. We need to change state without any chance of suspend ideally!
- self_->TransitionFromRunnableToSuspended(art::ThreadState::kNative);
- runtime_->GetThreadList()->SuspendAll(
- "Final installation of redefined Classes!", /*long_suspend*/true);
- counter = 0;
- for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ art::ScopedThreadSuspension sts(self_, art::ThreadState::kNative);
+ art::ScopedSuspendAll ssa("Final installation of redefined Classes!", /*long_suspend*/true);
+ for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
art::ScopedAssertNoThreadSuspension nts("Updating runtime objects for redefinition");
- if (holder.GetSourceClassLoader(counter) != nullptr) {
- ClassLoaderHelper::UpdateJavaDexFile(holder.GetJavaDexFile(counter),
- holder.GetNewDexFileCookie(counter));
+ ClassRedefinition& redef = data.GetRedefinition();
+ if (data.GetSourceClassLoader() != nullptr) {
+ ClassLoaderHelper::UpdateJavaDexFile(data.GetJavaDexFile(), data.GetNewDexFileCookie());
}
- art::mirror::Class* klass = holder.GetMirrorClass(counter);
+ art::mirror::Class* klass = data.GetMirrorClass();
// TODO Rewrite so we don't do a stack walk for each and every class.
redef.FindAndAllocateObsoleteMethods(klass);
- redef.UpdateClass(klass, holder.GetNewDexCache(counter),
- holder.GetOriginalDexFileBytes(counter));
- counter++;
+ redef.UpdateClass(klass, data.GetNewDexCache(), data.GetOriginalDexFileBytes());
}
// TODO We should check for if any of the redefined methods are intrinsic methods here and, if any
// are, force a full-world deoptimization before finishing redefinition. If we don't do this then
// methods that have been jitted prior to the current redefinition being applied might continue
// to use the old versions of the intrinsics!
// TODO Shrink the obsolete method maps if possible?
- // TODO Put this into a scoped thing.
- runtime_->GetThreadList()->ResumeAll();
- // Get back shared mutator lock as expected for return.
- self_->TransitionFromSuspendedToRunnable();
// TODO Do the dex_file release at a more reasonable place. This works but it muddles who really
// owns the DexFile and when ownership is transferred.
ReleaseAllDexFiles();
- if (heap->IsGcConcurrentAndMoving()) {
- heap->DecrementDisableMovingGC(self_);
- }
return OK;
}
@@ -1259,8 +1389,6 @@
art::Handle<art::mirror::ClassExt> ext(hs.NewHandle(klass->EnsureExtDataPresent(driver_->self_)));
if (ext == nullptr) {
// No memory. Clear exception (it's not useful) and return error.
- // TODO This doesn't need to be fatal. We could just not support obsolete methods after hitting
- // this case.
driver_->self_->AssertPendingOOMException();
driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Could not allocate ClassExt");
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index 4e6d05f..4313a94 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -66,6 +66,7 @@
namespace openjdkjvmti {
class RedefinitionDataHolder;
+class RedefinitionDataIter;
// Class that can redefine a single class's methods.
// TODO We should really make this be driven by an outside class so we can do multiple classes at
@@ -143,14 +144,13 @@
driver_->RecordFailure(e, class_sig_, err);
}
- bool FinishRemainingAllocations(int32_t klass_index, /*out*/RedefinitionDataHolder* holder)
+ bool FinishRemainingAllocations(/*out*/RedefinitionDataIter* cur_data)
REQUIRES_SHARED(art::Locks::mutator_lock_);
bool AllocateAndRememberNewDexFileCookie(
- int32_t klass_index,
art::Handle<art::mirror::ClassLoader> source_class_loader,
art::Handle<art::mirror::Object> dex_file_obj,
- /*out*/RedefinitionDataHolder* holder)
+ /*out*/RedefinitionDataIter* cur_data)
REQUIRES_SHARED(art::Locks::mutator_lock_);
void FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass)
@@ -161,8 +161,7 @@
bool CheckClass() REQUIRES_SHARED(art::Locks::mutator_lock_);
// Checks that the contained class can be successfully verified.
- bool CheckVerification(int32_t klass_index,
- const RedefinitionDataHolder& holder)
+ bool CheckVerification(const RedefinitionDataIter& holder)
REQUIRES_SHARED(art::Locks::mutator_lock_);
// Preallocates all needed allocations in klass so that we can pause execution safely.
@@ -241,7 +240,7 @@
jvmtiError Run() REQUIRES_SHARED(art::Locks::mutator_lock_);
bool CheckAllRedefinitionAreValid() REQUIRES_SHARED(art::Locks::mutator_lock_);
- bool CheckAllClassesAreVerified(const RedefinitionDataHolder& holder)
+ bool CheckAllClassesAreVerified(RedefinitionDataHolder& holder)
REQUIRES_SHARED(art::Locks::mutator_lock_);
bool EnsureAllClassAllocationsFinished() REQUIRES_SHARED(art::Locks::mutator_lock_);
bool FinishAllRemainingAllocations(RedefinitionDataHolder& holder)
@@ -255,6 +254,8 @@
}
friend struct CallbackCtx;
+ friend class RedefinitionDataHolder;
+ friend class RedefinitionDataIter;
};
} // namespace openjdkjvmti
diff --git a/test/154-gc-loop/src/Main.java b/test/154-gc-loop/src/Main.java
index 3a256c1..69015b6 100644
--- a/test/154-gc-loop/src/Main.java
+++ b/test/154-gc-loop/src/Main.java
@@ -38,7 +38,7 @@
}
} catch (Exception e) {}
System.out.println("Finalize count too large: " +
- ((finalizeCounter >= 10) ? Integer.toString(finalizeCounter) : "false"));
+ ((finalizeCounter >= 15) ? Integer.toString(finalizeCounter) : "false"));
}
private static native void backgroundProcessState();
diff --git a/test/640-checker-integer-valueof/expected.txt b/test/640-checker-integer-valueof/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/640-checker-integer-valueof/expected.txt
diff --git a/test/640-checker-integer-valueof/info.txt b/test/640-checker-integer-valueof/info.txt
new file mode 100644
index 0000000..51021a4
--- /dev/null
+++ b/test/640-checker-integer-valueof/info.txt
@@ -0,0 +1 @@
+Test for Integer.valueOf.
diff --git a/test/640-checker-integer-valueof/src/Main.java b/test/640-checker-integer-valueof/src/Main.java
new file mode 100644
index 0000000..0837fd1
--- /dev/null
+++ b/test/640-checker-integer-valueof/src/Main.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
/**
 * Checker test for the IntegerValueOf intrinsic. Per the CHECK directives
 * below, Integer.valueOf() with a compile-time constant inside the small-value
 * cache must not allocate (no pAllocObjectInitialized), while out-of-range
 * constants and non-constant arguments keep the allocation path. The identity
 * asserts in main() pin that cached values are shared instances; the -129..128
 * fields probe the assumed default cache boundaries [-128, 127].
 *
 * NOTE: The "/// CHECK" comments are functional Checker directives - do not
 * edit or reflow them.
 */
public class Main {

  /// CHECK-START: java.lang.Integer Main.foo(int) disassembly (after)
  /// CHECK: <<Integer:l\d+>> InvokeStaticOrDirect method_name:java.lang.Integer.valueOf intrinsic:IntegerValueOf
  /// CHECK: pAllocObjectInitialized
  /// CHECK: Return [<<Integer>>]
  public static Integer foo(int a) {
    return Integer.valueOf(a);
  }

  /// CHECK-START: java.lang.Integer Main.foo2() disassembly (after)
  /// CHECK: <<Integer:l\d+>> InvokeStaticOrDirect method_name:java.lang.Integer.valueOf intrinsic:IntegerValueOf
  /// CHECK-NOT: pAllocObjectInitialized
  /// CHECK: Return [<<Integer>>]
  public static Integer foo2() {
    return Integer.valueOf(-42);
  }

  /// CHECK-START: java.lang.Integer Main.foo3() disassembly (after)
  /// CHECK: <<Integer:l\d+>> InvokeStaticOrDirect method_name:java.lang.Integer.valueOf intrinsic:IntegerValueOf
  /// CHECK-NOT: pAllocObjectInitialized
  /// CHECK: Return [<<Integer>>]
  public static Integer foo3() {
    return Integer.valueOf(42);
  }

  /// CHECK-START: java.lang.Integer Main.foo4() disassembly (after)
  /// CHECK: <<Integer:l\d+>> InvokeStaticOrDirect method_name:java.lang.Integer.valueOf intrinsic:IntegerValueOf
  /// CHECK: pAllocObjectInitialized
  /// CHECK: Return [<<Integer>>]
  public static Integer foo4() {
    return Integer.valueOf(55555);
  }

  public static void main(String[] args) {
    assertEqual("42", foo(intField));
    assertEqual(foo(intField), foo(intField2));
    assertEqual("-42", foo2());
    assertEqual("42", foo3());
    assertEqual("55555", foo4());
    assertEqual("55555", foo(intField3));
    assertEqual("-129", foo(intFieldMinus129));
    assertEqual("-128", foo(intFieldMinus128));
    assertEqual(foo(intFieldMinus128), foo(intFieldMinus128));
    assertEqual("-127", foo(intFieldMinus127));
    assertEqual(foo(intFieldMinus127), foo(intFieldMinus127));
    assertEqual("126", foo(intField126));
    assertEqual(foo(intField126), foo(intField126));
    assertEqual("127", foo(intField127));
    assertEqual(foo(intField127), foo(intField127));
    assertEqual("128", foo(intField128));
  }

  // Asserts the boxed value's string form matches the expected text.
  static void assertEqual(String a, Integer b) {
    if (!a.equals(b.toString())) {
      throw new Error("Expected " + a + ", got " + b);
    }
  }

  // Deliberate reference comparison (!=): both calls must yield the SAME
  // cached Integer instance, not merely an equal value.
  static void assertEqual(Integer a, Integer b) {
    if (a != b) {
      throw new Error("Expected " + a + ", got " + b);
    }
  }

  // Fields (rather than literals) keep the argument non-constant so foo(int)
  // exercises the generic path.
  static int intField = 42;
  static int intField2 = 42;
  static int intField3 = 55555;

  // Edge cases.
  static int intFieldMinus129 = -129;
  static int intFieldMinus128 = -128;
  static int intFieldMinus127 = -127;
  static int intField126 = 126;
  static int intField127 = 127;
  static int intField128 = 128;
}
diff --git a/test/906-iterate-heap/expected.txt b/test/906-iterate-heap/expected.txt
index 3e857ab..b6af843 100644
--- a/test/906-iterate-heap/expected.txt
+++ b/test/906-iterate-heap/expected.txt
@@ -18,3 +18,27 @@
2
1@0 (32, 2xD '0000000000000000000000000000f03f')
2
+10000@0 (static, int, index=3) 0000000000000000
+10001
+10000@0 (static, int, index=11) 0000000000000000
+10001
+10000@0 (static, int, index=0) 0000000000000000
+10001
+10000@0 (static, int, index=1) 0000000000000000
+10001
+10000@0 (instance, int, index=2) 0000000000000000
+10001@0 (instance, byte, index=4) 0000000000000001
+10002@0 (instance, char, index=5) 0000000000000061
+10003@0 (instance, int, index=6) 0000000000000003
+10004@0 (instance, long, index=7) 0000000000000004
+10005@0 (instance, short, index=9) 0000000000000002
+10006
+10000@0 (instance, int, index=3) 0000000000000000
+10001@0 (instance, byte, index=5) 0000000000000001
+10002@0 (instance, char, index=6) 0000000000000061
+10003@0 (instance, int, index=7) 0000000000000003
+10004@0 (instance, long, index=8) 0000000000000004
+10005@0 (instance, short, index=10) 0000000000000002
+10006@0 (instance, double, index=12) 3ff3ae147ae147ae
+10007@0 (instance, float, index=13) 000000003f9d70a4
+10008
diff --git a/test/906-iterate-heap/iterate_heap.cc b/test/906-iterate-heap/iterate_heap.cc
index 890220e..13c3562 100644
--- a/test/906-iterate-heap/iterate_heap.cc
+++ b/test/906-iterate-heap/iterate_heap.cc
@@ -322,5 +322,92 @@
return env->NewStringUTF(fac.data.c_str());
}
// Maps a JVMTI primitive-type tag to the Java type name used in the test's
// expected output. The switch is exhaustive over jvmtiPrimitiveType.
static constexpr const char* GetPrimitiveTypeName(jvmtiPrimitiveType type) {
  switch (type) {
    case JVMTI_PRIMITIVE_TYPE_BOOLEAN:
      return "boolean";
    case JVMTI_PRIMITIVE_TYPE_BYTE:
      return "byte";
    case JVMTI_PRIMITIVE_TYPE_CHAR:
      return "char";
    case JVMTI_PRIMITIVE_TYPE_SHORT:
      return "short";
    case JVMTI_PRIMITIVE_TYPE_INT:
      return "int";
    case JVMTI_PRIMITIVE_TYPE_FLOAT:
      return "float";
    case JVMTI_PRIMITIVE_TYPE_LONG:
      return "long";
    case JVMTI_PRIMITIVE_TYPE_DOUBLE:
      return "double";
  }
  // Only reachable if the agent hands us a value outside the enum.
  LOG(FATAL) << "Unknown type " << static_cast<size_t>(type);
  UNREACHABLE();
}
+
// JNI entry point for the test: iterates the heap via IterateThroughHeap with
// a primitive-field callback and returns one formatted line per primitive
// field (static or instance) of every object whose tag is >= |tag|. Each
// reported object's tag is incremented, letting the Java side observe which
// objects were visited. The exact line format is pinned by expected.txt.
extern "C" JNIEXPORT jstring JNICALL Java_Main_iterateThroughHeapPrimitiveFields(
    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jlong tag) {
  struct FindFieldCallbacks {
    explicit FindFieldCallbacks(jlong t) : tag_to_find(t) {}

    // Per-object callback: intentionally a no-op; only fields are collected.
    static jint JNICALL HeapIterationCallback(jlong class_tag ATTRIBUTE_UNUSED,
                                              jlong size ATTRIBUTE_UNUSED,
                                              jlong* tag_ptr ATTRIBUTE_UNUSED,
                                              jint length ATTRIBUTE_UNUSED,
                                              void* user_data ATTRIBUTE_UNUSED) {
      return 0;
    }

    // Invoked once per primitive field value encountered during iteration.
    static jint JNICALL PrimitiveFieldValueCallback(jvmtiHeapReferenceKind kind,
                                                    const jvmtiHeapReferenceInfo* info,
                                                    jlong class_tag,
                                                    jlong* tag_ptr,
                                                    jvalue value,
                                                    jvmtiPrimitiveType value_type,
                                                    void* user_data) {
      FindFieldCallbacks* p = reinterpret_cast<FindFieldCallbacks*>(user_data);
      // Only report objects whose tag reached the threshold; bumping the tag
      // below keeps subsequent fields of the same object reporting too.
      if (*tag_ptr >= p->tag_to_find) {
        std::ostringstream oss;
        oss << *tag_ptr
            << '@'
            << class_tag
            << " ("
            << (kind == JVMTI_HEAP_REFERENCE_FIELD ? "instance, " : "static, ")
            << GetPrimitiveTypeName(value_type)
            << ", index="
            << info->field.index
            << ") ";
        // Be lazy, always print eight bytes.
        static_assert(sizeof(jvalue) == sizeof(uint64_t), "Unexpected jvalue size");
        uint64_t val;
        memcpy(&val, &value, sizeof(uint64_t));  // To avoid undefined behavior.
        oss << android::base::StringPrintf("%016" PRIx64, val);

        if (!p->data.empty()) {
          p->data += "\n";
        }
        p->data += oss.str();
        *tag_ptr = *tag_ptr + 1;
      }
      return 0;
    }

    std::string data;        // Accumulated newline-separated report.
    const jlong tag_to_find; // Minimum tag for an object to be reported.
  };

  jvmtiHeapCallbacks callbacks;
  memset(&callbacks, 0, sizeof(jvmtiHeapCallbacks));
  callbacks.heap_iteration_callback = FindFieldCallbacks::HeapIterationCallback;
  callbacks.primitive_field_callback = FindFieldCallbacks::PrimitiveFieldValueCallback;

  FindFieldCallbacks ffc(tag);
  jvmtiError ret = jvmti_env->IterateThroughHeap(0, nullptr, &callbacks, &ffc);
  if (JvmtiErrorToException(env, ret)) {
    return nullptr;
  }
  return env->NewStringUTF(ffc.data.c_str());
}
+
} // namespace Test906IterateHeap
} // namespace art
diff --git a/test/906-iterate-heap/src/Main.java b/test/906-iterate-heap/src/Main.java
index d499886..365ce0f 100644
--- a/test/906-iterate-heap/src/Main.java
+++ b/test/906-iterate-heap/src/Main.java
@@ -119,6 +119,60 @@
setTag(dArray, 1);
System.out.println(iterateThroughHeapPrimitiveArray(getTag(dArray)));
System.out.println(getTag(dArray));
+
+ // Force GCs to clean up dirt.
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ doTestPrimitiveFieldsClasses();
+
+ doTestPrimitiveFieldsIntegral();
+
+ // Force GCs to clean up dirt.
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ doTestPrimitiveFieldsFloat();
+
+ // Force GCs to clean up dirt.
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+ }
+
  // Dumps the static primitive fields of several classes/interfaces: tag the
  // class with 10000, print the field report, print the (bumped) tag, then
  // reset the tag to 0 so the next probe starts clean.
  private static void doTestPrimitiveFieldsClasses() {
    setTag(IntObject.class, 10000);
    System.out.println(iterateThroughHeapPrimitiveFields(10000));
    System.out.println(getTag(IntObject.class));
    setTag(IntObject.class, 0);

    setTag(FloatObject.class, 10000);
    System.out.println(iterateThroughHeapPrimitiveFields(10000));
    System.out.println(getTag(FloatObject.class));
    setTag(FloatObject.class, 0);

    setTag(Inf1.class, 10000);
    System.out.println(iterateThroughHeapPrimitiveFields(10000));
    System.out.println(getTag(Inf1.class));
    setTag(Inf1.class, 0);

    setTag(Inf2.class, 10000);
    System.out.println(iterateThroughHeapPrimitiveFields(10000));
    System.out.println(getTag(Inf2.class));
    setTag(Inf2.class, 0);
  }
+
  // Dumps the instance primitive fields (integral types) of a fresh IntObject.
  private static void doTestPrimitiveFieldsIntegral() {
    IntObject intObject = new IntObject();
    setTag(intObject, 10000);
    System.out.println(iterateThroughHeapPrimitiveFields(10000));
    System.out.println(getTag(intObject));
  }
+
  // Dumps the instance primitive fields of a FloatObject, which adds
  // float/double fields on top of IntObject's integral ones.
  private static void doTestPrimitiveFieldsFloat() {
    FloatObject floatObject = new FloatObject();
    setTag(floatObject, 10000);
    System.out.println(iterateThroughHeapPrimitiveFields(10000));
    System.out.println(getTag(floatObject));
  }
static class A {
@@ -172,6 +226,31 @@
return ret;
}
  // Fixture types for the primitive-field iteration tests. Declaration order
  // matters: expected.txt encodes each field's JVMTI index, so do not reorder,
  // add, or remove fields without regenerating the expected output.
  private static interface Inf1 {
    public final static int A = 1;
  }

  private static interface Inf2 extends Inf1 {
    public final static int B = 1;
  }

  // One field of each integral primitive type, plus a reference field (which
  // must NOT appear in the primitive-field report) and a static int.
  private static class IntObject implements Inf1 {
    byte b = (byte)1;
    char c= 'a';
    short s = (short)2;
    int i = 3;
    long l = 4;
    Object o = new Object();
    static int sI = 5;
  }

  // Extends IntObject with floating-point fields; shadows sI with its own.
  private static class FloatObject extends IntObject implements Inf2 {
    float f = 1.23f;
    double d = 1.23;
    Object p = new Object();
    static int sI = 6;
  }
+
private static native void setTag(Object o, long tag);
private static native long getTag(Object o);
@@ -188,4 +267,5 @@
Class<?> klassFilter);
private static native String iterateThroughHeapString(long tag);
private static native String iterateThroughHeapPrimitiveArray(long tag);
+ private static native String iterateThroughHeapPrimitiveFields(long tag);
}
diff --git a/test/913-heaps/expected.txt b/test/913-heaps/expected.txt
index c96edef..fc2761e 100644
--- a/test/913-heaps/expected.txt
+++ b/test/913-heaps/expected.txt
@@ -8,34 +8,34 @@
1002@0 --(interface)--> 2001@0 [size=124, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
1@1000 --(class)--> 1000@0 [size=123, length=-1]
-1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
-1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
2001@0 --(interface)--> 2000@0 [size=124, length=-1]
2@1000 --(class)--> 1000@0 [size=123, length=-1]
3@1001 --(class)--> 1001@0 [size=123, length=-1]
-3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
-3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
4@1000 --(class)--> 1000@0 [size=123, length=-1]
5@1002 --(class)--> 1002@0 [size=123, length=-1]
-5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
-5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
6@1000 --(class)--> 1000@0 [size=123, length=-1]
---
1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
1002@0 --(interface)--> 2001@0 [size=124, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
1@1000 --(class)--> 1000@0 [size=123, length=-1]
-1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
-1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
2001@0 --(interface)--> 2000@0 [size=124, length=-1]
2@1000 --(class)--> 1000@0 [size=123, length=-1]
3@1001 --(class)--> 1001@0 [size=123, length=-1]
-3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
-3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
4@1000 --(class)--> 1000@0 [size=123, length=-1]
5@1002 --(class)--> 1002@0 [size=123, length=-1]
-5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
-5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
6@1000 --(class)--> 1000@0 [size=123, length=-1]
---
root@root --(jni-global)--> 1@1000 [size=16, length=-1]
@@ -49,34 +49,34 @@
1002@0 --(interface)--> 2001@0 [size=124, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
1@1000 --(class)--> 1000@0 [size=123, length=-1]
-1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
-1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
2001@0 --(interface)--> 2000@0 [size=124, length=-1]
2@1000 --(class)--> 1000@0 [size=123, length=-1]
3@1001 --(class)--> 1001@0 [size=123, length=-1]
-3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
-3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
4@1000 --(class)--> 1000@0 [size=123, length=-1]
5@1002 --(class)--> 1002@0 [size=123, length=-1]
-5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
-5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
6@1000 --(class)--> 1000@0 [size=123, length=-1]
---
1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
1002@0 --(interface)--> 2001@0 [size=124, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
1@1000 --(class)--> 1000@0 [size=123, length=-1]
-1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
-1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
2001@0 --(interface)--> 2000@0 [size=124, length=-1]
2@1000 --(class)--> 1000@0 [size=123, length=-1]
3@1001 --(class)--> 1001@0 [size=123, length=-1]
-3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
-3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
4@1000 --(class)--> 1000@0 [size=123, length=-1]
5@1002 --(class)--> 1002@0 [size=123, length=-1]
-5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
-5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
6@1000 --(class)--> 1000@0 [size=123, length=-1]
---
[1@0 (32, 'HelloWorld'), 2@0 (16, '')]
@@ -91,18 +91,42 @@
4@0 (18, 3xS '010002000300')
1@0 (14, 2xZ '0001')
23456789
+10000@0 (static, int, index=3) 0000000000000000
+10001
+10000@0 (static, int, index=11) 0000000000000000
+10001
+10000@0 (static, int, index=0) 0000000000000000
+10001
+10000@0 (static, int, index=1) 0000000000000000
+10001
+10000@0 (instance, int, index=2) 0000000000000000
+10001@0 (instance, byte, index=4) 0000000000000001
+10002@0 (instance, char, index=5) 0000000000000061
+10003@0 (instance, int, index=6) 0000000000000003
+10004@0 (instance, long, index=7) 0000000000000004
+10005@0 (instance, short, index=9) 0000000000000002
+10006
+10000@0 (instance, int, index=3) 0000000000000000
+10001@0 (instance, byte, index=5) 0000000000000001
+10002@0 (instance, char, index=6) 0000000000000061
+10003@0 (instance, int, index=7) 0000000000000003
+10004@0 (instance, long, index=8) 0000000000000004
+10005@0 (instance, short, index=10) 0000000000000002
+10006@0 (instance, double, index=12) 3ff3ae147ae147ae
+10007@0 (instance, float, index=13) 000000003f9d70a4
+10008
--- klass ---
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 32])--> 1@1000 [size=16, length=-1]
0@0 --(array-element@0)--> 1@1000 [size=16, length=-1]
-1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
-3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
-5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
-5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
---
-1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
-3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
-5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
-5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
---
root@root --(jni-global)--> 1@1000 [size=16, length=-1]
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
@@ -110,15 +134,15 @@
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 10])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
root@root --(thread)--> 1@1000 [size=16, length=-1]
-1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
-3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
-5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
-5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
---
-1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
-3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
-5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
-5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
---
--- heap_filter ---
---- tagged objects
@@ -135,34 +159,34 @@
1002@0 --(interface)--> 2001@0 [size=124, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
1@1000 --(class)--> 1000@0 [size=123, length=-1]
-1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
-1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
2001@0 --(interface)--> 2000@0 [size=124, length=-1]
2@1000 --(class)--> 1000@0 [size=123, length=-1]
3@1001 --(class)--> 1001@0 [size=123, length=-1]
-3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
-3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
4@1000 --(class)--> 1000@0 [size=123, length=-1]
5@1002 --(class)--> 1002@0 [size=123, length=-1]
-5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
-5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
6@1000 --(class)--> 1000@0 [size=123, length=-1]
---
1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
1002@0 --(interface)--> 2001@0 [size=124, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
1@1000 --(class)--> 1000@0 [size=123, length=-1]
-1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
-1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
2001@0 --(interface)--> 2000@0 [size=124, length=-1]
2@1000 --(class)--> 1000@0 [size=123, length=-1]
3@1001 --(class)--> 1001@0 [size=123, length=-1]
-3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
-3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
4@1000 --(class)--> 1000@0 [size=123, length=-1]
5@1002 --(class)--> 1002@0 [size=123, length=-1]
-5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
-5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
6@1000 --(class)--> 1000@0 [size=123, length=-1]
---
root@root --(jni-global)--> 1@1000 [size=16, length=-1]
@@ -176,34 +200,34 @@
1002@0 --(interface)--> 2001@0 [size=124, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
1@1000 --(class)--> 1000@0 [size=123, length=-1]
-1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
-1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
2001@0 --(interface)--> 2000@0 [size=124, length=-1]
2@1000 --(class)--> 1000@0 [size=123, length=-1]
3@1001 --(class)--> 1001@0 [size=123, length=-1]
-3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
-3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
4@1000 --(class)--> 1000@0 [size=123, length=-1]
5@1002 --(class)--> 1002@0 [size=123, length=-1]
-5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
-5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
6@1000 --(class)--> 1000@0 [size=123, length=-1]
---
1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
1002@0 --(interface)--> 2001@0 [size=124, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
1@1000 --(class)--> 1000@0 [size=123, length=-1]
-1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
-1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
2001@0 --(interface)--> 2000@0 [size=124, length=-1]
2@1000 --(class)--> 1000@0 [size=123, length=-1]
3@1001 --(class)--> 1001@0 [size=123, length=-1]
-3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
-3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
4@1000 --(class)--> 1000@0 [size=123, length=-1]
5@1002 --(class)--> 1002@0 [size=123, length=-1]
-5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
-5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
6@1000 --(class)--> 1000@0 [size=123, length=-1]
---
---- tagged classes
@@ -257,19 +281,19 @@
---- untagged classes
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 32])--> 1@1000 [size=16, length=-1]
0@0 --(array-element@0)--> 1@1000 [size=16, length=-1]
-1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
-1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
-3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
-3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
-5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
-5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
---
-1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
-1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
-3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
-3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
-5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
-5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
---
root@root --(jni-global)--> 1@1000 [size=16, length=-1]
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
@@ -277,17 +301,17 @@
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 10])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
root@root --(thread)--> 1@1000 [size=16, length=-1]
-1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
-1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
-3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
-3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
-5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
-5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
---
-1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
-1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
-3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
-3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
-5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
-5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
---
diff --git a/test/913-heaps/heaps.cc b/test/913-heaps/heaps.cc
index 99bc48e..39fa000 100644
--- a/test/913-heaps/heaps.cc
+++ b/test/913-heaps/heaps.cc
@@ -654,5 +654,95 @@
return env->NewStringUTF(fac.data.c_str());
}
+static constexpr const char* GetPrimitiveTypeName(jvmtiPrimitiveType type) {
+ switch (type) {
+ case JVMTI_PRIMITIVE_TYPE_BOOLEAN:
+ return "boolean";
+ case JVMTI_PRIMITIVE_TYPE_BYTE:
+ return "byte";
+ case JVMTI_PRIMITIVE_TYPE_CHAR:
+ return "char";
+ case JVMTI_PRIMITIVE_TYPE_SHORT:
+ return "short";
+ case JVMTI_PRIMITIVE_TYPE_INT:
+ return "int";
+ case JVMTI_PRIMITIVE_TYPE_FLOAT:
+ return "float";
+ case JVMTI_PRIMITIVE_TYPE_LONG:
+ return "long";
+ case JVMTI_PRIMITIVE_TYPE_DOUBLE:
+ return "double";
+ }
+ LOG(FATAL) << "Unknown type " << static_cast<size_t>(type);
+ UNREACHABLE();
+}
+
+extern "C" JNIEXPORT jstring JNICALL Java_Main_followReferencesPrimitiveFields(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject initial_object) {
+ struct FindFieldCallbacks {
+ static jint JNICALL FollowReferencesCallback(
+ jvmtiHeapReferenceKind reference_kind ATTRIBUTE_UNUSED,
+ const jvmtiHeapReferenceInfo* reference_info ATTRIBUTE_UNUSED,
+ jlong class_tag ATTRIBUTE_UNUSED,
+ jlong referrer_class_tag ATTRIBUTE_UNUSED,
+ jlong size ATTRIBUTE_UNUSED,
+ jlong* tag_ptr ATTRIBUTE_UNUSED,
+ jlong* referrer_tag_ptr ATTRIBUTE_UNUSED,
+ jint length ATTRIBUTE_UNUSED,
+ void* user_data ATTRIBUTE_UNUSED) {
+ return JVMTI_VISIT_OBJECTS; // Continue visiting.
+ }
+
+ static jint JNICALL PrimitiveFieldValueCallback(jvmtiHeapReferenceKind kind,
+ const jvmtiHeapReferenceInfo* info,
+ jlong class_tag,
+ jlong* tag_ptr,
+ jvalue value,
+ jvmtiPrimitiveType value_type,
+ void* user_data) {
+ FindFieldCallbacks* p = reinterpret_cast<FindFieldCallbacks*>(user_data);
+ if (*tag_ptr != 0) {
+ std::ostringstream oss;
+ oss << *tag_ptr
+ << '@'
+ << class_tag
+ << " ("
+ << (kind == JVMTI_HEAP_REFERENCE_FIELD ? "instance, " : "static, ")
+ << GetPrimitiveTypeName(value_type)
+ << ", index="
+ << info->field.index
+ << ") ";
+ // Be lazy, always print eight bytes.
+ static_assert(sizeof(jvalue) == sizeof(uint64_t), "Unexpected jvalue size");
+ uint64_t val;
+ memcpy(&val, &value, sizeof(uint64_t)); // To avoid undefined behavior.
+ oss << android::base::StringPrintf("%016" PRIx64, val);
+
+ if (!p->data.empty()) {
+ p->data += "\n";
+ }
+ p->data += oss.str();
+ // Update the tag to test whether that works.
+ *tag_ptr = *tag_ptr + 1;
+ }
+ return 0;
+ }
+
+ std::string data;
+ };
+
+ jvmtiHeapCallbacks callbacks;
+ memset(&callbacks, 0, sizeof(jvmtiHeapCallbacks));
+ callbacks.heap_reference_callback = FindFieldCallbacks::FollowReferencesCallback;
+ callbacks.primitive_field_callback = FindFieldCallbacks::PrimitiveFieldValueCallback;
+
+ FindFieldCallbacks ffc;
+ jvmtiError ret = jvmti_env->FollowReferences(0, nullptr, initial_object, &callbacks, &ffc);
+ if (JvmtiErrorToException(env, ret)) {
+ return nullptr;
+ }
+ return env->NewStringUTF(ffc.data.c_str());
+}
+
} // namespace Test913Heaps
} // namespace art
diff --git a/test/913-heaps/src/Main.java b/test/913-heaps/src/Main.java
index 14ee268..66f6883 100644
--- a/test/913-heaps/src/Main.java
+++ b/test/913-heaps/src/Main.java
@@ -34,6 +34,7 @@
Runtime.getRuntime().gc();
doPrimitiveArrayTest();
+ doPrimitiveFieldTest();
Runtime.getRuntime().gc();
Runtime.getRuntime().gc();
@@ -124,6 +125,62 @@
System.out.println(getTag(dArray));
}
+ public static void doPrimitiveFieldTest() throws Exception {
+ // Force GCs to clean up dirt.
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ doTestPrimitiveFieldsClasses();
+
+ doTestPrimitiveFieldsIntegral();
+
+ // Force GCs to clean up dirt.
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ doTestPrimitiveFieldsFloat();
+
+ // Force GCs to clean up dirt.
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+ }
+
+ private static void doTestPrimitiveFieldsClasses() {
+ setTag(IntObject.class, 10000);
+ System.out.println(followReferencesPrimitiveFields(IntObject.class));
+ System.out.println(getTag(IntObject.class));
+ setTag(IntObject.class, 0);
+
+ setTag(FloatObject.class, 10000);
+ System.out.println(followReferencesPrimitiveFields(FloatObject.class));
+ System.out.println(getTag(FloatObject.class));
+ setTag(FloatObject.class, 0);
+
+ setTag(Inf1.class, 10000);
+ System.out.println(followReferencesPrimitiveFields(Inf1.class));
+ System.out.println(getTag(Inf1.class));
+ setTag(Inf1.class, 0);
+
+ setTag(Inf2.class, 10000);
+ System.out.println(followReferencesPrimitiveFields(Inf2.class));
+ System.out.println(getTag(Inf2.class));
+ setTag(Inf2.class, 0);
+ }
+
+ private static void doTestPrimitiveFieldsIntegral() {
+ IntObject intObject = new IntObject();
+ setTag(intObject, 10000);
+ System.out.println(followReferencesPrimitiveFields(intObject));
+ System.out.println(getTag(intObject));
+ }
+
+ private static void doTestPrimitiveFieldsFloat() {
+ FloatObject floatObject = new FloatObject();
+ setTag(floatObject, 10000);
+ System.out.println(followReferencesPrimitiveFields(floatObject));
+ System.out.println(getTag(floatObject));
+ }
+
private static void run() {
clearStats();
forceGarbageCollection();
@@ -315,6 +372,31 @@
}
}
+ private static interface Inf1 {
+ public final static int A = 1;
+ }
+
+ private static interface Inf2 extends Inf1 {
+ public final static int B = 1;
+ }
+
+ private static class IntObject implements Inf1 {
+ byte b = (byte)1;
+ char c= 'a';
+ short s = (short)2;
+ int i = 3;
+ long l = 4;
+ Object o = new Object();
+ static int sI = 5;
+ }
+
+ private static class FloatObject extends IntObject implements Inf2 {
+ float f = 1.23f;
+ double d = 1.23;
+ Object p = new Object();
+ static int sI = 6;
+ }
+
public static class Verifier {
// Should roots with vreg=-1 be printed?
public final static boolean PRINT_ROOTS_WITH_UNKNOWN_VREG = false;
@@ -508,4 +590,5 @@
Object initialObject, int stopAfter, int followSet, Object jniRef);
public static native String[] followReferencesString(Object initialObject);
public static native String followReferencesPrimitiveArray(Object initialObject);
+ public static native String followReferencesPrimitiveFields(Object initialObject);
}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 95967b5..bfb04a4 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -368,6 +368,7 @@
# Tests that are broken with GC stress.
# * 137-cfi needs to unwind a second forked process. We're using a primitive sleep to wait till we
# hope the second process got into the expected state. The slowness of gcstress makes this bad.
+# * 152-dead-large-object requires a heap larger than what gcstress uses.
# * 908-gc-start-finish expects GCs only to be run at clear points. The reduced heap size makes
# this non-deterministic. Same for 913.
# * 961-default-iface-resolution-gen and 964-default-iface-init-genare very long tests that often
@@ -375,6 +376,7 @@
# slows down allocations significantly which these tests do a lot.
TEST_ART_BROKEN_GCSTRESS_RUN_TESTS := \
137-cfi \
+ 152-dead-large-object \
154-gc-loop \
908-gc-start-finish \
913-heaps \
@@ -810,6 +812,12 @@
TEST_ART_TARGET_SYNC_DEPS += libopenjdkjvmti
TEST_ART_TARGET_SYNC_DEPS += libopenjdkjvmtid
+TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar
+TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar
+TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/okhttp-testdex.jar
+TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/bouncycastle-testdex.jar
+TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/conscrypt-testdex.jar
+
# All tests require the host executables. The tests also depend on the core images, but on
# specific version depending on the compiler.
ART_TEST_HOST_RUN_TEST_DEPENDENCIES := \
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 99926aa..0ac5481 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -376,10 +376,8 @@
bpath_suffix="-hostdex"
else
framework="${ANDROID_ROOT}/framework"
- bpath_suffix=""
+ bpath_suffix="-testdex"
fi
- # TODO If the target was compiled WITH_DEXPREOPT=true then these tests will
- # fail since these jar files will be stripped.
bpath="${framework}/core-libart${bpath_suffix}.jar"
bpath="${bpath}:${framework}/core-oj${bpath_suffix}.jar"
bpath="${bpath}:${framework}/conscrypt${bpath_suffix}.jar"
@@ -458,8 +456,9 @@
FLAGS="$FLAGS -Xnorelocate"
COMPILE_FLAGS="${COMPILE_FLAGS} --runtime-arg -Xnorelocate"
if [ "$HOST" = "y" ]; then
- # Increase ulimit to 64MB in case we are running hprof test.
- ulimit -S 64000 || exit 1
+ # Increase ulimit to 128MB in case we are running hprof test,
+ # or string append test with art-debug-gc.
+ ulimit -S 128000 || exit 1
fi
fi
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 784f49c..6caf7b0 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -106,6 +106,11 @@
"slowness of gcstress makes this bad."]
},
{
+ "test": "152-dead-large-object",
+ "variant": "gcstress",
+ "description": ["152-dead-large-object requires a heap larger than what gcstress uses."]
+ },
+ {
"tests": ["908-gc-start-finish",
"913-heaps"],
"variant": "gcstress",
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 6529640..e0aae46 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -216,5 +216,55 @@
modes: [device],
names: ["libcore.java.lang.ProcessBuilderTest#testRedirectInherit",
"libcore.java.lang.ProcessBuilderTest#testRedirect_nullStreams"]
+},
+{
+ description: "Linker issues with libjavacoretests",
+ result: EXEC_FAILED,
+ bug: 35417197,
+ modes: [device],
+ names: [
+ "dalvik.system.JniTest#testGetSuperclass",
+ "dalvik.system.JniTest#testPassingBooleans",
+ "dalvik.system.JniTest#testPassingBytes",
+ "dalvik.system.JniTest#testPassingChars",
+ "dalvik.system.JniTest#testPassingClass",
+ "dalvik.system.JniTest#testPassingDoubles",
+ "dalvik.system.JniTest#testPassingFloats",
+ "dalvik.system.JniTest#testPassingInts",
+ "dalvik.system.JniTest#testPassingLongs",
+ "dalvik.system.JniTest#testPassingObjectReferences",
+ "dalvik.system.JniTest#testPassingShorts",
+ "dalvik.system.JniTest#testPassingThis",
+ "libcore.java.lang.OldSystemTest#test_load",
+ "libcore.java.lang.ThreadTest#testContextClassLoaderIsInherited",
+ "libcore.java.lang.ThreadTest#testContextClassLoaderIsNotNull",
+ "libcore.java.lang.ThreadTest#testGetAllStackTracesIncludesAllGroups",
+ "libcore.java.lang.ThreadTest#testGetStackTrace",
+ "libcore.java.lang.ThreadTest#testJavaContextClassLoader",
+ "libcore.java.lang.ThreadTest#testLeakingStartedThreads",
+ "libcore.java.lang.ThreadTest#testLeakingUnstartedThreads",
+ "libcore.java.lang.ThreadTest#testNativeThreadNames",
+ "libcore.java.lang.ThreadTest#testParkUntilWithUnderflowValue",
+ "libcore.java.lang.ThreadTest#testThreadDoubleStart",
+ "libcore.java.lang.ThreadTest#testThreadInterrupted",
+ "libcore.java.lang.ThreadTest#testThreadRestart",
+ "libcore.java.lang.ThreadTest#testThreadSleep",
+ "libcore.java.lang.ThreadTest#testThreadSleepIllegalArguments",
+ "libcore.java.lang.ThreadTest#testThreadWakeup",
+ "libcore.java.lang.ThreadTest#testUncaughtExceptionPreHandler_calledBeforeDefaultHandler",
+ "libcore.java.lang.ThreadTest#testUncaughtExceptionPreHandler_noDefaultHandler",
+ "libcore.java.util.TimeZoneTest#testDisplayNamesWithScript",
+ "libcore.java.util.zip.ZipEntryTest#testCommentAndExtraInSameOrder",
+ "libcore.java.util.zip.ZipEntryTest#testMaxLengthExtra",
+ "libcore.util.NativeAllocationRegistryTest#testBadSize",
+ "libcore.util.NativeAllocationRegistryTest#testEarlyFree",
+ "libcore.util.NativeAllocationRegistryTest#testNativeAllocationAllocatorAndNoSharedRegistry",
+ "libcore.util.NativeAllocationRegistryTest#testNativeAllocationAllocatorAndSharedRegistry",
+ "libcore.util.NativeAllocationRegistryTest#testNativeAllocationNoAllocatorAndNoSharedRegistry",
+ "libcore.util.NativeAllocationRegistryTest#testNativeAllocationNoAllocatorAndSharedRegistry",
+ "libcore.util.NativeAllocationRegistryTest#testNullArguments",
+ "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_y",
+ "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_yy"
+ ]
}
]
diff --git a/tools/setup-buildbot-device.sh b/tools/setup-buildbot-device.sh
index 1e9c763..7eaaaf9 100755
--- a/tools/setup-buildbot-device.sh
+++ b/tools/setup-buildbot-device.sh
@@ -17,9 +17,33 @@
green='\033[0;32m'
nc='\033[0m'
+# Setup as root, as the next buildbot step (device cleanup) requires it.
+# This is also required to set the date, if needed.
+adb root
+adb wait-for-device
+
+echo -e "${green}Date on host${nc}"
+date
+
echo -e "${green}Date on device${nc}"
adb shell date
+host_seconds_since_epoch=$(date -u +%s)
+device_seconds_since_epoch=$(adb shell date -u +%s)
+
+abs_time_difference_in_seconds=$(expr $host_seconds_since_epoch - $device_seconds_since_epoch)
+if [ $abs_time_difference_in_seconds -lt 0 ]; then
+ abs_time_difference_in_seconds=$(expr 0 - $abs_time_difference_in_seconds)
+fi
+
+seconds_per_hour=3600
+
+# Update date on device if the difference with host is more than one hour.
+if [ $abs_time_difference_in_seconds -gt $seconds_per_hour ]; then
+ echo -e "${green}Update date on device${nc}"
+ adb shell date -u @$host_seconds_since_epoch
+fi
+
echo -e "${green}Turn off selinux${nc}"
adb shell setenforce 0
adb shell getenforce