Merge "ARM: VIXL32: Remove erroneous DCHECK."
diff --git a/Android.bp b/Android.bp
index b9f1db5..d0e22fb 100644
--- a/Android.bp
+++ b/Android.bp
@@ -27,6 +27,7 @@
"dexdump",
"dexlayout",
"dexlist",
+ "dexoptanalyzer",
"disassembler",
"imgdiag",
"oatdump",
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
new file mode 100644
index 0000000..cf1832b
--- /dev/null
+++ b/PREUPLOAD.cfg
@@ -0,0 +1,2 @@
+[Hook Scripts]
+check_generated_files_up_to_date = tools/cpp-define-generator/presubmit-check-files-up-to-date
diff --git a/build/Android.bp b/build/Android.bp
index cd9d74a..b1553c7 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -70,6 +70,8 @@
"-DART_STACK_OVERFLOW_GAP_mips64=16384",
"-DART_STACK_OVERFLOW_GAP_x86=8192",
"-DART_STACK_OVERFLOW_GAP_x86_64=8192",
+ // Enable thread annotations for std::mutex, etc.
+ "-D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS",
],
target: {
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index e568ce2..6de5aef 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -109,6 +109,7 @@
ART_CORE_DEBUGGABLE_EXECUTABLES := \
dex2oat \
+ dexoptanalyzer \
imgdiag \
oatdump \
patchoat \
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 291db8b..b7a2379 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -87,8 +87,8 @@
# Do you want tests with the JNI forcecopy mode enabled run?
ART_TEST_JNI_FORCECOPY ?= $(ART_TEST_FULL)
-# Do you want run-tests with relocation disabled run?
-ART_TEST_RUN_TEST_NO_RELOCATE ?= $(ART_TEST_FULL)
+# Do you want run-tests with relocation enabled run?
+ART_TEST_RUN_TEST_RELOCATE ?= $(ART_TEST_FULL)
# Do you want run-tests with prebuilding?
ART_TEST_RUN_TEST_PREBUILD ?= true
@@ -96,6 +96,9 @@
# Do you want run-tests with no prebuilding enabled run?
ART_TEST_RUN_TEST_NO_PREBUILD ?= $(ART_TEST_FULL)
+# Do you want run-tests with a pregenerated core.art?
+ART_TEST_RUN_TEST_IMAGE ?= true
+
# Do you want run-tests without a pregenerated core.art?
ART_TEST_RUN_TEST_NO_IMAGE ?= $(ART_TEST_FULL)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index c87075f..e525808 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -28,6 +28,7 @@
DexToDexDecompiler \
ErroneousA \
ErroneousB \
+ ErroneousInit \
ExceptionHandle \
GetMethodSignature \
ImageLayoutA \
@@ -87,7 +88,7 @@
ART_GTEST_dex2oat_environment_tests_DEX_DEPS := Main MainStripped MultiDex MultiDexModifiedSecondary Nested
ART_GTEST_atomic_method_ref_map_test_DEX_DEPS := Interfaces
-ART_GTEST_class_linker_test_DEX_DEPS := AllFields ErroneousA ErroneousB Interfaces MethodTypes MultiDex MyClass Nested Statics StaticsFromCode
+ART_GTEST_class_linker_test_DEX_DEPS := AllFields ErroneousA ErroneousB ErroneousInit Interfaces MethodTypes MultiDex MyClass Nested Statics StaticsFromCode
ART_GTEST_class_table_test_DEX_DEPS := XandY
ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods ProfileTestMultiDex
ART_GTEST_dex_cache_test_DEX_DEPS := Main Packages MethodTypes
@@ -100,6 +101,7 @@
ART_GTEST_jni_compiler_test_DEX_DEPS := MyClassNatives
ART_GTEST_jni_internal_test_DEX_DEPS := AllFields StaticLeafMethods
ART_GTEST_oat_file_assistant_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS)
+ART_GTEST_dexoptanalyzer_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS)
ART_GTEST_oat_file_test_DEX_DEPS := Main MultiDex
ART_GTEST_oat_test_DEX_DEPS := Main
ART_GTEST_object_test_DEX_DEPS := ProtoCompare ProtoCompare2 StaticsFromCode XandY
@@ -111,6 +113,7 @@
ART_GTEST_stub_test_DEX_DEPS := AllFields
ART_GTEST_transaction_test_DEX_DEPS := Transaction
ART_GTEST_type_lookup_table_test_DEX_DEPS := Lookup
+ART_GTEST_unstarted_runtime_test_DEX_DEPS := Nested
ART_GTEST_verifier_deps_test_DEX_DEPS := VerifierDeps MultiDex
ART_GTEST_dex_to_dex_decompiler_test_DEX_DEPS := VerifierDeps DexToDexDecompiler
@@ -136,6 +139,12 @@
ART_GTEST_oat_file_assistant_test_TARGET_DEPS := \
$(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS)
+ART_GTEST_dexoptanalyzer_test_HOST_DEPS := \
+ $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) \
+ $(HOST_OUT_EXECUTABLES)/dexoptanalyzerd
+ART_GTEST_dexoptanalyzer_test_TARGET_DEPS := \
+ $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) \
+ dexoptanalyzerd
ART_GTEST_dex2oat_test_HOST_DEPS := \
$(ART_GTEST_dex2oat_environment_tests_HOST_DEPS)
@@ -219,6 +228,7 @@
art_dexdump_tests \
art_dexlayout_tests \
art_dexlist_tests \
+ art_dexoptanalyzer_tests \
art_imgdiag_tests \
art_oatdump_tests \
art_profman_tests \
@@ -614,6 +624,9 @@
ART_GTEST_oat_file_assistant_test_DEX_DEPS :=
ART_GTEST_oat_file_assistant_test_HOST_DEPS :=
ART_GTEST_oat_file_assistant_test_TARGET_DEPS :=
+ART_GTEST_dexoptanalyzer_test_DEX_DEPS :=
+ART_GTEST_dexoptanalyzer_test_HOST_DEPS :=
+ART_GTEST_dexoptanalyzer_test_TARGET_DEPS :=
ART_GTEST_dex2oat_test_DEX_DEPS :=
ART_GTEST_dex2oat_test_HOST_DEPS :=
ART_GTEST_dex2oat_test_TARGET_DEPS :=
diff --git a/build/art.go b/build/art.go
index 84269c3..baa6e59 100644
--- a/build/art.go
+++ b/build/art.go
@@ -58,7 +58,7 @@
asflags = append(asflags, "-DART_HEAP_POISONING=1")
}
- if !envFalse(ctx, "ART_USE_READ_BARRIER") || ctx.AConfig().ArtUseReadBarrier() {
+ if !envFalse(ctx, "ART_USE_READ_BARRIER") && ctx.AConfig().ArtUseReadBarrier() {
// Used to change the read barrier type. Valid values are BAKER, BROOKS, TABLELOOKUP.
// The default is BAKER.
barrierType := envDefault(ctx, "ART_READ_BARRIER_TYPE", "BAKER")
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index 28c009e..f1123eb 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -766,10 +766,6 @@
Result ParseAndAppend(const std::string& option, ExperimentalFlags& existing) {
if (option == "none") {
existing = ExperimentalFlags::kNone;
- } else if (option == "agents") {
- existing = existing | ExperimentalFlags::kAgents;
- } else if (option == "runtime-plugins") {
- existing = existing | ExperimentalFlags::kRuntimePlugins;
} else {
return Result::Failure(std::string("Unknown option '") + option + "'");
}
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 46f3358..f6a4db4 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -80,6 +80,7 @@
"optimizing/register_allocator_graph_color.cc",
"optimizing/register_allocator_linear_scan.cc",
"optimizing/select_generator.cc",
+ "optimizing/scheduler.cc",
"optimizing/sharpening.cc",
"optimizing/side_effects_analysis.cc",
"optimizing/ssa_builder.cc",
@@ -123,6 +124,7 @@
"jni/quick/arm64/calling_convention_arm64.cc",
"linker/arm64/relative_patcher_arm64.cc",
"optimizing/code_generator_arm64.cc",
+ "optimizing/scheduler_arm64.cc",
"optimizing/instruction_simplifier_arm64.cc",
"optimizing/intrinsics_arm64.cc",
"optimizing/nodes_arm64.cc",
@@ -362,6 +364,7 @@
"jni/jni_cfi_test.cc",
"optimizing/codegen_test.cc",
"optimizing/optimizing_cfi_test.cc",
+ "optimizing/scheduler_test.cc",
],
codegen: {
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index f056dd3..f296851 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -135,65 +135,6 @@
return referrer_class->CanAccessResolvedMethod(access_to, method, dex_cache, field_idx);
}
-template <typename ArtMember>
-inline std::pair<bool, bool> CompilerDriver::IsClassOfStaticMemberAvailableToReferrer(
- mirror::DexCache* dex_cache,
- mirror::Class* referrer_class,
- ArtMember* resolved_member,
- uint16_t member_idx,
- dex::TypeIndex* storage_index) {
- DCHECK(resolved_member->IsStatic());
- if (LIKELY(referrer_class != nullptr)) {
- ObjPtr<mirror::Class> members_class = resolved_member->GetDeclaringClass();
- if (members_class == referrer_class) {
- *storage_index = members_class->GetDexTypeIndex();
- return std::make_pair(true, true);
- }
- if (CanAccessResolvedMember<ArtMember>(
- referrer_class, members_class.Ptr(), resolved_member, dex_cache, member_idx)) {
- // We have the resolved member, we must make it into a index for the referrer
- // in its static storage (which may fail if it doesn't have a slot for it)
- // TODO: for images we can elide the static storage base null check
- // if we know there's a non-null entry in the image
- const DexFile* dex_file = dex_cache->GetDexFile();
- dex::TypeIndex storage_idx(DexFile::kDexNoIndex16);
- if (LIKELY(members_class->GetDexCache() == dex_cache)) {
- // common case where the dex cache of both the referrer and the member are the same,
- // no need to search the dex file
- storage_idx = members_class->GetDexTypeIndex();
- } else {
- // Search dex file for localized ssb index, may fail if member's class is a parent
- // of the class mentioned in the dex file and there is no dex cache entry.
- storage_idx = resolved_member->GetDeclaringClass()->FindTypeIndexInOtherDexFile(*dex_file);
- }
- if (storage_idx.IsValid()) {
- *storage_index = storage_idx;
- return std::make_pair(true, !resolved_member->IsFinal());
- }
- }
- }
- // Conservative defaults.
- *storage_index = dex::TypeIndex(DexFile::kDexNoIndex16);
- return std::make_pair(false, false);
-}
-
-inline std::pair<bool, bool> CompilerDriver::IsFastStaticField(
- mirror::DexCache* dex_cache, mirror::Class* referrer_class,
- ArtField* resolved_field, uint16_t field_idx, dex::TypeIndex* storage_index) {
- return IsClassOfStaticMemberAvailableToReferrer(
- dex_cache, referrer_class, resolved_field, field_idx, storage_index);
-}
-
-inline bool CompilerDriver::IsClassOfStaticMethodAvailableToReferrer(
- mirror::DexCache* dex_cache, mirror::Class* referrer_class,
- ArtMethod* resolved_method, uint16_t method_idx, dex::TypeIndex* storage_index) {
- std::pair<bool, bool> result = IsClassOfStaticMemberAvailableToReferrer(
- dex_cache, referrer_class, resolved_method, method_idx, storage_index);
- // Only the first member of `result` is meaningful, as there is no
- // "write access" to a method.
- return result.first;
-}
-
inline ArtMethod* CompilerDriver::ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index c03ffca..1d4eaf8 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -529,9 +529,15 @@
// We store the verification information in the class status in the oat file, which the linker
// can validate (checksums) and use to skip load-time verification. It is thus safe to
// optimize when a class has been fully verified before.
+ optimizer::DexToDexCompilationLevel max_level = optimizer::DexToDexCompilationLevel::kOptimize;
+ if (driver.GetCompilerOptions().GetDebuggable()) {
+ // We are debuggable so definitions of classes might be changed. We don't want to do any
+ // optimizations that could break that.
+ max_level = optimizer::DexToDexCompilationLevel::kRequired;
+ }
if (klass->IsVerified()) {
// Class is verified so we can enable DEX-to-DEX compilation for performance.
- return optimizer::DexToDexCompilationLevel::kOptimize;
+ return max_level;
} else if (klass->IsCompileTimeVerified()) {
// Class verification has soft-failed. Anyway, ensure at least correctness.
DCHECK_EQ(klass->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime);
@@ -940,6 +946,31 @@
DCHECK(single_thread_pool_ != nullptr);
}
+static void EnsureVerifiedOrVerifyAtRuntime(jobject jclass_loader,
+ const std::vector<const DexFile*>& dex_files) {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader>(jclass_loader)));
+ MutableHandle<mirror::Class> cls(hs.NewHandle<mirror::Class>(nullptr));
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+
+ for (const DexFile* dex_file : dex_files) {
+ for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+ const char* descriptor = dex_file->GetClassDescriptor(class_def);
+ cls.Assign(class_linker->FindClass(soa.Self(), descriptor, class_loader));
+ if (cls.Get() == nullptr) {
+ soa.Self()->ClearException();
+ } else if (&cls->GetDexFile() == dex_file) {
+ DCHECK(cls->IsErroneous() || cls->IsVerified() || cls->IsCompileTimeVerified())
+ << cls->PrettyClass()
+ << " " << cls->GetStatus();
+ }
+ }
+ }
+}
+
void CompilerDriver::PreCompile(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
TimingLogger* timings) {
@@ -984,6 +1015,9 @@
}
if (compiler_options_->IsAnyMethodCompilationEnabled()) {
+ if (kIsDebugBuild) {
+ EnsureVerifiedOrVerifyAtRuntime(class_loader, dex_files);
+ }
InitializeClasses(class_loader, dex_files, timings);
VLOG(compiler) << "InitializeClasses: " << GetMemoryUsageString(false);
}
@@ -1034,23 +1068,6 @@
return result;
}
-bool CompilerDriver::ShouldVerifyClassBasedOnProfile(const DexFile& dex_file,
- uint16_t class_idx) const {
- if (!compiler_options_->VerifyOnlyProfile()) {
- // No profile, verify everything.
- return true;
- }
- DCHECK(profile_compilation_info_ != nullptr);
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_idx);
- dex::TypeIndex type_idx = class_def.class_idx_;
- bool result = profile_compilation_info_->ContainsClass(dex_file, type_idx);
- if (kDebugProfileGuidedCompilation) {
- LOG(INFO) << "[ProfileGuidedCompilation] " << (result ? "Verified" : "Skipped") << " method:"
- << dex_file.GetClassDescriptor(class_def);
- }
- return result;
-}
-
class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor {
public:
explicit ResolveCatchBlockExceptionsClassVisitor(
@@ -1949,6 +1966,31 @@
DCHECK(!it.HasNext());
}
+static void LoadAndUpdateStatus(const DexFile& dex_file,
+ const DexFile::ClassDef& class_def,
+ mirror::Class::Status status,
+ Handle<mirror::ClassLoader> class_loader,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ StackHandleScope<1> hs(self);
+ const char* descriptor = dex_file.GetClassDescriptor(class_def);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Handle<mirror::Class> cls(hs.NewHandle<mirror::Class>(
+ class_linker->FindClass(self, descriptor, class_loader)));
+ if (cls.Get() != nullptr) {
+ // Check that the class is resolved with the current dex file. We might get
+ // a boot image class, or a class in a different dex file for multidex, and
+ // we should not update the status in that case.
+ if (&cls->GetDexFile() == &dex_file) {
+ ObjectLock<mirror::Class> lock(self, cls);
+ mirror::Class::SetStatus(cls, status, self);
+ }
+ } else {
+ DCHECK(self->IsExceptionPending());
+ self->ClearException();
+ }
+}
+
bool CompilerDriver::FastVerify(jobject jclass_loader,
const std::vector<const DexFile*>& dex_files,
TimingLogger* timings) {
@@ -1963,12 +2005,12 @@
StackHandleScope<2> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader>(jclass_loader)));
- MutableHandle<mirror::Class> cls(hs.NewHandle<mirror::Class>(nullptr));
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
if (!verifier_deps->ValidateDependencies(class_loader, soa.Self())) {
return false;
}
+ bool compiler_only_verifies = !GetCompilerOptions().IsAnyMethodCompilationEnabled();
+
// We successfully validated the dependencies, now update class status
// of verified classes. Note that the dependencies also record which classes
// could not be fully verified; we could try again, but that would hurt verification
@@ -1983,28 +2025,16 @@
for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
if (set.find(class_def.class_idx_) == set.end()) {
- if (!GetCompilerOptions().IsAnyMethodCompilationEnabled()) {
+ if (compiler_only_verifies) {
// Just update the compiled_classes_ map. The compiler doesn't need to resolve
// the type.
compiled_classes_.Overwrite(
ClassReference(dex_file, i), new CompiledClass(mirror::Class::kStatusVerified));
} else {
- // Resolve the type, so later compilation stages know they don't need to verify
+ // Update the class status, so later compilation stages know they don't need to verify
// the class.
- const char* descriptor = dex_file->GetClassDescriptor(class_def);
- cls.Assign(class_linker->FindClass(soa.Self(), descriptor, class_loader));
- if (cls.Get() != nullptr) {
- // Check that the class is resolved with the current dex file. We might get
- // a boot image class, or a class in a different dex file for multidex, and
- // we should not update the status in that case.
- if (&cls->GetDexFile() == dex_file) {
- ObjectLock<mirror::Class> lock(soa.Self(), cls);
- mirror::Class::SetStatus(cls, mirror::Class::kStatusVerified, soa.Self());
- }
- } else {
- DCHECK(soa.Self()->IsExceptionPending());
- soa.Self()->ClearException();
- }
+ LoadAndUpdateStatus(
+ *dex_file, class_def, mirror::Class::kStatusVerified, class_loader, soa.Self());
// Create `VerifiedMethod`s for each methods, the compiler expects one for
// quickening or compiling.
// Note that this means:
@@ -2013,6 +2043,14 @@
// TODO(ngeoffray): Reconsider this once we refactor compiler filters.
PopulateVerifiedMethods(*dex_file, i, verification_results_);
}
+ } else if (!compiler_only_verifies) {
+ // Make sure later compilation stages know they should not try to verify
+ // this class again.
+ LoadAndUpdateStatus(*dex_file,
+ class_def,
+ mirror::Class::kStatusRetryVerificationAtRuntime,
+ class_loader,
+ soa.Self());
}
}
}
@@ -2077,13 +2115,6 @@
ATRACE_CALL();
ScopedObjectAccess soa(Thread::Current());
const DexFile& dex_file = *manager_->GetDexFile();
- if (!manager_->GetCompiler()->ShouldVerifyClassBasedOnProfile(dex_file, class_def_index)) {
- // Skip verification since the class is not in the profile, and let the VerifierDeps know
- // that the class will need to be verified at runtime.
- verifier::VerifierDeps::MaybeRecordVerificationStatus(
- dex_file, dex::TypeIndex(class_def_index), verifier::MethodVerifier::kSoftFailure);
- return;
- }
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ClassLinker* class_linker = manager_->GetClassLinker();
@@ -2199,7 +2230,7 @@
if (klass.Get() != nullptr) {
// Only do this if the class is resolved. If even resolution fails, quickening will go very,
// very wrong.
- if (klass->IsResolved()) {
+ if (klass->IsResolved() && !klass->IsErroneousResolved()) {
if (klass->GetStatus() < mirror::Class::kStatusVerified) {
ObjectLock<mirror::Class> lock(soa.Self(), klass);
// Set class status to verified.
@@ -2626,7 +2657,8 @@
void CompilerDriver::RecordClassStatus(ClassReference ref, mirror::Class::Status status) {
switch (status) {
case mirror::Class::kStatusNotReady:
- case mirror::Class::kStatusError:
+ case mirror::Class::kStatusErrorResolved:
+ case mirror::Class::kStatusErrorUnresolved:
case mirror::Class::kStatusRetryVerificationAtRuntime:
case mirror::Class::kStatusVerified:
case mirror::Class::kStatusInitialized:
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 503fe3a..5b4c751 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -233,27 +233,6 @@
ArtField* resolved_field, uint16_t field_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Can we fast-path an SGET/SPUT access to a static field? If yes, compute the type index
- // of the declaring class in the referrer's dex file.
- std::pair<bool, bool> IsFastStaticField(mirror::DexCache* dex_cache,
- mirror::Class* referrer_class,
- ArtField* resolved_field,
- uint16_t field_idx,
- dex::TypeIndex* storage_index)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Return whether the declaring class of `resolved_method` is
- // available to `referrer_class`. If this is true, compute the type
- // index of the declaring class in the referrer's dex file and
- // return it through the out argument `storage_index`; otherwise
- // return DexFile::kDexNoIndex through `storage_index`.
- bool IsClassOfStaticMethodAvailableToReferrer(mirror::DexCache* dex_cache,
- mirror::Class* referrer_class,
- ArtMethod* resolved_method,
- uint16_t method_idx,
- dex::TypeIndex* storage_index)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Resolve a method. Returns null on failure, including incompatible class change.
ArtMethod* ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
@@ -379,21 +358,6 @@
}
private:
- // Return whether the declaring class of `resolved_member` is
- // available to `referrer_class` for read or write access using two
- // Boolean values returned as a pair. If is true at least for read
- // access, compute the type index of the declaring class in the
- // referrer's dex file and return it through the out argument
- // `storage_index`; otherwise return DexFile::kDexNoIndex through
- // `storage_index`.
- template <typename ArtMember>
- std::pair<bool, bool> IsClassOfStaticMemberAvailableToReferrer(mirror::DexCache* dex_cache,
- mirror::Class* referrer_class,
- ArtMember* resolved_member,
- uint16_t member_idx,
- dex::TypeIndex* storage_index)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Can `referrer_class` access the resolved `member`?
// Dispatch call to mirror::Class::CanAccessResolvedField or
// mirror::Class::CanAccessResolvedMember depending on the value of
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 459aca3..c72edb1 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -756,7 +756,7 @@
bool my_early_exit = false; // Only for ourselves, ignore caller.
// Remove classes that failed to verify since we don't want to have java.lang.VerifyError in the
// app image.
- if (klass->GetStatus() == mirror::Class::kStatusError) {
+ if (klass->IsErroneous()) {
result = true;
} else {
ObjPtr<mirror::ClassExt> ext(klass->GetExtData());
@@ -777,8 +777,8 @@
visited);
}
// Check static fields and their classes.
- size_t num_static_fields = klass->NumReferenceStaticFields();
- if (num_static_fields != 0 && klass->IsResolved()) {
+ if (klass->IsResolved() && klass->NumReferenceStaticFields() != 0) {
+ size_t num_static_fields = klass->NumReferenceStaticFields();
// Presumably GC can happen when we are cross compiling, it should not cause performance
// problems to do pointer size logic.
MemberOffset field_offset = klass->GetFirstReferenceStaticFieldOffset(
@@ -1154,7 +1154,7 @@
// Visit and assign offsets for fields and field arrays.
mirror::Class* as_klass = obj->AsClass();
mirror::DexCache* dex_cache = as_klass->GetDexCache();
- DCHECK_NE(as_klass->GetStatus(), mirror::Class::kStatusError);
+ DCHECK(!as_klass->IsErroneous()) << as_klass->GetStatus();
if (compile_app_image_) {
// Extra sanity, no boot loader classes should be left!
CHECK(!IsBootClassLoaderClass(as_klass)) << as_klass->PrettyClass();
@@ -2348,6 +2348,16 @@
void ImageWriter::CopyAndFixupMethod(ArtMethod* orig,
ArtMethod* copy,
const ImageInfo& image_info) {
+ if (orig->IsAbstract()) {
+ // Ignore the single-implementation info for abstract method.
+    // Do this on orig instead of copy, otherwise there is a crash because
+    // methods are copied before classes.
+ // TODO: handle fixup of single-implementation method for abstract method.
+ orig->SetHasSingleImplementation(false);
+ orig->SetSingleImplementation(
+ nullptr, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
+ }
+
memcpy(copy, orig, ArtMethod::Size(target_ptr_size_));
copy->SetDeclaringClass(GetImageAddress(orig->GetDeclaringClassUnchecked()));
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 148ce4f..cbd831a 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -102,7 +102,7 @@
/* no_inline_from */ nullptr,
/* include_patch_information */ false,
CompilerOptions::kDefaultTopKProfileThreshold,
- Runtime::Current()->IsDebuggable(),
+ Runtime::Current()->IsJavaDebuggable(),
CompilerOptions::kDefaultGenerateDebugInfo,
/* implicit_null_checks */ true,
/* implicit_so_checks */ true,
@@ -211,7 +211,7 @@
JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
success = compiler_driver_->GetCompiler()->JitCompile(self, code_cache, method, osr);
if (success && (jit_logger_ != nullptr)) {
- jit_logger_->WriteLog(code_cache, method);
+ jit_logger_->WriteLog(code_cache, method, osr);
}
}
diff --git a/compiler/jit/jit_logger.cc b/compiler/jit/jit_logger.cc
index 9ce3b0c..aa4f667 100644
--- a/compiler/jit/jit_logger.cc
+++ b/compiler/jit/jit_logger.cc
@@ -23,6 +23,7 @@
#include "driver/compiler_driver.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
+#include "oat_file-inl.h"
namespace art {
namespace jit {
@@ -49,9 +50,10 @@
}
}
-void JitLogger::WritePerfMapLog(JitCodeCache* code_cache, ArtMethod* method) {
+void JitLogger::WritePerfMapLog(JitCodeCache* code_cache, ArtMethod* method, bool osr) {
if (perf_file_ != nullptr) {
- const void* ptr = method->GetEntryPointFromQuickCompiledCode();
+ const void* ptr = osr ? code_cache->LookupOsrMethodHeader(method)->GetCode()
+ : method->GetEntryPointFromQuickCompiledCode();
size_t code_size = code_cache->GetMemorySizeOfCodePointer(ptr);
std::string method_name = method->PrettyMethod();
@@ -268,9 +270,10 @@
WriteJitDumpHeader();
}
-void JitLogger::WriteJitDumpLog(JitCodeCache* code_cache, ArtMethod* method) {
+void JitLogger::WriteJitDumpLog(JitCodeCache* code_cache, ArtMethod* method, bool osr) {
if (jit_dump_file_ != nullptr) {
- const void* code = method->GetEntryPointFromQuickCompiledCode();
+ const void* code = osr ? code_cache->LookupOsrMethodHeader(method)->GetCode()
+ : method->GetEntryPointFromQuickCompiledCode();
size_t code_size = code_cache->GetMemorySizeOfCodePointer(code);
std::string method_name = method->PrettyMethod();
diff --git a/compiler/jit/jit_logger.h b/compiler/jit/jit_logger.h
index 0f8cfe4..460864e 100644
--- a/compiler/jit/jit_logger.h
+++ b/compiler/jit/jit_logger.h
@@ -94,10 +94,10 @@
OpenJitDumpLog();
}
- void WriteLog(JitCodeCache* code_cache, ArtMethod* method)
+ void WriteLog(JitCodeCache* code_cache, ArtMethod* method, bool osr)
REQUIRES_SHARED(Locks::mutator_lock_) {
- WritePerfMapLog(code_cache, method);
- WriteJitDumpLog(code_cache, method);
+ WritePerfMapLog(code_cache, method, osr);
+ WriteJitDumpLog(code_cache, method, osr);
}
void CloseLog() {
@@ -108,13 +108,13 @@
private:
// For perf-map profiling
void OpenPerfMapLog();
- void WritePerfMapLog(JitCodeCache* code_cache, ArtMethod* method)
+ void WritePerfMapLog(JitCodeCache* code_cache, ArtMethod* method, bool osr)
REQUIRES_SHARED(Locks::mutator_lock_);
void ClosePerfMapLog();
// For perf-inject profiling
void OpenJitDumpLog();
- void WriteJitDumpLog(JitCodeCache* code_cache, ArtMethod* method)
+ void WriteJitDumpLog(JitCodeCache* code_cache, ArtMethod* method, bool osr)
REQUIRES_SHARED(Locks::mutator_lock_);
void CloseJitDumpLog();
diff --git a/compiler/linker/mips/relative_patcher_mips.cc b/compiler/linker/mips/relative_patcher_mips.cc
index c09950c..fe5f9a9 100644
--- a/compiler/linker/mips/relative_patcher_mips.cc
+++ b/compiler/linker/mips/relative_patcher_mips.cc
@@ -49,9 +49,12 @@
uint32_t target_offset) {
uint32_t anchor_literal_offset = patch.PcInsnOffset();
uint32_t literal_offset = patch.LiteralOffset();
+ uint32_t literal_low_offset;
bool dex_cache_array = (patch.GetType() == LinkerPatch::Type::kDexCacheArray);
- // Basic sanity checks.
+ // Perform basic sanity checks and initialize `literal_low_offset` to point
+ // to the instruction containing the 16 least significant bits of the
+ // relative address.
if (is_r6) {
DCHECK_GE(code->size(), 8u);
DCHECK_LE(literal_offset, code->size() - 8u);
@@ -61,10 +64,10 @@
DCHECK_EQ((*code)[literal_offset + 1], 0x12);
DCHECK_EQ(((*code)[literal_offset + 2] & 0x1F), 0x1E);
DCHECK_EQ(((*code)[literal_offset + 3] & 0xFC), 0xEC);
- // ADDIU reg, reg, offset_low
+ // instr reg(s), offset_low
DCHECK_EQ((*code)[literal_offset + 4], 0x78);
DCHECK_EQ((*code)[literal_offset + 5], 0x56);
- DCHECK_EQ(((*code)[literal_offset + 7] & 0xFC), 0x24);
+ literal_low_offset = literal_offset + 4;
} else {
DCHECK_GE(code->size(), 16u);
DCHECK_LE(literal_offset, code->size() - 12u);
@@ -84,36 +87,34 @@
DCHECK_EQ((*code)[literal_offset + 1], 0x12);
DCHECK_EQ(((*code)[literal_offset + 2] & 0xE0), 0x00);
DCHECK_EQ((*code)[literal_offset + 3], 0x3C);
- // ORI reg, reg, offset_low
- DCHECK_EQ((*code)[literal_offset + 4], 0x78);
- DCHECK_EQ((*code)[literal_offset + 5], 0x56);
- DCHECK_EQ(((*code)[literal_offset + 7] & 0xFC), 0x34);
// ADDU reg, reg, reg2
- DCHECK_EQ((*code)[literal_offset + 8], 0x21);
- DCHECK_EQ(((*code)[literal_offset + 9] & 0x07), 0x00);
+ DCHECK_EQ((*code)[literal_offset + 4], 0x21);
+ DCHECK_EQ(((*code)[literal_offset + 5] & 0x07), 0x00);
if (dex_cache_array) {
// reg2 is either RA or from HMipsComputeBaseMethodAddress.
- DCHECK_EQ(((*code)[literal_offset + 10] & 0x1F), 0x1F);
+ DCHECK_EQ(((*code)[literal_offset + 6] & 0x1F), 0x1F);
}
- DCHECK_EQ(((*code)[literal_offset + 11] & 0xFC), 0x00);
+ DCHECK_EQ(((*code)[literal_offset + 7] & 0xFC), 0x00);
+ // instr reg(s), offset_low
+ DCHECK_EQ((*code)[literal_offset + 8], 0x78);
+ DCHECK_EQ((*code)[literal_offset + 9], 0x56);
+ literal_low_offset = literal_offset + 8;
}
// Apply patch.
uint32_t anchor_offset = patch_offset - literal_offset + anchor_literal_offset;
uint32_t diff = target_offset - anchor_offset;
- if (dex_cache_array) {
+ if (dex_cache_array && !is_r6) {
diff += kDexCacheArrayLwOffset;
}
- if (is_r6) {
- diff += (diff & 0x8000) << 1; // Account for sign extension in ADDIU.
- }
+ diff += (diff & 0x8000) << 1; // Account for sign extension in "instr reg(s), offset_low".
// LUI reg, offset_high / AUIPC reg, offset_high
(*code)[literal_offset + 0] = static_cast<uint8_t>(diff >> 16);
(*code)[literal_offset + 1] = static_cast<uint8_t>(diff >> 24);
- // ORI reg, reg, offset_low / ADDIU reg, reg, offset_low
- (*code)[literal_offset + 4] = static_cast<uint8_t>(diff >> 0);
- (*code)[literal_offset + 5] = static_cast<uint8_t>(diff >> 8);
+ // instr reg(s), offset_low
+ (*code)[literal_low_offset + 0] = static_cast<uint8_t>(diff >> 0);
+ (*code)[literal_low_offset + 1] = static_cast<uint8_t>(diff >> 8);
}
} // namespace linker
diff --git a/compiler/linker/mips/relative_patcher_mips32r6_test.cc b/compiler/linker/mips/relative_patcher_mips32r6_test.cc
index 4f9a3a0..474eb73 100644
--- a/compiler/linker/mips/relative_patcher_mips32r6_test.cc
+++ b/compiler/linker/mips/relative_patcher_mips32r6_test.cc
@@ -20,10 +20,6 @@
namespace art {
namespace linker {
-// We'll maximize the range of a single load instruction for dex cache array accesses
-// by aligning offset -32768 with the offset of the first used element.
-static constexpr uint32_t kDexCacheArrayLwOffset = 0x8000;
-
class Mips32r6RelativePatcherTest : public RelativePatcherTest {
public:
Mips32r6RelativePatcherTest() : RelativePatcherTest(kMips, "mips32r6") {}
@@ -64,9 +60,6 @@
ASSERT_TRUE(result.first);
uint32_t diff = target_offset - (result.second + kAnchorOffset);
- if (patches[0].GetType() == LinkerPatch::Type::kDexCacheArray) {
- diff += kDexCacheArrayLwOffset;
- }
diff += (diff & 0x8000) << 1; // Account for sign extension in addiu.
const uint8_t expected_code[] = {
diff --git a/compiler/linker/mips/relative_patcher_mips_test.cc b/compiler/linker/mips/relative_patcher_mips_test.cc
index faeb92a..b0d1294 100644
--- a/compiler/linker/mips/relative_patcher_mips_test.cc
+++ b/compiler/linker/mips/relative_patcher_mips_test.cc
@@ -47,12 +47,12 @@
const uint8_t MipsRelativePatcherTest::kUnpatchedPcRelativeRawCode[] = {
0x00, 0x00, 0x10, 0x04, // nal
- 0x34, 0x12, 0x12, 0x3C, // lui s2, high(diff); placeholder = 0x1234
- 0x78, 0x56, 0x52, 0x36, // ori s2, s2, low(diff); placeholder = 0x5678
- 0x21, 0x90, 0x5F, 0x02, // addu s2, s2, ra
+ 0x34, 0x12, 0x12, 0x3C, // lui s2, high(diff); placeholder = 0x1234
+ 0x21, 0x90, 0x5F, 0x02, // addu s2, s2, ra
+ 0x78, 0x56, 0x52, 0x26, // addiu s2, s2, low(diff); placeholder = 0x5678
};
const uint32_t MipsRelativePatcherTest::kLiteralOffset = 4; // At lui (where patching starts).
-const uint32_t MipsRelativePatcherTest::kAnchorOffset = 8; // At ori (where PC+0 points).
+const uint32_t MipsRelativePatcherTest::kAnchorOffset = 8; // At addu (where PC+0 points).
const ArrayRef<const uint8_t> MipsRelativePatcherTest::kUnpatchedPcRelativeCode(
kUnpatchedPcRelativeRawCode);
@@ -68,12 +68,13 @@
if (patches[0].GetType() == LinkerPatch::Type::kDexCacheArray) {
diff += kDexCacheArrayLwOffset;
}
+ diff += (diff & 0x8000) << 1; // Account for sign extension in addiu.
const uint8_t expected_code[] = {
0x00, 0x00, 0x10, 0x04,
static_cast<uint8_t>(diff >> 16), static_cast<uint8_t>(diff >> 24), 0x12, 0x3C,
- static_cast<uint8_t>(diff), static_cast<uint8_t>(diff >> 8), 0x52, 0x36,
0x21, 0x90, 0x5F, 0x02,
+ static_cast<uint8_t>(diff), static_cast<uint8_t>(diff >> 8), 0x52, 0x26,
};
EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
}
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 34b33a1..d5842a8 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -487,7 +487,7 @@
EXPECT_EQ(72U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
EXPECT_EQ(20U, sizeof(OatQuickMethodHeader));
- EXPECT_EQ(157 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
+ EXPECT_EQ(161 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
sizeof(QuickEntryPoints));
}
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index de5af97..a16a34b 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -716,7 +716,10 @@
if (compiled_class != nullptr) {
status = compiled_class->GetStatus();
} else if (writer_->compiler_driver_->GetVerificationResults()->IsClassRejected(class_ref)) {
- status = mirror::Class::kStatusError;
+ // The oat class status is used only for verification of resolved classes,
+ // so use kStatusErrorResolved whether the class was resolved or unresolved
+ // during compile-time verification.
+ status = mirror::Class::kStatusErrorResolved;
} else {
status = mirror::Class::kStatusNotReady;
}
@@ -2263,6 +2266,10 @@
File* raw_file = oat_dex_file->source_.GetRawFile();
dex_file = DexFile::OpenDex(raw_file->Fd(), location, /* verify_checksum */ true, &error_msg);
}
+ if (dex_file == nullptr) {
+ LOG(ERROR) << "Failed to open dex file for layout:" << error_msg;
+ return false;
+ }
Options options;
options.output_to_memmap_ = true;
DexLayout dex_layout(options, profile_compilation_info_, nullptr);
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 8cf4089..e4ad422 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -32,6 +32,8 @@
namespace art {
+class CodeGenerator;
+
class HGraphBuilder : public ValueObject {
public:
HGraphBuilder(HGraph* graph,
@@ -40,6 +42,7 @@
const DexFile* dex_file,
const DexFile::CodeItem& code_item,
CompilerDriver* driver,
+ CodeGenerator* code_generator,
OptimizingCompilerStats* compiler_stats,
const uint8_t* interpreter_metadata,
Handle<mirror::DexCache> dex_cache,
@@ -61,6 +64,7 @@
dex_compilation_unit,
outer_compilation_unit,
driver,
+ code_generator,
interpreter_metadata,
compiler_stats,
dex_cache,
@@ -89,6 +93,7 @@
/* dex_compilation_unit */ nullptr,
/* outer_compilation_unit */ nullptr,
/* compiler_driver */ nullptr,
+ /* code_generator */ nullptr,
/* interpreter_metadata */ nullptr,
/* compiler_stats */ nullptr,
null_dex_cache_,
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 99427f0..d68aa51 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1417,4 +1417,22 @@
EmitJitRootPatches(code, roots_data);
}
+QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(Handle<mirror::Class> array_klass) {
+ ScopedObjectAccess soa(Thread::Current());
+ if (array_klass.Get() == nullptr) {
+ // This can only happen for non-primitive arrays, as primitive arrays can always
+ // be resolved.
+ return kQuickAllocArrayResolved32;
+ }
+
+ switch (array_klass->GetComponentSize()) {
+ case 1: return kQuickAllocArrayResolved8;
+ case 2: return kQuickAllocArrayResolved16;
+ case 4: return kQuickAllocArrayResolved32;
+ case 8: return kQuickAllocArrayResolved64;
+ }
+ LOG(FATAL) << "Unreachable";
+ return kQuickAllocArrayResolved;
+}
+
} // namespace art
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 2d129af..b912672 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -573,6 +573,8 @@
uint32_t GetReferenceSlowFlagOffset() const;
uint32_t GetReferenceDisableFlagOffset() const;
+ static QuickEntrypointEnum GetArrayAllocationEntrypoint(Handle<mirror::Class> array_klass);
+
protected:
// Patch info used for recording locations of required linker patches and their targets,
// i.e. target method, string, type or code identified by their dex file and index.
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index f5b6ebe..20cdae3 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -3993,8 +3993,11 @@
void InstructionCodeGeneratorARM::VisitNewArray(HNewArray* instruction) {
// Note: if heap poisoning is enabled, the entry point takes cares
// of poisoning the reference.
- codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+ QuickEntrypointEnum entrypoint =
+ CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
+ DCHECK(!codegen_->IsLeafMethod());
}
void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
@@ -5719,6 +5722,9 @@
HLoadClass::LoadKind CodeGeneratorARM::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kInvalid:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
@@ -5849,6 +5855,7 @@
break;
}
case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 9762ee8..598be47 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1452,6 +1452,19 @@
(cst->IsDoubleConstant() && type == Primitive::kPrimDouble);
}
+// Allocate a scratch register from the VIXL pool, querying first into
+// the floating-point register pool, and then the core register
+// pool. This is essentially a reimplementation of
+// vixl::aarch64::UseScratchRegisterScope::AcquireCPURegisterOfSize
+// using a different allocation strategy.
+static CPURegister AcquireFPOrCoreCPURegisterOfSize(vixl::aarch64::MacroAssembler* masm,
+ vixl::aarch64::UseScratchRegisterScope* temps,
+ int size_in_bits) {
+ return masm->GetScratchFPRegisterList()->IsEmpty()
+ ? CPURegister(temps->AcquireRegisterOfSize(size_in_bits))
+ : CPURegister(temps->AcquireVRegisterOfSize(size_in_bits));
+}
+
void CodeGeneratorARM64::MoveLocation(Location destination,
Location source,
Primitive::Type dst_type) {
@@ -1533,7 +1546,9 @@
HConstant* src_cst = source.GetConstant();
CPURegister temp;
if (src_cst->IsZeroBitPattern()) {
- temp = (src_cst->IsLongConstant() || src_cst->IsDoubleConstant()) ? xzr : wzr;
+ temp = (src_cst->IsLongConstant() || src_cst->IsDoubleConstant())
+ ? Register(xzr)
+ : Register(wzr);
} else {
if (src_cst->IsIntConstant()) {
temp = temps.AcquireW();
@@ -1561,8 +1576,16 @@
// a move is blocked by a another move requiring a scratch FP
// register, which would reserve D31). To prevent this issue, we
// ask for a scratch register of any type (core or FP).
- CPURegister temp =
- temps.AcquireCPURegisterOfSize(destination.IsDoubleStackSlot() ? kXRegSize : kWRegSize);
+ //
+ // Also, we start by asking for a FP scratch register first, as the
+ // demand of scratch core registers is higher. This is why we
+ // use AcquireFPOrCoreCPURegisterOfSize instead of
+ // UseScratchRegisterScope::AcquireCPURegisterOfSize, which
+ // allocates core scratch registers first.
+ CPURegister temp = AcquireFPOrCoreCPURegisterOfSize(
+ GetVIXLAssembler(),
+ &temps,
+ (destination.IsDoubleStackSlot() ? kXRegSize : kWRegSize));
__ Ldr(temp, StackOperandFrom(source));
__ Str(temp, StackOperandFrom(destination));
}
@@ -1903,6 +1926,9 @@
LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
+ // We need a temporary register for the read barrier marking slow
+ // path in CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier.
+ locations->AddTemp(Location::RequiresRegister());
}
locations->SetInAt(0, Location::RequiresRegister());
if (Primitive::IsFloatingPointType(instruction->GetType())) {
@@ -1930,11 +1956,9 @@
if (field_type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// Object FieldGet with Baker's read barrier case.
- MacroAssembler* masm = GetVIXLAssembler();
- UseScratchRegisterScope temps(masm);
// /* HeapReference<Object> */ out = *(base + offset)
Register base = RegisterFrom(base_loc, Primitive::kPrimNot);
- Register temp = temps.AcquireW();
+ Register temp = WRegisterFrom(locations->GetTemp(0));
// Note that potential implicit null checks are handled in this
// CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier call.
codegen_->GenerateFieldLoadWithBakerReadBarrier(
@@ -4336,6 +4360,9 @@
HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kInvalid:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
@@ -4474,6 +4501,7 @@
break;
}
case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
@@ -4762,7 +4790,9 @@
void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
// Note: if heap poisoning is enabled, the entry point takes cares
// of poisoning the reference.
- codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+ QuickEntrypointEnum entrypoint =
+ CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
}
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 7d3c655..f6cb90a 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -210,12 +210,11 @@
Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
return helpers::LocationFrom(vixl::aarch64::x0);
}
- Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
- return Primitive::Is64BitType(type)
+ Location GetSetValueLocation(Primitive::Type type ATTRIBUTE_UNUSED,
+ bool is_instance) const OVERRIDE {
+ return is_instance
? helpers::LocationFrom(vixl::aarch64::x2)
- : (is_instance
- ? helpers::LocationFrom(vixl::aarch64::x2)
- : helpers::LocationFrom(vixl::aarch64::x1));
+ : helpers::LocationFrom(vixl::aarch64::x1);
}
Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
return helpers::LocationFrom(vixl::aarch64::d0);
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 2ef145b..e189608 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -3998,15 +3998,18 @@
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetOut(LocationFrom(r0));
- locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
}
void InstructionCodeGeneratorARMVIXL::VisitNewArray(HNewArray* instruction) {
// Note: if heap poisoning is enabled, the entry point takes cares
// of poisoning the reference.
- codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+ QuickEntrypointEnum entrypoint =
+ CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
+ DCHECK(!codegen_->IsLeafMethod());
}
void LocationsBuilderARMVIXL::VisitParameterValue(HParameterValue* instruction) {
@@ -5796,6 +5799,9 @@
HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kInvalid:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
@@ -5916,6 +5922,7 @@
break;
}
case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
@@ -7678,15 +7685,21 @@
vixl32::Register jump_offset = temps.Acquire();
// Load jump offset from the table.
- __ Adr(table_base, jump_table->GetTableStartLabel());
- __ Ldr(jump_offset, MemOperand(table_base, key_reg, vixl32::LSL, 2));
+ {
+ const size_t jump_size = switch_instr->GetNumEntries() * sizeof(int32_t);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ (vixl32::kMaxInstructionSizeInBytes * 4) + jump_size,
+ CodeBufferCheckScope::kMaximumSize);
+ __ adr(table_base, jump_table->GetTableStartLabel());
+ __ ldr(jump_offset, MemOperand(table_base, key_reg, vixl32::LSL, 2));
- // Jump to target block by branching to table_base(pc related) + offset.
- vixl32::Register target_address = table_base;
- __ Add(target_address, table_base, jump_offset);
- __ Bx(target_address);
+ // Jump to target block by branching to table_base(pc related) + offset.
+ vixl32::Register target_address = table_base;
+ __ add(target_address, table_base, jump_offset);
+ __ bx(target_address);
- jump_table->EmitTable(codegen_);
+ jump_table->EmitTable(codegen_);
+ }
}
}
void LocationsBuilderARMVIXL::VisitArmDexCacheArraysBase(HArmDexCacheArraysBase* base) {
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 76be74e..0677dad 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -258,8 +258,10 @@
DCHECK_NE(out.AsRegister<Register>(), AT);
CodeGeneratorMIPS::PcRelativePatchInfo* info =
mips_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
- mips_codegen->EmitPcRelativeAddressPlaceholder(info, TMP, base);
- __ StoreToOffset(kStoreWord, out.AsRegister<Register>(), TMP, 0);
+ bool reordering = __ SetReorder(false);
+ mips_codegen->EmitPcRelativeAddressPlaceholderHigh(info, TMP, base);
+ __ StoreToOffset(kStoreWord, out.AsRegister<Register>(), TMP, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
}
__ B(GetExitLabel());
}
@@ -313,8 +315,10 @@
DCHECK_NE(out, AT);
CodeGeneratorMIPS::PcRelativePatchInfo* info =
mips_codegen->NewPcRelativeStringPatch(load->GetDexFile(), string_index);
- mips_codegen->EmitPcRelativeAddressPlaceholder(info, TMP, base);
- __ StoreToOffset(kStoreWord, out, TMP, 0);
+ bool reordering = __ SetReorder(false);
+ mips_codegen->EmitPcRelativeAddressPlaceholderHigh(info, TMP, base);
+ __ StoreToOffset(kStoreWord, out, TMP, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
__ B(GetExitLabel());
}
@@ -480,6 +484,8 @@
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_address_patches_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
clobbered_ra_(false) {
// Save RA (containing the return address) to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(RA));
@@ -700,9 +706,6 @@
// (this can happen in leaf methods), force CodeGenerator::InitializeCodeGeneration()
// into the path that creates a stack frame so that RA can be explicitly saved and restored.
// RA can't otherwise be saved/restored when it's the only spilled register.
- // TODO: Can this be improved? It causes creation of a stack frame (while RA might be
- // saved in an unused temporary register) and saving of RA and the current method pointer
- // in the frame.
return CodeGenerator::HasAllocatedCalleeSaveRegisters() || clobbered_ra_;
}
@@ -1127,16 +1130,15 @@
return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), map);
}
-void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholder(
- PcRelativePatchInfo* info, Register out, Register base) {
- bool reordering = __ SetReorder(false);
+void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info,
+ Register out,
+ Register base) {
if (GetInstructionSetFeatures().IsR6()) {
DCHECK_EQ(base, ZERO);
__ Bind(&info->high_label);
__ Bind(&info->pc_rel_label);
- // Add a 32-bit offset to PC.
+ // Add the high half of a 32-bit offset to PC.
__ Auipc(out, /* placeholder */ 0x1234);
- __ Addiu(out, out, /* placeholder */ 0x5678);
} else {
// If base is ZERO, emit NAL to obtain the actual base.
if (base == ZERO) {
@@ -1150,11 +1152,72 @@
if (base == ZERO) {
__ Bind(&info->pc_rel_label);
}
- __ Ori(out, out, /* placeholder */ 0x5678);
- // Add a 32-bit offset to PC.
+ // Add the high half of a 32-bit offset to PC.
__ Addu(out, out, (base == ZERO) ? RA : base);
}
- __ SetReorder(reordering);
+ // The immediately following instruction will add the sign-extended low half of the 32-bit
+ // offset to `out` (e.g. lw, jialc, addiu).
+}
+
+CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootStringPatch(
+ const DexFile& dex_file,
+ dex::StringIndex dex_index,
+ Handle<mirror::String> handle) {
+ jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
+ jit_string_patches_.emplace_back(dex_file, dex_index.index_);
+ return &jit_string_patches_.back();
+}
+
+CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootClassPatch(
+ const DexFile& dex_file,
+ dex::TypeIndex dex_index,
+ Handle<mirror::Class> handle) {
+ jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
+ jit_class_patches_.emplace_back(dex_file, dex_index.index_);
+ return &jit_class_patches_.back();
+}
+
+void CodeGeneratorMIPS::PatchJitRootUse(uint8_t* code,
+ const uint8_t* roots_data,
+ const CodeGeneratorMIPS::JitPatchInfo& info,
+ uint64_t index_in_table) const {
+ uint32_t literal_offset = GetAssembler().GetLabelLocation(&info.high_label);
+ uintptr_t address =
+ reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
+ uint32_t addr32 = dchecked_integral_cast<uint32_t>(address);
+ // lui reg, addr32_high
+ DCHECK_EQ(code[literal_offset + 0], 0x34);
+ DCHECK_EQ(code[literal_offset + 1], 0x12);
+ DCHECK_EQ((code[literal_offset + 2] & 0xE0), 0x00);
+ DCHECK_EQ(code[literal_offset + 3], 0x3C);
+ // lw reg, reg, addr32_low
+ DCHECK_EQ(code[literal_offset + 4], 0x78);
+ DCHECK_EQ(code[literal_offset + 5], 0x56);
+ DCHECK_EQ((code[literal_offset + 7] & 0xFC), 0x8C);
+ addr32 += (addr32 & 0x8000) << 1; // Account for sign extension in "lw reg, reg, addr32_low".
+ // lui reg, addr32_high
+ code[literal_offset + 0] = static_cast<uint8_t>(addr32 >> 16);
+ code[literal_offset + 1] = static_cast<uint8_t>(addr32 >> 24);
+ // lw reg, reg, addr32_low
+ code[literal_offset + 4] = static_cast<uint8_t>(addr32 >> 0);
+ code[literal_offset + 5] = static_cast<uint8_t>(addr32 >> 8);
+}
+
+void CodeGeneratorMIPS::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
+ for (const JitPatchInfo& info : jit_string_patches_) {
+ const auto& it = jit_string_roots_.find(StringReference(&info.target_dex_file,
+ dex::StringIndex(info.index)));
+ DCHECK(it != jit_string_roots_.end());
+ PatchJitRootUse(code, roots_data, info, it->second);
+ }
+ for (const JitPatchInfo& info : jit_class_patches_) {
+ const auto& it = jit_class_roots_.find(TypeReference(&info.target_dex_file,
+ dex::TypeIndex(info.index)));
+ DCHECK(it != jit_class_roots_.end());
+ PatchJitRootUse(code, roots_data, info, it->second);
+ }
}
void CodeGeneratorMIPS::MarkGCCard(Register object,
@@ -5159,7 +5222,8 @@
// art::PrepareForRegisterAllocation.
DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
- bool has_extra_input = invoke->HasPcRelativeDexCache();
+ bool is_r6 = codegen_->GetInstructionSetFeatures().IsR6();
+ bool has_extra_input = invoke->HasPcRelativeDexCache() && !is_r6;
IntrinsicLocationsBuilderMIPS intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
@@ -5200,12 +5264,13 @@
if (kEmitCompilerReadBarrier) {
UNIMPLEMENTED(FATAL) << "for read barrier";
}
- // We disable PC-relative load when there is an irreducible loop, as the optimization
+ // We disable PC-relative load on pre-R6 when there is an irreducible loop, as the optimization
// is incompatible with it.
// TODO: Create as many MipsDexCacheArraysBase instructions as needed for methods
// with irreducible loops.
bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
- bool fallback_load = has_irreducible_loops;
+ bool is_r6 = GetInstructionSetFeatures().IsR6();
+ bool fallback_load = has_irreducible_loops && !is_r6;
switch (desired_string_load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress:
DCHECK(!GetCompilerOptions().GetCompilePic());
@@ -5220,8 +5285,7 @@
break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
- // TODO: implement.
- fallback_load = true;
+ fallback_load = false;
break;
case HLoadString::LoadKind::kDexCacheViaMethod:
fallback_load = false;
@@ -5238,11 +5302,15 @@
if (kEmitCompilerReadBarrier) {
UNIMPLEMENTED(FATAL) << "for read barrier";
}
- // We disable pc-relative load when there is an irreducible loop, as the optimization
+ // We disable PC-relative load on pre-R6 when there is an irreducible loop, as the optimization
// is incompatible with it.
bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
- bool fallback_load = has_irreducible_loops;
+ bool is_r6 = GetInstructionSetFeatures().IsR6();
+ bool fallback_load = has_irreducible_loops && !is_r6;
switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kInvalid:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
fallback_load = false;
break;
@@ -5259,7 +5327,7 @@
break;
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
- fallback_load = true;
+ fallback_load = false;
break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
fallback_load = false;
@@ -5273,6 +5341,7 @@
Register CodeGeneratorMIPS::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke,
Register temp) {
+ CHECK(!GetInstructionSetFeatures().IsR6());
CHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
Location location = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
if (!invoke->GetLocations()->Intrinsified()) {
@@ -5301,13 +5370,13 @@
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
HInvokeStaticOrDirect::DispatchInfo dispatch_info = desired_dispatch_info;
- // We disable PC-relative load when there is an irreducible loop, as the optimization
+ // We disable PC-relative load on pre-R6 when there is an irreducible loop, as the optimization
// is incompatible with it.
bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
- bool fallback_load = true;
+ bool is_r6 = GetInstructionSetFeatures().IsR6();
+ bool fallback_load = has_irreducible_loops && !is_r6;
switch (dispatch_info.method_load_kind) {
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
- fallback_load = has_irreducible_loops;
break;
default:
fallback_load = false;
@@ -5325,7 +5394,8 @@
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
HInvokeStaticOrDirect::MethodLoadKind method_load_kind = invoke->GetMethodLoadKind();
HInvokeStaticOrDirect::CodePtrLocation code_ptr_location = invoke->GetCodePtrLocation();
- Register base_reg = invoke->HasPcRelativeDexCache()
+ bool is_r6 = GetInstructionSetFeatures().IsR6();
+ Register base_reg = (invoke->HasPcRelativeDexCache() && !is_r6)
? GetInvokeStaticOrDirectExtraParameter(invoke, temp.AsRegister<Register>())
: ZERO;
@@ -5346,14 +5416,23 @@
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
__ LoadConst32(temp.AsRegister<Register>(), invoke->GetMethodAddress());
break;
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
- HMipsDexCacheArraysBase* base =
- invoke->InputAt(invoke->GetSpecialInputIndex())->AsMipsDexCacheArraysBase();
- int32_t offset =
- invoke->GetDexCacheArrayOffset() - base->GetElementOffset() - kDexCacheArrayLwOffset;
- __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), base_reg, offset);
+ case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
+ if (is_r6) {
+ uint32_t offset = invoke->GetDexCacheArrayOffset();
+ CodeGeneratorMIPS::PcRelativePatchInfo* info =
+ NewPcRelativeDexCacheArrayPatch(invoke->GetDexFileForPcRelativeDexCache(), offset);
+ bool reordering = __ SetReorder(false);
+ EmitPcRelativeAddressPlaceholderHigh(info, TMP, ZERO);
+ __ Lw(temp.AsRegister<Register>(), TMP, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
+ } else {
+ HMipsDexCacheArraysBase* base =
+ invoke->InputAt(invoke->GetSpecialInputIndex())->AsMipsDexCacheArraysBase();
+ int32_t offset =
+ invoke->GetDexCacheArrayOffset() - base->GetElementOffset() - kDexCacheArrayLwOffset;
+ __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), base_reg, offset);
+ }
break;
- }
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
Location current_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
Register reg = temp.AsRegister<Register>();
@@ -5546,7 +5625,10 @@
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS::PcRelativePatchInfo* info =
codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
- codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
+ bool reordering = __ SetReorder(false);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out, base_or_current_method_reg);
+ __ Addiu(out, out, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
break;
}
case HLoadClass::LoadKind::kBootImageAddress: {
@@ -5562,16 +5644,26 @@
case HLoadClass::LoadKind::kBssEntry: {
CodeGeneratorMIPS::PcRelativePatchInfo* info =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
- codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
- __ LoadFromOffset(kLoadWord, out, out, 0);
+ bool reordering = __ SetReorder(false);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out, base_or_current_method_reg);
+ __ LoadFromOffset(kLoadWord, out, out, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
generate_null_check = true;
break;
}
case HLoadClass::LoadKind::kJitTableAddress: {
- LOG(FATAL) << "Unimplemented";
+ CodeGeneratorMIPS::JitPatchInfo* info = codegen_->NewJitRootClassPatch(cls->GetDexFile(),
+ cls->GetTypeIndex(),
+ cls->GetClass());
+ bool reordering = __ SetReorder(false);
+ __ Bind(&info->high_label);
+ __ Lui(out, /* placeholder */ 0x1234);
+ GenerateGcRootFieldLoad(cls, out_loc, out, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
break;
}
case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
@@ -5678,7 +5770,10 @@
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS::PcRelativePatchInfo* info =
codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
- codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
+ bool reordering = __ SetReorder(false);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out, base_or_current_method_reg);
+ __ Addiu(out, out, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
@@ -5694,14 +5789,28 @@
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS::PcRelativePatchInfo* info =
codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
- codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
- __ LoadFromOffset(kLoadWord, out, out, 0);
+ bool reordering = __ SetReorder(false);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out, base_or_current_method_reg);
+ __ LoadFromOffset(kLoadWord, out, out, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load);
codegen_->AddSlowPath(slow_path);
__ Beqz(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
return;
}
+ case HLoadString::LoadKind::kJitTableAddress: {
+ CodeGeneratorMIPS::JitPatchInfo* info =
+ codegen_->NewJitRootStringPatch(load->GetDexFile(),
+ load->GetStringIndex(),
+ load->GetString());
+ bool reordering = __ SetReorder(false);
+ __ Bind(&info->high_label);
+ __ Lui(out, /* placeholder */ 0x1234);
+ GenerateGcRootFieldLoad(load, out_loc, out, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
+ return;
+ }
default:
break;
}
@@ -6894,8 +7003,12 @@
Register reg = base->GetLocations()->Out().AsRegister<Register>();
CodeGeneratorMIPS::PcRelativePatchInfo* info =
codegen_->NewPcRelativeDexCacheArrayPatch(base->GetDexFile(), base->GetElementOffset());
+ CHECK(!codegen_->GetInstructionSetFeatures().IsR6());
+ bool reordering = __ SetReorder(false);
// TODO: Reuse MipsComputeBaseMethodAddress on R2 instead of passing ZERO to force emitting NAL.
- codegen_->EmitPcRelativeAddressPlaceholder(info, reg, ZERO);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(info, reg, ZERO);
+ __ Addiu(reg, reg, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
}
void LocationsBuilderMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index c8fd325..47eba50 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -352,6 +352,7 @@
// Emit linker patches.
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
+ void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
void MarkGCCard(Register object, Register value, bool value_can_be_null);
@@ -463,7 +464,32 @@
Literal* DeduplicateBootImageTypeLiteral(const DexFile& dex_file, dex::TypeIndex type_index);
Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
- void EmitPcRelativeAddressPlaceholder(PcRelativePatchInfo* info, Register out, Register base);
+ void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info, Register out, Register base);
+
+ // The JitPatchInfo is used for JIT string and class loads.
+ struct JitPatchInfo {
+ JitPatchInfo(const DexFile& dex_file, uint64_t idx)
+ : target_dex_file(dex_file), index(idx) { }
+ JitPatchInfo(JitPatchInfo&& other) = default;
+
+ const DexFile& target_dex_file;
+ // String/type index.
+ uint64_t index;
+ // Label for the instruction loading the most significant half of the address.
+ // The least significant half is loaded with the instruction that follows immediately.
+ MipsLabel high_label;
+ };
+
+ void PatchJitRootUse(uint8_t* code,
+ const uint8_t* roots_data,
+ const JitPatchInfo& info,
+ uint64_t index_in_table) const;
+ JitPatchInfo* NewJitRootStringPatch(const DexFile& dex_file,
+ dex::StringIndex dex_index,
+ Handle<mirror::String> handle);
+ JitPatchInfo* NewJitRootClassPatch(const DexFile& dex_file,
+ dex::TypeIndex dex_index,
+ Handle<mirror::Class> handle);
private:
Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);
@@ -512,6 +538,10 @@
ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
// Deduplication map for patchable boot image addresses.
Uint32ToLiteralMap boot_image_address_patches_;
+ // Patches for string root accesses in JIT compiled code.
+ ArenaDeque<JitPatchInfo> jit_string_patches_;
+ // Patches for class root accesses in JIT compiled code.
+ ArenaDeque<JitPatchInfo> jit_class_patches_;
// PC-relative loads on R2 clobber RA, which may need to be preserved explicitly in leaf methods.
// This is a flag set by pc_relative_fixups_mips and dex_cache_array_fixups_mips optimizations.
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 192b4a5..4c8dabf 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -91,9 +91,6 @@
// Space on the stack is reserved for all arguments.
stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;
- // TODO: shouldn't we use a whole machine word per argument on the stack?
- // Implicit 4-byte method pointer (and such) will cause misalignment.
-
return next_location;
}
@@ -434,7 +431,11 @@
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_address_patches_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ jit_string_patches_(StringReferenceValueComparator(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ jit_class_patches_(TypeReferenceValueComparator(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Save RA (containing the return address) to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(RA));
}
@@ -1055,6 +1056,49 @@
// offset to `out` (e.g. ld, jialc, daddiu).
}
+Literal* CodeGeneratorMIPS64::DeduplicateJitStringLiteral(const DexFile& dex_file,
+ dex::StringIndex string_index,
+ Handle<mirror::String> handle) {
+ jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
+ return jit_string_patches_.GetOrCreate(
+ StringReference(&dex_file, string_index),
+ [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
+}
+
+Literal* CodeGeneratorMIPS64::DeduplicateJitClassLiteral(const DexFile& dex_file,
+ dex::TypeIndex type_index,
+ Handle<mirror::Class> handle) {
+ jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
+ return jit_class_patches_.GetOrCreate(
+ TypeReference(&dex_file, type_index),
+ [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
+}
+
+void CodeGeneratorMIPS64::PatchJitRootUse(uint8_t* code,
+ const uint8_t* roots_data,
+ const Literal* literal,
+ uint64_t index_in_table) const {
+ uint32_t literal_offset = GetAssembler().GetLabelLocation(literal->GetLabel());
+ uintptr_t address =
+ reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
+ reinterpret_cast<uint32_t*>(code + literal_offset)[0] = dchecked_integral_cast<uint32_t>(address);
+}
+
+void CodeGeneratorMIPS64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
+ for (const auto& entry : jit_string_patches_) {
+ const auto& it = jit_string_roots_.find(entry.first);
+ DCHECK(it != jit_string_roots_.end());
+ PatchJitRootUse(code, roots_data, entry.second, it->second);
+ }
+ for (const auto& entry : jit_class_patches_) {
+ const auto& it = jit_class_roots_.find(entry.first);
+ DCHECK(it != jit_class_roots_.end());
+ PatchJitRootUse(code, roots_data, entry.second, it->second);
+ }
+}
+
void CodeGeneratorMIPS64::SetupBlockedRegisters() const {
// ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
blocked_core_registers_[ZERO] = true;
@@ -3117,14 +3161,6 @@
Location root,
GpuRegister obj,
uint32_t offset) {
- // When handling PC-relative loads, the caller calls
- // EmitPcRelativeAddressPlaceholderHigh() and then GenerateGcRootFieldLoad().
- // The relative patcher expects the two methods to emit the following patchable
- // sequence of instructions in this case:
- // auipc reg1, 0x1234 // 0x1234 is a placeholder for offset_high.
- // lwu reg2, 0x5678(reg1) // 0x5678 is a placeholder for offset_low.
- // TODO: Adjust GenerateGcRootFieldLoad() and its caller when this method is
- // extended (e.g. for read barriers) so as not to break the relative patcher.
GpuRegister root_reg = root.AsRegister<GpuRegister>();
if (kEmitCompilerReadBarrier) {
UNIMPLEMENTED(FATAL) << "for read barrier";
@@ -3317,8 +3353,6 @@
break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
- // TODO: implement.
- fallback_load = true;
break;
}
if (fallback_load) {
@@ -3334,6 +3368,9 @@
}
bool fallback_load = false;
switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kInvalid:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
@@ -3349,8 +3386,6 @@
break;
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
- // TODO: implement.
- fallback_load = true;
break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
@@ -3588,11 +3623,16 @@
generate_null_check = true;
break;
}
- case HLoadClass::LoadKind::kJitTableAddress: {
- LOG(FATAL) << "Unimplemented";
+ case HLoadClass::LoadKind::kJitTableAddress:
+ __ LoadLiteral(out,
+ kLoadUnsignedWord,
+ codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
+ cls->GetTypeIndex(),
+ cls->GetClass()));
+ GenerateGcRootFieldLoad(cls, out_loc, out, 0);
break;
- }
case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
@@ -3693,6 +3733,14 @@
__ Bind(slow_path->GetExitLabel());
return;
}
+ case HLoadString::LoadKind::kJitTableAddress:
+ __ LoadLiteral(out,
+ kLoadUnsignedWord,
+ codegen_->DeduplicateJitStringLiteral(load->GetDexFile(),
+ load->GetStringIndex(),
+ load->GetString()));
+ GenerateGcRootFieldLoad(load, out_loc, out, 0);
+ return;
default:
break;
}
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 52b780c..26cc7dc 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -52,7 +52,7 @@
static constexpr GpuRegister kCoreCalleeSaves[] =
- { S0, S1, S2, S3, S4, S5, S6, S7, GP, S8, RA }; // TODO: review
+ { S0, S1, S2, S3, S4, S5, S6, S7, GP, S8, RA };
static constexpr FpuRegister kFpuCalleeSaves[] =
{ F24, F25, F26, F27, F28, F29, F30, F31 };
@@ -115,12 +115,11 @@
Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
return Location::RegisterLocation(V0);
}
- Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
- return Primitive::Is64BitType(type)
+ Location GetSetValueLocation(Primitive::Type type ATTRIBUTE_UNUSED,
+ bool is_instance) const OVERRIDE {
+ return is_instance
? Location::RegisterLocation(A2)
- : (is_instance
- ? Location::RegisterLocation(A2)
- : Location::RegisterLocation(A1));
+ : Location::RegisterLocation(A1);
}
Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
return Location::FpuRegisterLocation(F0);
@@ -313,6 +312,7 @@
// Emit linker patches.
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
+ void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
void MarkGCCard(GpuRegister object, GpuRegister value, bool value_can_be_null);
@@ -426,10 +426,27 @@
void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info, GpuRegister out);
+ void PatchJitRootUse(uint8_t* code,
+ const uint8_t* roots_data,
+ const Literal* literal,
+ uint64_t index_in_table) const;
+ Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
+ dex::StringIndex string_index,
+ Handle<mirror::String> handle);
+ Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
+ dex::TypeIndex type_index,
+ Handle<mirror::Class> handle);
+
private:
using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, Literal*>;
using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, Literal*>;
using MethodToLiteralMap = ArenaSafeMap<MethodReference, Literal*, MethodReferenceComparator>;
+ using StringToLiteralMap = ArenaSafeMap<StringReference,
+ Literal*,
+ StringReferenceValueComparator>;
+ using TypeToLiteralMap = ArenaSafeMap<TypeReference,
+ Literal*,
+ TypeReferenceValueComparator>;
using BootStringToLiteralMap = ArenaSafeMap<StringReference,
Literal*,
StringReferenceValueComparator>;
@@ -477,6 +494,10 @@
ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
// Deduplication map for patchable boot image addresses.
Uint32ToLiteralMap boot_image_address_patches_;
+ // Patches for string root accesses in JIT compiled code.
+ StringToLiteralMap jit_string_patches_;
+ // Patches for class root accesses in JIT compiled code.
+ TypeToLiteralMap jit_class_patches_;
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorMIPS64);
};
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 1b74316..137b554 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4214,7 +4214,9 @@
void InstructionCodeGeneratorX86::VisitNewArray(HNewArray* instruction) {
// Note: if heap poisoning is enabled, the entry point takes cares
// of poisoning the reference.
- codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+ QuickEntrypointEnum entrypoint =
+ CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
}
@@ -6022,6 +6024,9 @@
HLoadClass::LoadKind CodeGeneratorX86::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kInvalid:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
@@ -6157,6 +6162,7 @@
break;
}
case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 7350fcc..5360dc9 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -110,7 +110,9 @@
}
Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
return Primitive::Is64BitType(type)
- ? Location::RegisterPairLocation(EDX, EBX)
+ ? (is_instance
+ ? Location::RegisterPairLocation(EDX, EBX)
+ : Location::RegisterPairLocation(ECX, EDX))
: (is_instance
? Location::RegisterLocation(EDX)
: Location::RegisterLocation(ECX));
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index c4caf4b..c5367ce 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -4096,7 +4096,9 @@
void InstructionCodeGeneratorX86_64::VisitNewArray(HNewArray* instruction) {
// Note: if heap poisoning is enabled, the entry point takes cares
// of poisoning the reference.
- codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+ QuickEntrypointEnum entrypoint =
+ CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
}
@@ -5425,6 +5427,9 @@
HLoadClass::LoadKind CodeGeneratorX86_64::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kInvalid:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 3438b81..3a83731 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -92,12 +92,11 @@
Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
return Location::RegisterLocation(RAX);
}
- Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
- return Primitive::Is64BitType(type)
+ Location GetSetValueLocation(Primitive::Type type ATTRIBUTE_UNUSED, bool is_instance)
+ const OVERRIDE {
+ return is_instance
? Location::RegisterLocation(RDX)
- : (is_instance
- ? Location::RegisterLocation(RDX)
- : Location::RegisterLocation(RSI));
+ : Location::RegisterLocation(RSI);
}
Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
return Location::FpuRegisterLocation(XMM0);
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index e3f3df0..f8bbf68 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -17,30 +17,15 @@
#include <functional>
#include <memory>
-#include "arch/instruction_set.h"
-#include "arch/arm/instruction_set_features_arm.h"
-#include "arch/arm/registers_arm.h"
-#include "arch/arm64/instruction_set_features_arm64.h"
-#include "arch/mips/instruction_set_features_mips.h"
-#include "arch/mips/registers_mips.h"
-#include "arch/mips64/instruction_set_features_mips64.h"
-#include "arch/mips64/registers_mips64.h"
-#include "arch/x86/instruction_set_features_x86.h"
-#include "arch/x86/registers_x86.h"
-#include "arch/x86_64/instruction_set_features_x86_64.h"
#include "base/macros.h"
#include "builder.h"
-#include "code_simulator_container.h"
-#include "common_compiler_test.h"
+#include "codegen_test_utils.h"
#include "dex_file.h"
#include "dex_instruction.h"
#include "driver/compiler_options.h"
-#include "graph_checker.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
-#include "prepare_for_register_allocation.h"
#include "register_allocator_linear_scan.h"
-#include "ssa_liveness_analysis.h"
#include "utils.h"
#include "utils/arm/assembler_arm_vixl.h"
#include "utils/arm/managed_register_arm.h"
@@ -48,324 +33,10 @@
#include "utils/mips64/managed_register_mips64.h"
#include "utils/x86/managed_register_x86.h"
-#ifdef ART_ENABLE_CODEGEN_arm
-#include "code_generator_arm.h"
-#include "code_generator_arm_vixl.h"
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_arm64
-#include "code_generator_arm64.h"
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_x86
-#include "code_generator_x86.h"
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_x86_64
-#include "code_generator_x86_64.h"
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_mips
-#include "code_generator_mips.h"
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_mips64
-#include "code_generator_mips64.h"
-#endif
-
#include "gtest/gtest.h"
namespace art {
-typedef CodeGenerator* (*CreateCodegenFn)(HGraph*, const CompilerOptions&);
-
-class CodegenTargetConfig {
- public:
- CodegenTargetConfig(InstructionSet isa, CreateCodegenFn create_codegen)
- : isa_(isa), create_codegen_(create_codegen) {
- }
- InstructionSet GetInstructionSet() const { return isa_; }
- CodeGenerator* CreateCodeGenerator(HGraph* graph, const CompilerOptions& compiler_options) {
- return create_codegen_(graph, compiler_options);
- }
-
- private:
- CodegenTargetConfig() {}
- InstructionSet isa_;
- CreateCodegenFn create_codegen_;
-};
-
-#ifdef ART_ENABLE_CODEGEN_arm
-// Provide our own codegen, that ensures the C calling conventions
-// are preserved. Currently, ART and C do not match as R4 is caller-save
-// in ART, and callee-save in C. Alternatively, we could use or write
-// the stub that saves and restores all registers, but it is easier
-// to just overwrite the code generator.
-class TestCodeGeneratorARM : public arm::CodeGeneratorARM {
- public:
- TestCodeGeneratorARM(HGraph* graph,
- const ArmInstructionSetFeatures& isa_features,
- const CompilerOptions& compiler_options)
- : arm::CodeGeneratorARM(graph, isa_features, compiler_options) {
- AddAllocatedRegister(Location::RegisterLocation(arm::R6));
- AddAllocatedRegister(Location::RegisterLocation(arm::R7));
- }
-
- void SetupBlockedRegisters() const OVERRIDE {
- arm::CodeGeneratorARM::SetupBlockedRegisters();
- blocked_core_registers_[arm::R4] = true;
- blocked_core_registers_[arm::R6] = false;
- blocked_core_registers_[arm::R7] = false;
- }
-};
-
-// A way to test the VIXL32-based code generator on ARM. This will replace
-// TestCodeGeneratorARM when the VIXL32-based backend replaces the existing one.
-class TestCodeGeneratorARMVIXL : public arm::CodeGeneratorARMVIXL {
- public:
- TestCodeGeneratorARMVIXL(HGraph* graph,
- const ArmInstructionSetFeatures& isa_features,
- const CompilerOptions& compiler_options)
- : arm::CodeGeneratorARMVIXL(graph, isa_features, compiler_options) {
- AddAllocatedRegister(Location::RegisterLocation(arm::R6));
- AddAllocatedRegister(Location::RegisterLocation(arm::R7));
- }
-
- void SetupBlockedRegisters() const OVERRIDE {
- arm::CodeGeneratorARMVIXL::SetupBlockedRegisters();
- blocked_core_registers_[arm::R4] = true;
- blocked_core_registers_[arm::R6] = false;
- blocked_core_registers_[arm::R7] = false;
- }
-};
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_x86
-class TestCodeGeneratorX86 : public x86::CodeGeneratorX86 {
- public:
- TestCodeGeneratorX86(HGraph* graph,
- const X86InstructionSetFeatures& isa_features,
- const CompilerOptions& compiler_options)
- : x86::CodeGeneratorX86(graph, isa_features, compiler_options) {
- // Save edi, we need it for getting enough registers for long multiplication.
- AddAllocatedRegister(Location::RegisterLocation(x86::EDI));
- }
-
- void SetupBlockedRegisters() const OVERRIDE {
- x86::CodeGeneratorX86::SetupBlockedRegisters();
- // ebx is a callee-save register in C, but caller-save for ART.
- blocked_core_registers_[x86::EBX] = true;
-
- // Make edi available.
- blocked_core_registers_[x86::EDI] = false;
- }
-};
-#endif
-
-class InternalCodeAllocator : public CodeAllocator {
- public:
- InternalCodeAllocator() : size_(0) { }
-
- virtual uint8_t* Allocate(size_t size) {
- size_ = size;
- memory_.reset(new uint8_t[size]);
- return memory_.get();
- }
-
- size_t GetSize() const { return size_; }
- uint8_t* GetMemory() const { return memory_.get(); }
-
- private:
- size_t size_;
- std::unique_ptr<uint8_t[]> memory_;
-
- DISALLOW_COPY_AND_ASSIGN(InternalCodeAllocator);
-};
-
-static bool CanExecuteOnHardware(InstructionSet target_isa) {
- return (target_isa == kRuntimeISA)
- // Handle the special case of ARM, with two instructions sets (ARM32 and Thumb-2).
- || (kRuntimeISA == kArm && target_isa == kThumb2);
-}
-
-static bool CanExecute(InstructionSet target_isa) {
- CodeSimulatorContainer simulator(target_isa);
- return CanExecuteOnHardware(target_isa) || simulator.CanSimulate();
-}
-
-template <typename Expected>
-static Expected SimulatorExecute(CodeSimulator* simulator, Expected (*f)());
-
-template <>
-bool SimulatorExecute<bool>(CodeSimulator* simulator, bool (*f)()) {
- simulator->RunFrom(reinterpret_cast<intptr_t>(f));
- return simulator->GetCReturnBool();
-}
-
-template <>
-int32_t SimulatorExecute<int32_t>(CodeSimulator* simulator, int32_t (*f)()) {
- simulator->RunFrom(reinterpret_cast<intptr_t>(f));
- return simulator->GetCReturnInt32();
-}
-
-template <>
-int64_t SimulatorExecute<int64_t>(CodeSimulator* simulator, int64_t (*f)()) {
- simulator->RunFrom(reinterpret_cast<intptr_t>(f));
- return simulator->GetCReturnInt64();
-}
-
-template <typename Expected>
-static void VerifyGeneratedCode(InstructionSet target_isa,
- Expected (*f)(),
- bool has_result,
- Expected expected) {
- ASSERT_TRUE(CanExecute(target_isa)) << "Target isa is not executable.";
-
- // Verify on simulator.
- CodeSimulatorContainer simulator(target_isa);
- if (simulator.CanSimulate()) {
- Expected result = SimulatorExecute<Expected>(simulator.Get(), f);
- if (has_result) {
- ASSERT_EQ(expected, result);
- }
- }
-
- // Verify on hardware.
- if (CanExecuteOnHardware(target_isa)) {
- Expected result = f();
- if (has_result) {
- ASSERT_EQ(expected, result);
- }
- }
-}
-
-template <typename Expected>
-static void Run(const InternalCodeAllocator& allocator,
- const CodeGenerator& codegen,
- bool has_result,
- Expected expected) {
- InstructionSet target_isa = codegen.GetInstructionSet();
-
- typedef Expected (*fptr)();
- CommonCompilerTest::MakeExecutable(allocator.GetMemory(), allocator.GetSize());
- fptr f = reinterpret_cast<fptr>(allocator.GetMemory());
- if (target_isa == kThumb2) {
- // For thumb we need the bottom bit set.
- f = reinterpret_cast<fptr>(reinterpret_cast<uintptr_t>(f) + 1);
- }
- VerifyGeneratedCode(target_isa, f, has_result, expected);
-}
-
-static void ValidateGraph(HGraph* graph) {
- GraphChecker graph_checker(graph);
- graph_checker.Run();
- if (!graph_checker.IsValid()) {
- for (const auto& error : graph_checker.GetErrors()) {
- std::cout << error << std::endl;
- }
- }
- ASSERT_TRUE(graph_checker.IsValid());
-}
-
-template <typename Expected>
-static void RunCodeNoCheck(CodeGenerator* codegen,
- HGraph* graph,
- const std::function<void(HGraph*)>& hook_before_codegen,
- bool has_result,
- Expected expected) {
- SsaLivenessAnalysis liveness(graph, codegen);
- PrepareForRegisterAllocation(graph).Run();
- liveness.Analyze();
- RegisterAllocator::Create(graph->GetArena(), codegen, liveness)->AllocateRegisters();
- hook_before_codegen(graph);
- InternalCodeAllocator allocator;
- codegen->Compile(&allocator);
- Run(allocator, *codegen, has_result, expected);
-}
-
-template <typename Expected>
-static void RunCode(CodeGenerator* codegen,
- HGraph* graph,
- std::function<void(HGraph*)> hook_before_codegen,
- bool has_result,
- Expected expected) {
- ValidateGraph(graph);
- RunCodeNoCheck(codegen, graph, hook_before_codegen, has_result, expected);
-}
-
-template <typename Expected>
-static void RunCode(CodegenTargetConfig target_config,
- HGraph* graph,
- std::function<void(HGraph*)> hook_before_codegen,
- bool has_result,
- Expected expected) {
- CompilerOptions compiler_options;
- std::unique_ptr<CodeGenerator> codegen(target_config.CreateCodeGenerator(graph, compiler_options));
- RunCode(codegen.get(), graph, hook_before_codegen, has_result, expected);
-}
-
-#ifdef ART_ENABLE_CODEGEN_arm
-CodeGenerator* create_codegen_arm(HGraph* graph, const CompilerOptions& compiler_options) {
- std::unique_ptr<const ArmInstructionSetFeatures> features_arm(
- ArmInstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena()) TestCodeGeneratorARM(graph,
- *features_arm.get(),
- compiler_options);
-}
-
-CodeGenerator* create_codegen_arm_vixl32(HGraph* graph, const CompilerOptions& compiler_options) {
- std::unique_ptr<const ArmInstructionSetFeatures> features_arm(
- ArmInstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena())
- TestCodeGeneratorARMVIXL(graph, *features_arm.get(), compiler_options);
-}
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_arm64
-CodeGenerator* create_codegen_arm64(HGraph* graph, const CompilerOptions& compiler_options) {
- std::unique_ptr<const Arm64InstructionSetFeatures> features_arm64(
- Arm64InstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena()) arm64::CodeGeneratorARM64(graph,
- *features_arm64.get(),
- compiler_options);
-}
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_x86
-CodeGenerator* create_codegen_x86(HGraph* graph, const CompilerOptions& compiler_options) {
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena()) TestCodeGeneratorX86(graph, *features_x86.get(), compiler_options);
-}
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_x86_64
-CodeGenerator* create_codegen_x86_64(HGraph* graph, const CompilerOptions& compiler_options) {
- std::unique_ptr<const X86_64InstructionSetFeatures> features_x86_64(
- X86_64InstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena())
- x86_64::CodeGeneratorX86_64(graph, *features_x86_64.get(), compiler_options);
-}
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_mips
-CodeGenerator* create_codegen_mips(HGraph* graph, const CompilerOptions& compiler_options) {
- std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
- MipsInstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena())
- mips::CodeGeneratorMIPS(graph, *features_mips.get(), compiler_options);
-}
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_mips64
-CodeGenerator* create_codegen_mips64(HGraph* graph, const CompilerOptions& compiler_options) {
- std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
- Mips64InstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena())
- mips64::CodeGeneratorMIPS64(graph, *features_mips64.get(), compiler_options);
-}
-#endif
-
// Return all combinations of ISA and code generator that are executable on
// hardware, or on simulator, and that we'd like to test.
static ::std::vector<CodegenTargetConfig> GetTargetConfigs() {
@@ -1067,6 +738,39 @@
}
#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
+// Regression test for b/34760542.
+TEST_F(CodegenTest, ARM64ParallelMoveResolverB34760542) {
+ std::unique_ptr<const Arm64InstructionSetFeatures> features(
+ Arm64InstructionSetFeatures::FromCppDefines());
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraph* graph = CreateGraph(&allocator);
+ arm64::CodeGeneratorARM64 codegen(graph, *features.get(), CompilerOptions());
+
+ codegen.Initialize();
+
+ // The following ParallelMove used to fail this assertion:
+ //
+ // Assertion failed (!available->IsEmpty())
+ //
+ // in vixl::aarch64::UseScratchRegisterScope::AcquireNextAvailable.
+ HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ move->AddMove(Location::DoubleStackSlot(0),
+ Location::DoubleStackSlot(257),
+ Primitive::kPrimDouble,
+ nullptr);
+ move->AddMove(Location::DoubleStackSlot(257),
+ Location::DoubleStackSlot(0),
+ Primitive::kPrimDouble,
+ nullptr);
+ codegen.GetMoveResolver()->EmitNativeCode(move);
+
+ InternalCodeAllocator code_allocator;
+ codegen.Finalize(&code_allocator);
+}
+#endif
+
#ifdef ART_ENABLE_CODEGEN_mips
TEST_F(CodegenTest, MipsClobberRA) {
std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
new file mode 100644
index 0000000..cd95404
--- /dev/null
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -0,0 +1,355 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_CODEGEN_TEST_UTILS_H_
+#define ART_COMPILER_OPTIMIZING_CODEGEN_TEST_UTILS_H_
+
+#include "arch/arm/instruction_set_features_arm.h"
+#include "arch/arm/registers_arm.h"
+#include "arch/arm64/instruction_set_features_arm64.h"
+#include "arch/instruction_set.h"
+#include "arch/mips/instruction_set_features_mips.h"
+#include "arch/mips/registers_mips.h"
+#include "arch/mips64/instruction_set_features_mips64.h"
+#include "arch/mips64/registers_mips64.h"
+#include "arch/x86/instruction_set_features_x86.h"
+#include "arch/x86/registers_x86.h"
+#include "arch/x86_64/instruction_set_features_x86_64.h"
+#include "code_simulator_container.h"
+#include "common_compiler_test.h"
+#include "graph_checker.h"
+#include "prepare_for_register_allocation.h"
+#include "ssa_liveness_analysis.h"
+
+#ifdef ART_ENABLE_CODEGEN_arm
+#include "code_generator_arm.h"
+#include "code_generator_arm_vixl.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_arm64
+#include "code_generator_arm64.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_x86
+#include "code_generator_x86.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_x86_64
+#include "code_generator_x86_64.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_mips
+#include "code_generator_mips.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_mips64
+#include "code_generator_mips64.h"
+#endif
+
+namespace art {
+
+typedef CodeGenerator* (*CreateCodegenFn)(HGraph*, const CompilerOptions&);
+
+class CodegenTargetConfig {
+ public:
+ CodegenTargetConfig(InstructionSet isa, CreateCodegenFn create_codegen)
+ : isa_(isa), create_codegen_(create_codegen) {
+ }
+ InstructionSet GetInstructionSet() const { return isa_; }
+ CodeGenerator* CreateCodeGenerator(HGraph* graph, const CompilerOptions& compiler_options) {
+ return create_codegen_(graph, compiler_options);
+ }
+
+ private:
+ CodegenTargetConfig() {}
+ InstructionSet isa_;
+ CreateCodegenFn create_codegen_;
+};
+
+#ifdef ART_ENABLE_CODEGEN_arm
+// Provide our own codegen, that ensures the C calling conventions
+// are preserved. Currently, ART and C do not match as R4 is caller-save
+// in ART, and callee-save in C. Alternatively, we could use or write
+// the stub that saves and restores all registers, but it is easier
+// to just overwrite the code generator.
+class TestCodeGeneratorARM : public arm::CodeGeneratorARM {
+ public:
+ TestCodeGeneratorARM(HGraph* graph,
+ const ArmInstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options)
+ : arm::CodeGeneratorARM(graph, isa_features, compiler_options) {
+ AddAllocatedRegister(Location::RegisterLocation(arm::R6));
+ AddAllocatedRegister(Location::RegisterLocation(arm::R7));
+ }
+
+ void SetupBlockedRegisters() const OVERRIDE {
+ arm::CodeGeneratorARM::SetupBlockedRegisters();
+ blocked_core_registers_[arm::R4] = true;
+ blocked_core_registers_[arm::R6] = false;
+ blocked_core_registers_[arm::R7] = false;
+ }
+};
+
+// A way to test the VIXL32-based code generator on ARM. This will replace
+// TestCodeGeneratorARM when the VIXL32-based backend replaces the existing one.
+class TestCodeGeneratorARMVIXL : public arm::CodeGeneratorARMVIXL {
+ public:
+ TestCodeGeneratorARMVIXL(HGraph* graph,
+ const ArmInstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options)
+ : arm::CodeGeneratorARMVIXL(graph, isa_features, compiler_options) {
+ AddAllocatedRegister(Location::RegisterLocation(arm::R6));
+ AddAllocatedRegister(Location::RegisterLocation(arm::R7));
+ }
+
+ void SetupBlockedRegisters() const OVERRIDE {
+ arm::CodeGeneratorARMVIXL::SetupBlockedRegisters();
+ blocked_core_registers_[arm::R4] = true;
+ blocked_core_registers_[arm::R6] = false;
+ blocked_core_registers_[arm::R7] = false;
+ }
+};
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_x86
+class TestCodeGeneratorX86 : public x86::CodeGeneratorX86 {
+ public:
+ TestCodeGeneratorX86(HGraph* graph,
+ const X86InstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options)
+ : x86::CodeGeneratorX86(graph, isa_features, compiler_options) {
+ // Save edi, we need it for getting enough registers for long multiplication.
+ AddAllocatedRegister(Location::RegisterLocation(x86::EDI));
+ }
+
+ void SetupBlockedRegisters() const OVERRIDE {
+ x86::CodeGeneratorX86::SetupBlockedRegisters();
+ // ebx is a callee-save register in C, but caller-save for ART.
+ blocked_core_registers_[x86::EBX] = true;
+
+ // Make edi available.
+ blocked_core_registers_[x86::EDI] = false;
+ }
+};
+#endif
+
+class InternalCodeAllocator : public CodeAllocator {
+ public:
+ InternalCodeAllocator() : size_(0) { }
+
+ virtual uint8_t* Allocate(size_t size) {
+ size_ = size;
+ memory_.reset(new uint8_t[size]);
+ return memory_.get();
+ }
+
+ size_t GetSize() const { return size_; }
+ uint8_t* GetMemory() const { return memory_.get(); }
+
+ private:
+ size_t size_;
+ std::unique_ptr<uint8_t[]> memory_;
+
+ DISALLOW_COPY_AND_ASSIGN(InternalCodeAllocator);
+};
+
+static bool CanExecuteOnHardware(InstructionSet target_isa) {
+ return (target_isa == kRuntimeISA)
+ // Handle the special case of ARM, with two instructions sets (ARM32 and Thumb-2).
+ || (kRuntimeISA == kArm && target_isa == kThumb2);
+}
+
+static bool CanExecute(InstructionSet target_isa) {
+ CodeSimulatorContainer simulator(target_isa);
+ return CanExecuteOnHardware(target_isa) || simulator.CanSimulate();
+}
+
+template <typename Expected>
+inline static Expected SimulatorExecute(CodeSimulator* simulator, Expected (*f)());
+
+template <>
+inline bool SimulatorExecute<bool>(CodeSimulator* simulator, bool (*f)()) {
+ simulator->RunFrom(reinterpret_cast<intptr_t>(f));
+ return simulator->GetCReturnBool();
+}
+
+template <>
+inline int32_t SimulatorExecute<int32_t>(CodeSimulator* simulator, int32_t (*f)()) {
+ simulator->RunFrom(reinterpret_cast<intptr_t>(f));
+ return simulator->GetCReturnInt32();
+}
+
+template <>
+inline int64_t SimulatorExecute<int64_t>(CodeSimulator* simulator, int64_t (*f)()) {
+ simulator->RunFrom(reinterpret_cast<intptr_t>(f));
+ return simulator->GetCReturnInt64();
+}
+
+template <typename Expected>
+static void VerifyGeneratedCode(InstructionSet target_isa,
+ Expected (*f)(),
+ bool has_result,
+ Expected expected) {
+ ASSERT_TRUE(CanExecute(target_isa)) << "Target isa is not executable.";
+
+ // Verify on simulator.
+ CodeSimulatorContainer simulator(target_isa);
+ if (simulator.CanSimulate()) {
+ Expected result = SimulatorExecute<Expected>(simulator.Get(), f);
+ if (has_result) {
+ ASSERT_EQ(expected, result);
+ }
+ }
+
+ // Verify on hardware.
+ if (CanExecuteOnHardware(target_isa)) {
+ Expected result = f();
+ if (has_result) {
+ ASSERT_EQ(expected, result);
+ }
+ }
+}
+
+template <typename Expected>
+static void Run(const InternalCodeAllocator& allocator,
+ const CodeGenerator& codegen,
+ bool has_result,
+ Expected expected) {
+ InstructionSet target_isa = codegen.GetInstructionSet();
+
+ typedef Expected (*fptr)();
+ CommonCompilerTest::MakeExecutable(allocator.GetMemory(), allocator.GetSize());
+ fptr f = reinterpret_cast<fptr>(allocator.GetMemory());
+ if (target_isa == kThumb2) {
+ // For thumb we need the bottom bit set.
+ f = reinterpret_cast<fptr>(reinterpret_cast<uintptr_t>(f) + 1);
+ }
+ VerifyGeneratedCode(target_isa, f, has_result, expected);
+}
+
+static void ValidateGraph(HGraph* graph) {
+ GraphChecker graph_checker(graph);
+ graph_checker.Run();
+ if (!graph_checker.IsValid()) {
+ for (const auto& error : graph_checker.GetErrors()) {
+ std::cout << error << std::endl;
+ }
+ }
+ ASSERT_TRUE(graph_checker.IsValid());
+}
+
+template <typename Expected>
+static void RunCodeNoCheck(CodeGenerator* codegen,
+ HGraph* graph,
+ const std::function<void(HGraph*)>& hook_before_codegen,
+ bool has_result,
+ Expected expected) {
+ SsaLivenessAnalysis liveness(graph, codegen);
+ PrepareForRegisterAllocation(graph).Run();
+ liveness.Analyze();
+ RegisterAllocator::Create(graph->GetArena(), codegen, liveness)->AllocateRegisters();
+ hook_before_codegen(graph);
+ InternalCodeAllocator allocator;
+ codegen->Compile(&allocator);
+ Run(allocator, *codegen, has_result, expected);
+}
+
+template <typename Expected>
+static void RunCode(CodeGenerator* codegen,
+ HGraph* graph,
+ std::function<void(HGraph*)> hook_before_codegen,
+ bool has_result,
+ Expected expected) {
+ ValidateGraph(graph);
+ RunCodeNoCheck(codegen, graph, hook_before_codegen, has_result, expected);
+}
+
+template <typename Expected>
+static void RunCode(CodegenTargetConfig target_config,
+ HGraph* graph,
+ std::function<void(HGraph*)> hook_before_codegen,
+ bool has_result,
+ Expected expected) {
+ CompilerOptions compiler_options;
+ std::unique_ptr<CodeGenerator> codegen(target_config.CreateCodeGenerator(graph, compiler_options));
+ RunCode(codegen.get(), graph, hook_before_codegen, has_result, expected);
+}
+
+#ifdef ART_ENABLE_CODEGEN_arm
+CodeGenerator* create_codegen_arm(HGraph* graph, const CompilerOptions& compiler_options) {
+ std::unique_ptr<const ArmInstructionSetFeatures> features_arm(
+ ArmInstructionSetFeatures::FromCppDefines());
+ return new (graph->GetArena()) TestCodeGeneratorARM(graph,
+ *features_arm.get(),
+ compiler_options);
+}
+
+CodeGenerator* create_codegen_arm_vixl32(HGraph* graph, const CompilerOptions& compiler_options) {
+ std::unique_ptr<const ArmInstructionSetFeatures> features_arm(
+ ArmInstructionSetFeatures::FromCppDefines());
+ return new (graph->GetArena())
+ TestCodeGeneratorARMVIXL(graph, *features_arm.get(), compiler_options);
+}
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_arm64
+CodeGenerator* create_codegen_arm64(HGraph* graph, const CompilerOptions& compiler_options) {
+ std::unique_ptr<const Arm64InstructionSetFeatures> features_arm64(
+ Arm64InstructionSetFeatures::FromCppDefines());
+ return new (graph->GetArena()) arm64::CodeGeneratorARM64(graph,
+ *features_arm64.get(),
+ compiler_options);
+}
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_x86
+CodeGenerator* create_codegen_x86(HGraph* graph, const CompilerOptions& compiler_options) {
+ std::unique_ptr<const X86InstructionSetFeatures> features_x86(
+ X86InstructionSetFeatures::FromCppDefines());
+ return new (graph->GetArena()) TestCodeGeneratorX86(graph, *features_x86.get(), compiler_options);
+}
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_x86_64
+CodeGenerator* create_codegen_x86_64(HGraph* graph, const CompilerOptions& compiler_options) {
+ std::unique_ptr<const X86_64InstructionSetFeatures> features_x86_64(
+ X86_64InstructionSetFeatures::FromCppDefines());
+ return new (graph->GetArena())
+ x86_64::CodeGeneratorX86_64(graph, *features_x86_64.get(), compiler_options);
+}
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_mips
+CodeGenerator* create_codegen_mips(HGraph* graph, const CompilerOptions& compiler_options) {
+ std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
+ MipsInstructionSetFeatures::FromCppDefines());
+ return new (graph->GetArena())
+ mips::CodeGeneratorMIPS(graph, *features_mips.get(), compiler_options);
+}
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_mips64
+CodeGenerator* create_codegen_mips64(HGraph* graph, const CompilerOptions& compiler_options) {
+ std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
+ Mips64InstructionSetFeatures::FromCppDefines());
+ return new (graph->GetArena())
+ mips64::CodeGeneratorMIPS64(graph, *features_mips64.get(), compiler_options);
+}
+#endif
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_CODEGEN_TEST_UTILS_H_
diff --git a/compiler/optimizing/common_arm.h b/compiler/optimizing/common_arm.h
index 21c3ae6..ecb8687 100644
--- a/compiler/optimizing/common_arm.h
+++ b/compiler/optimizing/common_arm.h
@@ -146,6 +146,12 @@
return InputRegisterAt(instr, 0);
}
+inline vixl::aarch32::DRegister DRegisterFromS(vixl::aarch32::SRegister s) {
+ vixl::aarch32::DRegister d = vixl::aarch32::DRegister(s.GetCode() / 2);
+ DCHECK(s.Is(d.GetLane(0)) || s.Is(d.GetLane(1)));
+ return d;
+}
+
inline int32_t Int32ConstantFrom(HInstruction* instr) {
if (instr->IsIntConstant()) {
return instr->AsIntConstant()->GetValue();
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 776a483..93ea090 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -130,8 +130,8 @@
Primitive::Type input_type = input->GetType();
if (input->IsConstant() && input->AsConstant()->IsZeroBitPattern()) {
return (Primitive::ComponentSize(input_type) >= vixl::aarch64::kXRegSizeInBytes)
- ? vixl::aarch64::xzr
- : vixl::aarch64::wzr;
+ ? vixl::aarch64::Register(vixl::aarch64::xzr)
+ : vixl::aarch64::Register(vixl::aarch64::wzr);
}
return InputCPURegisterAt(instr, index);
}
diff --git a/compiler/optimizing/dex_cache_array_fixups_mips.cc b/compiler/optimizing/dex_cache_array_fixups_mips.cc
index 04a4294..7734f91 100644
--- a/compiler/optimizing/dex_cache_array_fixups_mips.cc
+++ b/compiler/optimizing/dex_cache_array_fixups_mips.cc
@@ -47,7 +47,7 @@
// Computing the dex cache base for PC-relative accesses will clobber RA with
// the NAL instruction on R2. Take a note of this before generating the method
// entry.
- if (!dex_cache_array_bases_.empty() && !codegen_->GetInstructionSetFeatures().IsR6()) {
+ if (!dex_cache_array_bases_.empty()) {
codegen_->ClobberRA();
}
}
@@ -92,6 +92,11 @@
};
void DexCacheArrayFixups::Run() {
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen_);
+ if (mips_codegen->GetInstructionSetFeatures().IsR6()) {
+ // Do nothing for R6 because it has PC-relative addressing.
+ return;
+ }
if (graph_->HasIrreducibleLoops()) {
// Do not run this optimization, as irreducible loops do not work with an instruction
// that can be live-in at the irreducible loop header.
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index 3973985..5539413 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -57,14 +57,18 @@
return false;
}
-/** Returns b^e for b,e >= 1. */
-static int64_t IntPow(int64_t b, int64_t e) {
+/** Returns b^e for b,e >= 1. Sets overflow if arithmetic wrap-around occurred. */
+static int64_t IntPow(int64_t b, int64_t e, /*out*/ bool* overflow) {
DCHECK_GE(b, 1);
DCHECK_GE(e, 1);
int64_t pow = 1;
while (e) {
if (e & 1) {
+ int64_t oldpow = pow;
pow *= b;
+ if (pow < oldpow) {
+ *overflow = true;
+ }
}
e >>= 1;
b *= b;
@@ -1020,20 +1024,27 @@
HInstruction* opb = nullptr;
if (GenerateCode(info->op_a, nullptr, graph, block, &opa, false, false) &&
GenerateCode(info->op_b, nullptr, graph, block, &opb, false, false)) {
- // Compute f ^ m for known maximum index value m.
- int64_t fpow = IntPow(f, m);
if (graph != nullptr) {
- DCHECK(info->operation == HInductionVarAnalysis::kMul ||
- info->operation == HInductionVarAnalysis::kDiv);
Primitive::Type type = info->type;
+ // Compute f ^ m for known maximum index value m.
+ bool overflow = false;
+ int64_t fpow = IntPow(f, m, &overflow);
+ if (info->operation == HInductionVarAnalysis::kDiv) {
+ // For division, any overflow truncates to zero.
+ if (overflow || (type != Primitive::kPrimLong && !CanLongValueFitIntoInt(fpow))) {
+ fpow = 0;
+ }
+ } else if (type != Primitive::kPrimLong) {
+ // For multiplication, okay to truncate to required precision.
+ DCHECK(info->operation == HInductionVarAnalysis::kMul);
+ fpow = static_cast<int32_t>(fpow);
+ }
+ // Generate code.
if (fpow == 0) {
// Special case: repeated mul/div always yields zero.
*result = graph->GetConstant(type, 0);
} else {
// Last value: a * f ^ m + b or a * f ^ -m + b.
- if (type != Primitive::kPrimLong) {
- fpow = static_cast<int32_t>(fpow); // okay to truncate
- }
HInstruction* e = nullptr;
if (info->operation == HInductionVarAnalysis::kMul) {
e = new (graph->GetArena()) HMul(type, opa, graph->GetConstant(type, fpow));
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 5d40f75..f0afccb 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -304,7 +304,8 @@
// We do not support HDeoptimize in OSR methods.
return nullptr;
}
- return resolved_method->GetSingleImplementation();
+ PointerSize pointer_size = caller_compilation_unit_.GetClassLinker()->GetImagePointerSize();
+ return resolved_method->GetSingleImplementation(pointer_size);
}
bool HInliner::TryInline(HInvoke* invoke_instruction) {
@@ -557,9 +558,13 @@
is_referrer,
invoke_instruction->GetDexPc(),
/* needs_access_check */ false);
+ HLoadClass::LoadKind kind = HSharpening::SharpenClass(
+ load_class, codegen_, compiler_driver_, caller_compilation_unit_);
+ DCHECK(kind != HLoadClass::LoadKind::kInvalid)
+ << "We should always be able to reference a class for inline caches";
+ // Insert before setting the kind, as setting the kind affects the inputs.
bb_cursor->InsertInstructionAfter(load_class, receiver_class);
- // Sharpen after adding the instruction, as the sharpening may remove inputs.
- HSharpening::SharpenClass(load_class, codegen_, compiler_driver_);
+ load_class->SetLoadKind(kind);
// TODO: Extend reference type propagation to understand the guard.
HNotEqual* compare = new (graph_->GetArena()) HNotEqual(load_class, receiver_class);
@@ -1285,6 +1290,7 @@
resolved_method->GetDexFile(),
*code_item,
compiler_driver_,
+ codegen_,
inline_stats.get(),
resolved_method->GetQuickenedInfo(class_linker->GetImagePointerSize()),
dex_cache,
@@ -1415,10 +1421,13 @@
return false;
}
- if (!same_dex_file && current->NeedsEnvironment()) {
+ if (current->NeedsEnvironment() &&
+ !CanEncodeInlinedMethodInStackMap(*caller_compilation_unit_.GetDexFile(),
+ resolved_method)) {
VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be inlined because " << current->DebugName()
- << " needs an environment and is in a different dex file";
+ << " needs an environment, is in a different dex file"
+ << ", and cannot be encoded in the stack maps.";
return false;
}
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index cac385c..a1c391f 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -22,6 +22,7 @@
#include "dex_instruction-inl.h"
#include "driver/compiler_options.h"
#include "imtable-inl.h"
+#include "sharpening.h"
#include "scoped_thread_state_change-inl.h"
namespace art {
@@ -847,7 +848,7 @@
ScopedObjectAccess soa(Thread::Current());
if (invoke_type == kStatic) {
clinit_check = ProcessClinitCheckForInvoke(
- dex_pc, resolved_method, method_idx, &clinit_check_requirement);
+ dex_pc, resolved_method, &clinit_check_requirement);
} else if (invoke_type == kSuper) {
if (IsSameDexFile(*resolved_method->GetDexFile(), *dex_compilation_unit_->GetDexFile())) {
// Update the method index to the one resolved. Note that this may be a no-op if
@@ -933,15 +934,8 @@
bool HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t dex_pc) {
ScopedObjectAccess soa(Thread::Current());
- Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::DexCache> outer_dex_cache = outer_compilation_unit_->GetDexCache();
- if (outer_dex_cache.Get() != dex_cache.Get()) {
- // We currently do not support inlining allocations across dex files.
- return false;
- }
-
- HLoadClass* load_class = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+ HLoadClass* load_class = BuildLoadClass(type_index, dex_pc);
HInstruction* cls = load_class;
Handle<mirror::Class> klass = load_class->GetClass();
@@ -1005,39 +999,23 @@
HClinitCheck* HInstructionBuilder::ProcessClinitCheckForInvoke(
uint32_t dex_pc,
ArtMethod* resolved_method,
- uint32_t method_idx,
HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement) {
- Thread* self = Thread::Current();
- StackHandleScope<2> hs(self);
- Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::DexCache> outer_dex_cache = outer_compilation_unit_->GetDexCache();
- Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
- Handle<mirror::Class> resolved_method_class(hs.NewHandle(resolved_method->GetDeclaringClass()));
-
- // The index at which the method's class is stored in the DexCache's type array.
- dex::TypeIndex storage_index;
- bool is_outer_class = (resolved_method->GetDeclaringClass() == outer_class.Get());
- if (is_outer_class) {
- storage_index = outer_class->GetDexTypeIndex();
- } else if (outer_dex_cache.Get() == dex_cache.Get()) {
- // Get `storage_index` from IsClassOfStaticMethodAvailableToReferrer.
- compiler_driver_->IsClassOfStaticMethodAvailableToReferrer(outer_dex_cache.Get(),
- GetCompilingClass(),
- resolved_method,
- method_idx,
- &storage_index);
- }
+ Handle<mirror::Class> klass = handles_->NewHandle(resolved_method->GetDeclaringClass());
HClinitCheck* clinit_check = nullptr;
-
- if (IsInitialized(resolved_method_class)) {
+ if (IsInitialized(klass)) {
*clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kNone;
- } else if (storage_index.IsValid()) {
- *clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
- HLoadClass* cls = BuildLoadClass(
- storage_index, dex_pc, /* check_access */ false, /* outer */ true);
- clinit_check = new (arena_) HClinitCheck(cls, dex_pc);
- AppendInstruction(clinit_check);
+ } else {
+ HLoadClass* cls = BuildLoadClass(klass->GetDexTypeIndex(),
+ klass->GetDexFile(),
+ klass,
+ dex_pc,
+ /* needs_access_check */ false);
+ if (cls != nullptr) {
+ *clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
+ clinit_check = new (arena_) HClinitCheck(cls, dex_pc);
+ AppendInstruction(clinit_check);
+ }
}
return clinit_check;
}
@@ -1216,9 +1194,7 @@
}
ScopedObjectAccess soa(Thread::Current());
- ArtField* resolved_field =
- compiler_driver_->ComputeInstanceFieldInfo(field_index, dex_compilation_unit_, is_put, soa);
-
+ ArtField* resolved_field = ResolveField(field_index, /* is_static */ false, is_put);
// Generate an explicit null check on the reference, unless the field access
// is unresolved. In that case, we rely on the runtime to perform various
@@ -1336,6 +1312,56 @@
}
}
+ArtField* HInstructionBuilder::ResolveField(uint16_t field_idx, bool is_static, bool is_put) {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<2> hs(soa.Self());
+
+ ClassLinker* class_linker = dex_compilation_unit_->GetClassLinker();
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
+ Handle<mirror::Class> compiling_class(hs.NewHandle(GetCompilingClass()));
+
+ ArtField* resolved_field = class_linker->ResolveField(*dex_compilation_unit_->GetDexFile(),
+ field_idx,
+ dex_compilation_unit_->GetDexCache(),
+ class_loader,
+ is_static);
+
+ if (UNLIKELY(resolved_field == nullptr)) {
+ // Clean up any exception left by type resolution.
+ soa.Self()->ClearException();
+ return nullptr;
+ }
+
+ // Check static/instance. The class linker has a fast path for looking into the dex cache
+ // and does not check static/instance if it hits it.
+ if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
+ return nullptr;
+ }
+
+ // Check access.
+ if (compiling_class.Get() == nullptr) {
+ if (!resolved_field->IsPublic()) {
+ return nullptr;
+ }
+ } else if (!compiling_class->CanAccessResolvedField(resolved_field->GetDeclaringClass(),
+ resolved_field,
+ dex_compilation_unit_->GetDexCache().Get(),
+ field_idx)) {
+ return nullptr;
+ }
+
+ if (is_put &&
+ resolved_field->IsFinal() &&
+ (compiling_class.Get() != resolved_field->GetDeclaringClass())) {
+ // Final fields can only be updated within their own class.
+ // TODO: Only allow it in constructors. b/34966607.
+ return nullptr;
+ }
+
+ return resolved_field;
+}
+
bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
uint32_t dex_pc,
bool is_put) {
@@ -1343,12 +1369,7 @@
uint16_t field_index = instruction.VRegB_21c();
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<3> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
- ArtField* resolved_field = compiler_driver_->ResolveField(
- soa, dex_cache, class_loader, dex_compilation_unit_, field_index, true);
+ ArtField* resolved_field = ResolveField(field_index, /* is_static */ true, is_put);
if (resolved_field == nullptr) {
MaybeRecordStat(MethodCompilationStat::kUnresolvedField);
@@ -1358,38 +1379,23 @@
}
Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
- Handle<mirror::DexCache> outer_dex_cache = outer_compilation_unit_->GetDexCache();
- Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
- // The index at which the field's class is stored in the DexCache's type array.
- dex::TypeIndex storage_index;
- bool is_outer_class = (outer_class.Get() == resolved_field->GetDeclaringClass());
- if (is_outer_class) {
- storage_index = outer_class->GetDexTypeIndex();
- } else if (outer_dex_cache.Get() != dex_cache.Get()) {
- // The compiler driver cannot currently understand multiple dex caches involved. Just bailout.
- return false;
- } else {
- // TODO: This is rather expensive. Perf it and cache the results if needed.
- std::pair<bool, bool> pair = compiler_driver_->IsFastStaticField(
- outer_dex_cache.Get(),
- GetCompilingClass(),
- resolved_field,
- field_index,
- &storage_index);
- bool can_easily_access = is_put ? pair.second : pair.first;
- if (!can_easily_access) {
- MaybeRecordStat(MethodCompilationStat::kUnresolvedFieldNotAFastAccess);
- BuildUnresolvedStaticFieldAccess(instruction, dex_pc, is_put, field_type);
- return true;
- }
+ Handle<mirror::Class> klass = handles_->NewHandle(resolved_field->GetDeclaringClass());
+ HLoadClass* constant = BuildLoadClass(klass->GetDexTypeIndex(),
+ klass->GetDexFile(),
+ klass,
+ dex_pc,
+ /* needs_access_check */ false);
+
+ if (constant == nullptr) {
+ // The class cannot be referenced from this compiled code. Generate
+ // an unresolved access.
+ MaybeRecordStat(MethodCompilationStat::kUnresolvedFieldNotAFastAccess);
+ BuildUnresolvedStaticFieldAccess(instruction, dex_pc, is_put, field_type);
+ return true;
}
- HLoadClass* constant = BuildLoadClass(
- storage_index, dex_pc, /* check_access */ false, /* outer */ true);
-
HInstruction* cls = constant;
- Handle<mirror::Class> klass(hs.NewHandle(resolved_field->GetDeclaringClass()));
if (!IsInitialized(klass)) {
cls = new (arena_) HClinitCheck(constant, dex_pc);
AppendInstruction(cls);
@@ -1497,7 +1503,7 @@
uint32_t* args,
uint32_t register_index) {
HInstruction* length = graph_->GetIntConstant(number_of_vreg_arguments, dex_pc);
- HLoadClass* cls = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+ HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
HInstruction* object = new (arena_) HNewArray(cls, length, dex_pc);
AppendInstruction(object);
@@ -1627,44 +1633,68 @@
}
}
-HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
- uint32_t dex_pc,
- bool check_access,
- bool outer) {
+HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index, uint32_t dex_pc) {
ScopedObjectAccess soa(Thread::Current());
- const DexCompilationUnit* compilation_unit =
- outer ? outer_compilation_unit_ : dex_compilation_unit_;
- const DexFile& dex_file = *compilation_unit->GetDexFile();
- StackHandleScope<1> hs(soa.Self());
+ StackHandleScope<2> hs(soa.Self());
+ const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
Handle<mirror::Class> klass = handles_->NewHandle(compiler_driver_->ResolveClass(
- soa, compilation_unit->GetDexCache(), class_loader, type_index, compilation_unit));
+ soa, dex_compilation_unit_->GetDexCache(), class_loader, type_index, dex_compilation_unit_));
- bool is_accessible = false;
- if (!check_access) {
- is_accessible = true;
- } else if (klass.Get() != nullptr) {
+ bool needs_access_check = true;
+ if (klass.Get() != nullptr) {
if (klass->IsPublic()) {
- is_accessible = true;
+ needs_access_check = false;
} else {
mirror::Class* compiling_class = GetCompilingClass();
if (compiling_class != nullptr && compiling_class->CanAccess(klass.Get())) {
- is_accessible = true;
+ needs_access_check = false;
}
}
}
+ return BuildLoadClass(type_index, dex_file, klass, dex_pc, needs_access_check);
+}
+
+HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
+ const DexFile& dex_file,
+ Handle<mirror::Class> klass,
+ uint32_t dex_pc,
+ bool needs_access_check) {
+ // Try to find a reference in the compiling dex file.
+ const DexFile* actual_dex_file = &dex_file;
+ if (!IsSameDexFile(dex_file, *dex_compilation_unit_->GetDexFile())) {
+ dex::TypeIndex local_type_index =
+ klass->FindTypeIndexInOtherDexFile(*dex_compilation_unit_->GetDexFile());
+ if (local_type_index.IsValid()) {
+ type_index = local_type_index;
+ actual_dex_file = dex_compilation_unit_->GetDexFile();
+ }
+ }
+
+ // Note: `klass` must be from `handles_`.
HLoadClass* load_class = new (arena_) HLoadClass(
graph_->GetCurrentMethod(),
type_index,
- dex_file,
+ *actual_dex_file,
klass,
klass.Get() != nullptr && (klass.Get() == GetOutermostCompilingClass()),
dex_pc,
- !is_accessible);
+ needs_access_check);
+ HLoadClass::LoadKind load_kind = HSharpening::SharpenClass(load_class,
+ code_generator_,
+ compiler_driver_,
+ *dex_compilation_unit_);
+
+ if (load_kind == HLoadClass::LoadKind::kInvalid) {
+ // We actually cannot reference this class, we're forced to bail.
+ return nullptr;
+ }
+ // Append the instruction first, as setting the load kind affects the inputs.
AppendInstruction(load_class);
+ load_class->SetLoadKind(load_kind);
return load_class;
}
@@ -1674,7 +1704,7 @@
dex::TypeIndex type_index,
uint32_t dex_pc) {
HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
- HLoadClass* cls = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+ HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
ScopedObjectAccess soa(Thread::Current());
TypeCheckKind check_kind = ComputeTypeCheckKind(cls->GetClass());
@@ -2498,7 +2528,7 @@
case Instruction::NEW_ARRAY: {
dex::TypeIndex type_index(instruction.VRegC_22c());
HInstruction* length = LoadLocal(instruction.VRegB_22c(), Primitive::kPrimInt);
- HLoadClass* cls = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+ HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
AppendInstruction(new (arena_) HNewArray(cls, length, dex_pc));
UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
break;
@@ -2673,7 +2703,7 @@
case Instruction::CONST_CLASS: {
dex::TypeIndex type_index(instruction.VRegB_21c());
- BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+ BuildLoadClass(type_index, dex_pc);
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
break;
}
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 5efe950..3bb680c 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -31,6 +31,7 @@
namespace art {
+class CodeGenerator;
class Instruction;
class HInstructionBuilder : public ValueObject {
@@ -44,6 +45,7 @@
DexCompilationUnit* dex_compilation_unit,
const DexCompilationUnit* const outer_compilation_unit,
CompilerDriver* driver,
+ CodeGenerator* code_generator,
const uint8_t* interpreter_metadata,
OptimizingCompilerStats* compiler_stats,
Handle<mirror::DexCache> dex_cache,
@@ -61,6 +63,7 @@
current_locals_(nullptr),
latest_result_(nullptr),
compiler_driver_(driver),
+ code_generator_(code_generator),
dex_compilation_unit_(dex_compilation_unit),
outer_compilation_unit_(outer_compilation_unit),
interpreter_metadata_(interpreter_metadata),
@@ -228,10 +231,14 @@
// Builds a `HLoadClass` loading the given `type_index`. If `outer` is true,
// this method will use the outer class's dex file to lookup the type at
// `type_index`.
+ HLoadClass* BuildLoadClass(dex::TypeIndex type_index, uint32_t dex_pc);
+
HLoadClass* BuildLoadClass(dex::TypeIndex type_index,
+ const DexFile& dex_file,
+ Handle<mirror::Class> klass,
uint32_t dex_pc,
- bool check_access,
- bool outer = false);
+ bool needs_access_check)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the outer-most compiling method's class.
mirror::Class* GetOutermostCompilingClass() const;
@@ -275,7 +282,6 @@
HClinitCheck* ProcessClinitCheckForInvoke(
uint32_t dex_pc,
ArtMethod* method,
- uint32_t method_idx,
HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -290,6 +296,10 @@
// not be resolved.
ArtMethod* ResolveMethod(uint16_t method_idx, InvokeType invoke_type);
+ // Try to resolve a field using the class linker. Return null if it could not
+ // be found.
+ ArtField* ResolveField(uint16_t field_idx, bool is_static, bool is_put);
+
ArenaAllocator* const arena_;
HGraph* const graph_;
VariableSizedHandleScope* handles_;
@@ -311,6 +321,8 @@
CompilerDriver* const compiler_driver_;
+ CodeGenerator* const code_generator_;
+
// The compilation unit of the current method being compiled. Note that
// it can be an inlined method.
DexCompilationUnit* const dex_compilation_unit_;
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 1e73cf6..6425e13 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -31,6 +31,9 @@
static constexpr uint32_t kPositiveInfinityFloat = 0x7f800000U;
static constexpr uint64_t kPositiveInfinityDouble = UINT64_C(0x7ff0000000000000);
+static constexpr uint32_t kNanFloat = 0x7fc00000U;
+static constexpr uint64_t kNanDouble = 0x7ff8000000000000;
+
// Recognize intrinsics from HInvoke nodes.
class IntrinsicsRecognizer : public HOptimization {
public:
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 68c2d2e..70a3d38 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -40,10 +40,12 @@
using helpers::LowRegisterFrom;
using helpers::LowSRegisterFrom;
using helpers::OutputDRegister;
+using helpers::OutputSRegister;
using helpers::OutputRegister;
using helpers::OutputVRegister;
using helpers::RegisterFrom;
using helpers::SRegisterFrom;
+using helpers::DRegisterFromS;
using namespace vixl::aarch32; // NOLINT(build/namespaces)
@@ -462,6 +464,214 @@
GenAbsInteger(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
}
+static void GenMinMaxFloat(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler) {
+ Location op1_loc = invoke->GetLocations()->InAt(0);
+ Location op2_loc = invoke->GetLocations()->InAt(1);
+ Location out_loc = invoke->GetLocations()->Out();
+
+ // Optimization: don't generate any code if inputs are the same.
+ if (op1_loc.Equals(op2_loc)) {
+ DCHECK(out_loc.Equals(op1_loc)); // out_loc is set as SameAsFirstInput() in location builder.
+ return;
+ }
+
+ vixl32::SRegister op1 = SRegisterFrom(op1_loc);
+ vixl32::SRegister op2 = SRegisterFrom(op2_loc);
+ vixl32::SRegister out = OutputSRegister(invoke);
+ UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
+ const vixl32::Register temp1 = temps.Acquire();
+ vixl32::Register temp2 = RegisterFrom(invoke->GetLocations()->GetTemp(0));
+ vixl32::Label nan, done;
+
+ DCHECK(op1.Is(out));
+
+ __ Vcmp(op1, op2);
+ __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
+ __ B(vs, &nan, /* far_target */ false); // if un-ordered, go to NaN handling.
+
+ // op1 <> op2
+ vixl32::ConditionType cond = is_min ? gt : lt;
+ {
+ ExactAssemblyScope it_scope(assembler->GetVIXLAssembler(),
+ 2 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ __ it(cond);
+ __ vmov(cond, F32, out, op2);
+ }
+ __ B(ne, &done, /* far_target */ false); // for <>(not equal), we've done min/max calculation.
+
+ // handle op1 == op2, max(+0.0,-0.0), min(+0.0,-0.0).
+ __ Vmov(temp1, op1);
+ __ Vmov(temp2, op2);
+ if (is_min) {
+ __ Orr(temp1, temp1, temp2);
+ } else {
+ __ And(temp1, temp1, temp2);
+ }
+ __ Vmov(out, temp1);
+ __ B(&done);
+
+ // handle NaN input.
+ __ Bind(&nan);
+ __ Movt(temp1, High16Bits(kNanFloat)); // 0x7FC0xxxx is a NaN.
+ __ Vmov(out, temp1);
+
+ __ Bind(&done);
+}
+
+static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathMinFloatFloat(HInvoke* invoke) {
+ CreateFPFPToFPLocations(arena_, invoke);
+ invoke->GetLocations()->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathMinFloatFloat(HInvoke* invoke) {
+ GenMinMaxFloat(invoke, /* is_min */ true, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxFloatFloat(HInvoke* invoke) {
+ CreateFPFPToFPLocations(arena_, invoke);
+ invoke->GetLocations()->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxFloatFloat(HInvoke* invoke) {
+ GenMinMaxFloat(invoke, /* is_min */ false, GetAssembler());
+}
+
+static void GenMinMaxDouble(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler) {
+ Location op1_loc = invoke->GetLocations()->InAt(0);
+ Location op2_loc = invoke->GetLocations()->InAt(1);
+ Location out_loc = invoke->GetLocations()->Out();
+
+ // Optimization: don't generate any code if inputs are the same.
+ if (op1_loc.Equals(op2_loc)) {
+ DCHECK(out_loc.Equals(op1_loc)); // out_loc is set as SameAsFirstInput() in location builder.
+ return;
+ }
+
+ vixl32::DRegister op1 = DRegisterFrom(op1_loc);
+ vixl32::DRegister op2 = DRegisterFrom(op2_loc);
+ vixl32::DRegister out = OutputDRegister(invoke);
+ vixl32::Label handle_nan_eq, done;
+
+ DCHECK(op1.Is(out));
+
+ __ Vcmp(op1, op2);
+ __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
+ __ B(vs, &handle_nan_eq, /* far_target */ false); // if un-ordered, go to NaN handling.
+
+ // op1 <> op2
+ vixl32::ConditionType cond = is_min ? gt : lt;
+ {
+ ExactAssemblyScope it_scope(assembler->GetVIXLAssembler(),
+ 2 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ __ it(cond);
+ __ vmov(cond, F64, out, op2);
+ }
+ __ B(ne, &done, /* far_target */ false); // for <>(not equal), we've done min/max calculation.
+
+ // handle op1 == op2, max(+0.0,-0.0).
+ if (!is_min) {
+ __ Vand(F64, out, op1, op2);
+ __ B(&done);
+ }
+
+ // handle op1 == op2, min(+0.0,-0.0), NaN input.
+ __ Bind(&handle_nan_eq);
+ __ Vorr(F64, out, op1, op2); // assemble op1/-0.0/NaN.
+
+ __ Bind(&done);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) {
+ CreateFPFPToFPLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) {
+ GenMinMaxDouble(invoke, /* is_min */ true , GetAssembler());
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxDoubleDouble(HInvoke* invoke) {
+ CreateFPFPToFPLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxDoubleDouble(HInvoke* invoke) {
+ GenMinMaxDouble(invoke, /* is_min */ false, GetAssembler());
+}
+
+static void GenMinMaxLong(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler) {
+ Location op1_loc = invoke->GetLocations()->InAt(0);
+ Location op2_loc = invoke->GetLocations()->InAt(1);
+ Location out_loc = invoke->GetLocations()->Out();
+
+ // Optimization: don't generate any code if inputs are the same.
+ if (op1_loc.Equals(op2_loc)) {
+ DCHECK(out_loc.Equals(op1_loc)); // out_loc is set as SameAsFirstInput() in location builder.
+ return;
+ }
+
+ vixl32::Register op1_lo = LowRegisterFrom(op1_loc);
+ vixl32::Register op1_hi = HighRegisterFrom(op1_loc);
+ vixl32::Register op2_lo = LowRegisterFrom(op2_loc);
+ vixl32::Register op2_hi = HighRegisterFrom(op2_loc);
+ vixl32::Register out_lo = LowRegisterFrom(out_loc);
+ vixl32::Register out_hi = HighRegisterFrom(out_loc);
+ UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
+ const vixl32::Register temp = temps.Acquire();
+
+ DCHECK(op1_lo.Is(out_lo));
+ DCHECK(op1_hi.Is(out_hi));
+
+ // Compare op1 >= op2, or op1 < op2.
+ __ Cmp(out_lo, op2_lo);
+ __ Sbcs(temp, out_hi, op2_hi);
+
+ // Now GE/LT condition code is correct for the long comparison.
+ {
+ vixl32::ConditionType cond = is_min ? ge : lt;
+ ExactAssemblyScope it_scope(assembler->GetVIXLAssembler(),
+ 3 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ __ itt(cond);
+ __ mov(cond, out_lo, op2_lo);
+ __ mov(cond, out_hi, op2_hi);
+ }
+}
+
+static void CreateLongLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathMinLongLong(HInvoke* invoke) {
+ CreateLongLongToLongLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathMinLongLong(HInvoke* invoke) {
+ GenMinMaxLong(invoke, /* is_min */ true, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxLongLong(HInvoke* invoke) {
+ CreateLongLongToLongLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxLongLong(HInvoke* invoke) {
+ GenMinMaxLong(invoke, /* is_min */ false, GetAssembler());
+}
+
static void GenMinMax(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler) {
vixl32::Register op1 = InputRegisterAt(invoke, 0);
vixl32::Register op2 = InputRegisterAt(invoke, 1);
@@ -514,6 +724,18 @@
__ Vsqrt(OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
}
+void IntrinsicLocationsBuilderARMVIXL::VisitMathRint(HInvoke* invoke) {
+ if (features_.HasARMv8AInstructions()) {
+ CreateFPToFPLocations(arena_, invoke);
+ }
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathRint(HInvoke* invoke) {
+ DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
+ ArmVIXLAssembler* assembler = GetAssembler();
+ __ Vrintn(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
+}
+
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) {
CreateIntToIntLocations(arena_, invoke);
}
@@ -2742,15 +2964,30 @@
__ Bind(slow_path->GetExitLabel());
}
-UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMinDoubleDouble)
-UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMinFloatFloat)
-UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMaxDoubleDouble)
-UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMaxFloatFloat)
-UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMinLongLong)
-UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMaxLongLong)
-UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathCeil) // Could be done by changing rounding mode, maybe?
-UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathFloor) // Could be done by changing rounding mode, maybe?
-UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRint)
+void IntrinsicLocationsBuilderARMVIXL::VisitMathCeil(HInvoke* invoke) {
+ if (features_.HasARMv8AInstructions()) {
+ CreateFPToFPLocations(arena_, invoke);
+ }
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathCeil(HInvoke* invoke) {
+ ArmVIXLAssembler* assembler = GetAssembler();
+ DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
+ __ Vrintp(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathFloor(HInvoke* invoke) {
+ if (features_.HasARMv8AInstructions()) {
+ CreateFPToFPLocations(arena_, invoke);
+ }
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathFloor(HInvoke* invoke) {
+ ArmVIXLAssembler* assembler = GetAssembler();
+ DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
+ __ Vrintm(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
+}
+
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundDouble) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundFloat) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeCASLong) // High register pressure.
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index d15145e..abbb91a 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1354,13 +1354,15 @@
return os;
}
-void HInstruction::MoveBefore(HInstruction* cursor) {
- DCHECK(!IsPhi());
- DCHECK(!IsControlFlow());
- DCHECK(CanBeMoved() ||
- // HShouldDeoptimizeFlag can only be moved by CHAGuardOptimization.
- IsShouldDeoptimizeFlag());
- DCHECK(!cursor->IsPhi());
+void HInstruction::MoveBefore(HInstruction* cursor, bool do_checks) {
+ if (do_checks) {
+ DCHECK(!IsPhi());
+ DCHECK(!IsControlFlow());
+ DCHECK(CanBeMoved() ||
+ // HShouldDeoptimizeFlag can only be moved by CHAGuardOptimization.
+ IsShouldDeoptimizeFlag());
+ DCHECK(!cursor->IsPhi());
+ }
next_->previous_ = previous_;
if (previous_ != nullptr) {
@@ -2462,16 +2464,15 @@
}
}
-void HLoadClass::SetLoadKindInternal(LoadKind load_kind) {
- // Once sharpened, the load kind should not be changed again.
- // Also, kReferrersClass should never be overwritten.
- DCHECK_EQ(GetLoadKind(), LoadKind::kDexCacheViaMethod);
+void HLoadClass::SetLoadKind(LoadKind load_kind) {
SetPackedField<LoadKindField>(load_kind);
- if (load_kind != LoadKind::kDexCacheViaMethod) {
+ if (load_kind != LoadKind::kDexCacheViaMethod &&
+ load_kind != LoadKind::kReferrersClass) {
RemoveAsUserOfInput(0u);
SetRawInputAt(0u, nullptr);
}
+
if (!NeedsEnvironment()) {
RemoveEnvironment();
SetSideEffects(SideEffects::None());
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index f0ea9e2..96f9aba 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2065,8 +2065,8 @@
other->ReplaceInput(this, use_index);
}
- // Move `this` instruction before `cursor`.
- void MoveBefore(HInstruction* cursor);
+ // Move `this` instruction before `cursor`.
+ void MoveBefore(HInstruction* cursor, bool do_checks = true);
// Move `this` before its first user and out of any loops. If there is no
// out-of-loop user that dominates all other users, move the instruction
@@ -4322,6 +4322,11 @@
return (obj == InputAt(0)) && !GetLocations()->Intrinsified();
}
+ bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
+ // The assembly stub currently needs it.
+ return true;
+ }
+
uint32_t GetImtIndex() const { return imt_index_; }
uint32_t GetDexMethodIndex() const { return dex_method_index_; }
@@ -5508,6 +5513,9 @@
public:
// Determines how to load the Class.
enum class LoadKind {
+ // We cannot load this class. See HSharpening::SharpenLoadClass.
+ kInvalid = -1,
+
// Use the Class* from the method's own ArtMethod*.
kReferrersClass,
@@ -5564,18 +5572,7 @@
SetPackedFlag<kFlagGenerateClInitCheck>(false);
}
- void SetLoadKind(LoadKind load_kind) {
- SetLoadKindInternal(load_kind);
- }
-
- void SetLoadKindWithTypeReference(LoadKind load_kind,
- const DexFile& dex_file,
- dex::TypeIndex type_index) {
- DCHECK(HasTypeReference(load_kind));
- DCHECK(IsSameDexFile(dex_file_, dex_file));
- DCHECK_EQ(type_index_, type_index);
- SetLoadKindInternal(load_kind);
- }
+ void SetLoadKind(LoadKind load_kind);
LoadKind GetLoadKind() const {
return GetPackedField<LoadKindField>();
@@ -5694,6 +5691,11 @@
// for PC-relative loads, i.e. kBssEntry or kBootImageLinkTimePcRelative.
HUserRecord<HInstruction*> special_input_;
+ // A type index and dex file where the class can be accessed. The dex file can be:
+ // - The compiling method's dex file if the class is defined there too.
+ // - The compiling method's dex file if the class is referenced there.
+ // - The dex file where the class is defined. When the load kind can only be
+ // kBssEntry or kDexCacheViaMethod, we cannot emit code for this `HLoadClass`.
const dex::TypeIndex type_index_;
const DexFile& dex_file_;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 297500b..727ca7d 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -90,6 +90,7 @@
#include "reference_type_propagation.h"
#include "register_allocator_linear_scan.h"
#include "select_generator.h"
+#include "scheduler.h"
#include "sharpening.h"
#include "side_effects_analysis.h"
#include "ssa_builder.h"
@@ -658,10 +659,13 @@
new (arena) arm64::InstructionSimplifierArm64(graph, stats);
SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
+ HInstructionScheduling* scheduling =
+ new (arena) HInstructionScheduling(graph, instruction_set);
HOptimization* arm64_optimizations[] = {
simplifier,
side_effects,
- gvn
+ gvn,
+ scheduling,
};
RunOptimizations(arm64_optimizations, arraysize(arm64_optimizations), pass_observer);
break;
@@ -995,6 +999,7 @@
&dex_file,
*code_item,
compiler_driver,
+ codegen.get(),
compilation_stats_.get(),
interpreter_metadata,
dex_cache,
@@ -1129,6 +1134,25 @@
return false;
}
+bool EncodeArtMethodInInlineInfo(ArtMethod* method ATTRIBUTE_UNUSED) {
+ // Note: the runtime is null only for unit testing.
+ return Runtime::Current() == nullptr || !Runtime::Current()->IsAotCompiler();
+}
+
+bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, ArtMethod* callee) {
+ if (!Runtime::Current()->IsAotCompiler()) {
+ // JIT can always encode methods in stack maps.
+ return true;
+ }
+ if (IsSameDexFile(caller_dex_file, *callee->GetDexFile())) {
+ return true;
+ }
+ // TODO(ngeoffray): Support more AOT cases for inlining:
+ // - methods in multidex
+ // - methods in boot image for on-device non-PIC compilation.
+ return false;
+}
+
bool OptimizingCompiler::JitCompile(Thread* self,
jit::JitCodeCache* code_cache,
ArtMethod* method,
diff --git a/compiler/optimizing/optimizing_compiler.h b/compiler/optimizing/optimizing_compiler.h
index 0c89da1..d8cea30 100644
--- a/compiler/optimizing/optimizing_compiler.h
+++ b/compiler/optimizing/optimizing_compiler.h
@@ -17,10 +17,15 @@
#ifndef ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_H_
#define ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_H_
+#include "base/mutex.h"
+#include "globals.h"
+
namespace art {
+class ArtMethod;
class Compiler;
class CompilerDriver;
+class DexFile;
Compiler* CreateOptimizingCompiler(CompilerDriver* driver);
@@ -29,6 +34,10 @@
// information for checking invariants.
bool IsCompilingWithCoreImage();
+bool EncodeArtMethodInInlineInfo(ArtMethod* method);
+bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, ArtMethod* callee)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_H_
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 58d9017..bf963b8 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -64,6 +64,9 @@
void RemoveSuspendChecks(HGraph* graph) {
for (HBasicBlock* block : graph->GetBlocks()) {
if (block != nullptr) {
+ if (block->GetLoopInformation() != nullptr) {
+ block->GetLoopInformation()->SetSuspendCheck(nullptr);
+ }
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
if (current->IsSuspendCheck()) {
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
new file mode 100644
index 0000000..d65d20c
--- /dev/null
+++ b/compiler/optimizing/scheduler.cc
@@ -0,0 +1,610 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string>
+
+#include "prepare_for_register_allocation.h"
+#include "scheduler.h"
+
+#ifdef ART_ENABLE_CODEGEN_arm64
+#include "scheduler_arm64.h"
+#endif
+
+namespace art {
+
+void SchedulingGraph::AddDependency(SchedulingNode* node,
+ SchedulingNode* dependency,
+ bool is_data_dependency) {
+ if (node == nullptr || dependency == nullptr) {
+ // A `nullptr` node indicates an instruction out of scheduling range (e.g. in
+ // another block), so we do not need to add a dependency edge to the graph.
+ return;
+ }
+
+ if (is_data_dependency) {
+ if (!HasImmediateDataDependency(node, dependency)) {
+ node->AddDataPredecessor(dependency);
+ }
+ } else if (!HasImmediateOtherDependency(node, dependency)) {
+ node->AddOtherPredecessor(dependency);
+ }
+}
+
+static bool MayHaveReorderingDependency(SideEffects node, SideEffects other) {
+ // Read after write.
+ if (node.MayDependOn(other)) {
+ return true;
+ }
+
+ // Write after read.
+ if (other.MayDependOn(node)) {
+ return true;
+ }
+
+ // Memory write after write.
+ if (node.DoesAnyWrite() && other.DoesAnyWrite()) {
+ return true;
+ }
+
+ return false;
+}
+
+
+// Check whether `node` depends on `other`, taking into account `SideEffect`
+// information and `CanThrow` information.
+static bool HasSideEffectDependency(const HInstruction* node, const HInstruction* other) {
+ if (MayHaveReorderingDependency(node->GetSideEffects(), other->GetSideEffects())) {
+ return true;
+ }
+
+ if (other->CanThrow() && node->GetSideEffects().DoesAnyWrite()) {
+ return true;
+ }
+
+ if (other->GetSideEffects().DoesAnyWrite() && node->CanThrow()) {
+ return true;
+ }
+
+ if (other->CanThrow() && node->CanThrow()) {
+ return true;
+ }
+
+ // Check side-effect dependency between ArrayGet and BoundsCheck.
+ if (node->IsArrayGet() && other->IsBoundsCheck() && node->InputAt(1) == other) {
+ return true;
+ }
+
+ return false;
+}
+
+void SchedulingGraph::AddDependencies(HInstruction* instruction, bool is_scheduling_barrier) {
+ SchedulingNode* instruction_node = GetNode(instruction);
+
+ // Define-use dependencies.
+ for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
+ AddDataDependency(GetNode(use.GetUser()), instruction_node);
+ }
+
+ // Scheduling barrier dependencies.
+ DCHECK(!is_scheduling_barrier || contains_scheduling_barrier_);
+ if (contains_scheduling_barrier_) {
+ // A barrier depends on instructions after it. And instructions before the
+ // barrier depend on it.
+ for (HInstruction* other = instruction->GetNext(); other != nullptr; other = other->GetNext()) {
+ SchedulingNode* other_node = GetNode(other);
+ bool other_is_barrier = other_node->IsSchedulingBarrier();
+ if (is_scheduling_barrier || other_is_barrier) {
+ AddOtherDependency(other_node, instruction_node);
+ }
+ if (other_is_barrier) {
+ // This other scheduling barrier guarantees ordering of instructions after
+ // it, so avoid creating additional useless dependencies in the graph.
+ // For example if we have
+ // instr_1
+ // barrier_2
+ // instr_3
+ // barrier_4
+ // instr_5
+ // we only create the following non-data dependencies
+ // 1 -> 2
+ // 2 -> 3
+ // 2 -> 4
+ // 3 -> 4
+ // 4 -> 5
+ // and do not create
+ // 1 -> 4
+ // 2 -> 5
+ // Note that in this example we could also avoid creating the dependency
+ // `2 -> 4`. But if we remove `instr_3` that dependency is required to
+ // order the barriers. So we generate it to avoid a special case.
+ break;
+ }
+ }
+ }
+
+ // Side effect dependencies.
+ if (!instruction->GetSideEffects().DoesNothing() || instruction->CanThrow()) {
+ for (HInstruction* other = instruction->GetNext(); other != nullptr; other = other->GetNext()) {
+ SchedulingNode* other_node = GetNode(other);
+ if (other_node->IsSchedulingBarrier()) {
+ // We have reached a scheduling barrier so we can stop further
+ // processing.
+ DCHECK(HasImmediateOtherDependency(other_node, instruction_node));
+ break;
+ }
+ if (HasSideEffectDependency(other, instruction)) {
+ AddOtherDependency(other_node, instruction_node);
+ }
+ }
+ }
+
+ // Environment dependencies.
+ // We do not need to process those if the instruction is a scheduling barrier,
+ // since the barrier already has non-data dependencies on all following
+ // instructions.
+ if (!is_scheduling_barrier) {
+ for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
+ // Note that here we could stop processing if the environment holder is
+ // across a scheduling barrier. But checking this would likely require
+ // more work than simply iterating through environment uses.
+ AddOtherDependency(GetNode(use.GetUser()->GetHolder()), instruction_node);
+ }
+ }
+}
+
+bool SchedulingGraph::HasImmediateDataDependency(const SchedulingNode* node,
+ const SchedulingNode* other) const {
+ return ContainsElement(node->GetDataPredecessors(), other);
+}
+
+bool SchedulingGraph::HasImmediateDataDependency(const HInstruction* instruction,
+ const HInstruction* other_instruction) const {
+ const SchedulingNode* node = GetNode(instruction);
+ const SchedulingNode* other = GetNode(other_instruction);
+ if (node == nullptr || other == nullptr) {
+ // Both instructions must be in current basic block, i.e. the SchedulingGraph can see their
+ // corresponding SchedulingNode in the graph, and tell whether there is a dependency.
+ // Otherwise there is no dependency from SchedulingGraph's perspective, for example,
+ // instruction and other_instruction are in different basic blocks.
+ return false;
+ }
+ return HasImmediateDataDependency(node, other);
+}
+
+bool SchedulingGraph::HasImmediateOtherDependency(const SchedulingNode* node,
+ const SchedulingNode* other) const {
+ return ContainsElement(node->GetOtherPredecessors(), other);
+}
+
+bool SchedulingGraph::HasImmediateOtherDependency(const HInstruction* instruction,
+ const HInstruction* other_instruction) const {
+ const SchedulingNode* node = GetNode(instruction);
+ const SchedulingNode* other = GetNode(other_instruction);
+ if (node == nullptr || other == nullptr) {
+ // Both instructions must be in current basic block, i.e. the SchedulingGraph can see their
+ // corresponding SchedulingNode in the graph, and tell whether there is a dependency.
+ // Otherwise there is no dependency from SchedulingGraph's perspective, for example,
+ // instruction and other_instruction are in different basic blocks.
+ return false;
+ }
+ return HasImmediateOtherDependency(node, other);
+}
+
+static const std::string InstructionTypeId(const HInstruction* instruction) {
+ std::string id;
+ Primitive::Type type = instruction->GetType();
+ if (type == Primitive::kPrimNot) {
+ id.append("l");
+ } else {
+ id.append(Primitive::Descriptor(instruction->GetType()));
+ }
+ // Use lower-case to be closer to the `HGraphVisualizer` output.
+ id[0] = std::tolower(id[0]);
+ id.append(std::to_string(instruction->GetId()));
+ return id;
+}
+
+// Ideally we would reuse the graph visualizer code, but it is not available
+// from here and it is not worth moving all that code only for our use.
+static void DumpAsDotNode(std::ostream& output, const SchedulingNode* node) {
+ const HInstruction* instruction = node->GetInstruction();
+ // Use the instruction typed id as the node identifier.
+ std::string instruction_id = InstructionTypeId(instruction);
+ output << instruction_id << "[shape=record, label=\""
+ << instruction_id << ' ' << instruction->DebugName() << " [";
+ // List the instruction's inputs in its description. When visualizing the
+ // graph this helps differentiating data inputs from other dependencies.
+ const char* seperator = "";
+ for (const HInstruction* input : instruction->GetInputs()) {
+ output << seperator << InstructionTypeId(input);
+ seperator = ",";
+ }
+ output << "]";
+ // Other properties of the node.
+ output << "\\ninternal_latency: " << node->GetInternalLatency();
+ output << "\\ncritical_path: " << node->GetCriticalPath();
+ if (node->IsSchedulingBarrier()) {
+ output << "\\n(barrier)";
+ }
+ output << "\"];\n";
+ // We want program order to go from top to bottom in the graph output, so we
+ // reverse the edges and specify `dir=back`.
+ for (const SchedulingNode* predecessor : node->GetDataPredecessors()) {
+ const HInstruction* predecessor_instruction = predecessor->GetInstruction();
+ output << InstructionTypeId(predecessor_instruction) << ":s -> " << instruction_id << ":n "
+ << "[label=\"" << predecessor->GetLatency() << "\",dir=back]\n";
+ }
+ for (const SchedulingNode* predecessor : node->GetOtherPredecessors()) {
+ const HInstruction* predecessor_instruction = predecessor->GetInstruction();
+ output << InstructionTypeId(predecessor_instruction) << ":s -> " << instruction_id << ":n "
+ << "[dir=back,color=blue]\n";
+ }
+}
+
+void SchedulingGraph::DumpAsDotGraph(const std::string& description,
+ const ArenaVector<SchedulingNode*>& initial_candidates) {
+ // TODO(xueliang): ideally we should move scheduling information into HInstruction, after that
+ // we should move this dotty graph dump feature to visualizer, and have a compiler option for it.
+ std::ofstream output("scheduling_graphs.dot", std::ofstream::out | std::ofstream::app);
+ // Description of this graph, as a comment.
+ output << "// " << description << "\n";
+ // Start the dot graph. Use an increasing index for easier differentiation.
+ output << "digraph G {\n";
+ for (const auto& entry : nodes_map_) {
+ DumpAsDotNode(output, entry.second);
+ }
+ // Create a fake 'end_of_scheduling' node to help visualization of critical_paths.
+ for (auto node : initial_candidates) {
+ const HInstruction* instruction = node->GetInstruction();
+ output << InstructionTypeId(instruction) << ":s -> end_of_scheduling:n "
+ << "[label=\"" << node->GetLatency() << "\",dir=back]\n";
+ }
+ // End of the dot graph.
+ output << "}\n";
+ output.close();
+}
+
+SchedulingNode* CriticalPathSchedulingNodeSelector::SelectMaterializedCondition(
+ ArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) const {
+ // Schedule condition inputs that can be materialized immediately before their use.
+ // In following example, after we've scheduled HSelect, we want LessThan to be scheduled
+ // immediately, because it is a materialized condition, and will be emitted right before HSelect
+ // in codegen phase.
+ //
+ // i20 HLessThan [...] HLessThan HAdd HAdd
+ // i21 HAdd [...] ===> | | |
+ // i22 HAdd [...] +----------+---------+
+ // i23 HSelect [i21, i22, i20] HSelect
+
+ // No node has been scheduled yet, so there is no previous user to pair a condition with.
+ if (prev_select_ == nullptr) {
+ return nullptr;
+ }
+
+ const HInstruction* instruction = prev_select_->GetInstruction();
+ const HCondition* condition = nullptr;
+ DCHECK(instruction != nullptr);
+
+ // Only HIf and HSelect consume a materialized condition directly.
+ if (instruction->IsIf()) {
+ condition = instruction->AsIf()->InputAt(0)->AsCondition();
+ } else if (instruction->IsSelect()) {
+ condition = instruction->AsSelect()->GetCondition()->AsCondition();
+ }
+
+ SchedulingNode* condition_node = (condition != nullptr) ? graph.GetNode(condition) : nullptr;
+
+ // Note: `condition_node != nullptr` implies `condition != nullptr`, so dereferencing
+ // `condition` below is safe.
+ if ((condition_node != nullptr) &&
+ condition->HasOnlyOneNonEnvironmentUse() &&
+ ContainsElement(*nodes, condition_node)) {
+ DCHECK(!condition_node->HasUnscheduledSuccessors());
+ // Remove the condition from the list of candidates and schedule it.
+ RemoveElement(*nodes, condition_node);
+ return condition_node;
+ }
+
+ return nullptr;
+}
+
+SchedulingNode* CriticalPathSchedulingNodeSelector::PopHighestPriorityNode(
+ ArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) {
+ DCHECK(!nodes->empty());
+ SchedulingNode* select_node = nullptr;
+
+ // Optimize for materialized condition and its emit before use scenario.
+ // If a node is returned here it has already been removed from `nodes`.
+ select_node = SelectMaterializedCondition(nodes, graph);
+
+ if (select_node == nullptr) {
+ // Get highest priority node based on critical path information.
+ select_node = (*nodes)[0];
+ size_t select = 0;
+ for (size_t i = 1, e = nodes->size(); i < e; i++) {
+ SchedulingNode* check = (*nodes)[i];
+ SchedulingNode* candidate = (*nodes)[select];
+ select_node = GetHigherPrioritySchedulingNode(candidate, check);
+ if (select_node == check) {
+ select = i;
+ }
+ }
+ // Note: this swaps the last element into `select`, so candidate order is not preserved.
+ DeleteNodeAtIndex(nodes, select);
+ }
+
+ // Remember the selection so the next call can pair a materialized condition with it.
+ prev_select_ = select_node;
+ return select_node;
+}
+
+// Return whichever of `candidate` and `check` should be scheduled first.
+// Scheduling proceeds backwards (the node popped first is placed last in program
+// order), so the node with the *smaller* critical path is the higher-priority pick:
+// it can afford to be placed later, leaving long-critical-path nodes to be placed
+// earlier in program order.
+SchedulingNode* CriticalPathSchedulingNodeSelector::GetHigherPrioritySchedulingNode(
+ SchedulingNode* candidate, SchedulingNode* check) const {
+ uint32_t candidate_path = candidate->GetCriticalPath();
+ uint32_t check_path = check->GetCriticalPath();
+ // First look at the critical_path.
+ if (check_path != candidate_path) {
+ return check_path < candidate_path ? check : candidate;
+ }
+ // If both critical paths are equal, schedule instructions with a higher latency
+ // first in program order.
+ return check->GetLatency() < candidate->GetLatency() ? check : candidate;
+}
+
+// Entry point: schedule every block of the graph that passes the per-block
+// schedulability filter, visiting blocks in reverse post order.
+void HScheduler::Schedule(HGraph* graph) {
+ for (HBasicBlock* block : graph->GetReversePostOrder()) {
+ if (IsSchedulable(block)) {
+ Schedule(block);
+ }
+ }
+}
+
+// Schedule one basic block: build the dependency graph, then repeatedly pop the
+// selector's highest-priority candidate and place it, working backwards from the
+// last instruction of the block.
+void HScheduler::Schedule(HBasicBlock* block) {
+ ArenaVector<SchedulingNode*> scheduling_nodes(arena_->Adapter(kArenaAllocScheduler));
+
+ // Build the scheduling graph.
+ // Iterating backwards means an instruction's uses are already in the graph
+ // when the instruction itself is added.
+ scheduling_graph_.Clear();
+ for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
+ SchedulingNode* node = scheduling_graph_.AddNode(instruction, IsSchedulingBarrier(instruction));
+ CalculateLatency(node);
+ scheduling_nodes.push_back(node);
+ }
+
+ // With zero or one instruction there is nothing to reorder.
+ if (scheduling_graph_.Size() <= 1) {
+ scheduling_graph_.Clear();
+ return;
+ }
+
+ cursor_ = block->GetLastInstruction();
+
+ // Find the initial candidates for scheduling.
+ candidates_.clear();
+ for (SchedulingNode* node : scheduling_nodes) {
+ if (!node->HasUnscheduledSuccessors()) {
+ node->MaybeUpdateCriticalPath(node->GetLatency());
+ candidates_.push_back(node);
+ }
+ }
+
+ ArenaVector<SchedulingNode*> initial_candidates(arena_->Adapter(kArenaAllocScheduler));
+ if (kDumpDotSchedulingGraphs) {
+ // Remember the list of initial candidates for debug output purposes.
+ initial_candidates.assign(candidates_.begin(), candidates_.end());
+ }
+
+ // Schedule all nodes.
+ while (!candidates_.empty()) {
+ Schedule(selector_->PopHighestPriorityNode(&candidates_, scheduling_graph_));
+ }
+
+ if (kDumpDotSchedulingGraphs) {
+ // Dump the graph in `dot` format.
+ HGraph* graph = block->GetGraph();
+ std::stringstream description;
+ description << graph->GetDexFile().PrettyMethod(graph->GetMethodIdx())
+ << " B" << block->GetBlockId();
+ scheduling_graph_.DumpAsDotGraph(description.str(), initial_candidates);
+ }
+}
+
+// Place one selected node and release any of its predecessors that thereby
+// become valid candidates (i.e. have no remaining unscheduled successors).
+void HScheduler::Schedule(SchedulingNode* scheduling_node) {
+ // Check whether any of the node's predecessors will be valid candidates after
+ // this node is scheduled.
+ uint32_t path_to_node = scheduling_node->GetCriticalPath();
+ for (SchedulingNode* predecessor : scheduling_node->GetDataPredecessors()) {
+ predecessor->MaybeUpdateCriticalPath(
+ path_to_node + predecessor->GetInternalLatency() + predecessor->GetLatency());
+ predecessor->DecrementNumberOfUnscheduledSuccessors();
+ if (!predecessor->HasUnscheduledSuccessors()) {
+ candidates_.push_back(predecessor);
+ }
+ }
+ for (SchedulingNode* predecessor : scheduling_node->GetOtherPredecessors()) {
+ // Do not update the critical path.
+ // The 'other' (so 'non-data') dependencies (usually) do not represent a
+ // 'material' dependency of nodes on others. They exist for program
+ // correctness. So we do not use them to compute the critical path.
+ predecessor->DecrementNumberOfUnscheduledSuccessors();
+ if (!predecessor->HasUnscheduledSuccessors()) {
+ candidates_.push_back(predecessor);
+ }
+ }
+
+ Schedule(scheduling_node->GetInstruction());
+}
+
+// Move an instruction after cursor instruction inside one basic block.
+static void MoveAfterInBlock(HInstruction* instruction, HInstruction* cursor) {
+ DCHECK_EQ(instruction->GetBlock(), cursor->GetBlock());
+ // The cursor is never the block's last instruction, so `GetNext()` below is non-null.
+ DCHECK_NE(cursor, cursor->GetBlock()->GetLastInstruction());
+ DCHECK(!instruction->IsControlFlow());
+ DCHECK(!cursor->IsControlFlow());
+ instruction->MoveBefore(cursor->GetNext(), /* do_checks */ false);
+}
+
+// Emit `instruction` at the current insertion point. If it is already at the
+// cursor it stays in place and the cursor simply retreats; otherwise it is
+// moved to immediately after the cursor.
+void HScheduler::Schedule(HInstruction* instruction) {
+ if (instruction == cursor_) {
+ cursor_ = cursor_->GetPrevious();
+ } else {
+ MoveAfterInBlock(instruction, cursor_);
+ }
+}
+
+// Whitelist of instructions the scheduler knows how to handle safely.
+// Any instruction not accepted here prevents its whole block from being scheduled.
+bool HScheduler::IsSchedulable(const HInstruction* instruction) const {
+ // We want to avoid exhaustively listing all instructions, so we first check
+ // for instruction categories that we know are safe.
+ if (instruction->IsControlFlow() ||
+ instruction->IsConstant()) {
+ return true;
+ }
+ // Currently all unary and binary operations are safe to schedule, so avoid
+ // checking for each of them individually.
+ // Since nothing prevents a new scheduling-unsafe HInstruction to subclass
+ // HUnaryOperation (or HBinaryOperation), check in debug mode that we have
+ // the exhaustive lists here.
+ if (instruction->IsUnaryOperation()) {
+ DCHECK(instruction->IsBooleanNot() ||
+ instruction->IsNot() ||
+ instruction->IsNeg()) << "unexpected instruction " << instruction->DebugName();
+ return true;
+ }
+ if (instruction->IsBinaryOperation()) {
+ DCHECK(instruction->IsAdd() ||
+ instruction->IsAnd() ||
+ instruction->IsCompare() ||
+ instruction->IsCondition() ||
+ instruction->IsDiv() ||
+ instruction->IsMul() ||
+ instruction->IsOr() ||
+ instruction->IsRem() ||
+ instruction->IsRor() ||
+ instruction->IsShl() ||
+ instruction->IsShr() ||
+ instruction->IsSub() ||
+ instruction->IsUShr() ||
+ instruction->IsXor()) << "unexpected instruction " << instruction->DebugName();
+ return true;
+ }
+ // The scheduler should not see any of these.
+ DCHECK(!instruction->IsParallelMove()) << "unexpected instruction " << instruction->DebugName();
+ // List of instructions explicitly excluded:
+ // HClearException
+ // HClinitCheck
+ // HDeoptimize
+ // HLoadClass
+ // HLoadException
+ // HMemoryBarrier
+ // HMonitorOperation
+ // HNativeDebugInfo
+ // HThrow
+ // HTryBoundary
+ // TODO: Some of the instructions above may be safe to schedule (maybe as
+ // scheduling barriers).
+ return instruction->IsArrayGet() ||
+ instruction->IsArraySet() ||
+ instruction->IsArrayLength() ||
+ instruction->IsBoundType() ||
+ instruction->IsBoundsCheck() ||
+ instruction->IsCheckCast() ||
+ instruction->IsClassTableGet() ||
+ instruction->IsCurrentMethod() ||
+ instruction->IsDivZeroCheck() ||
+ instruction->IsInstanceFieldGet() ||
+ instruction->IsInstanceFieldSet() ||
+ instruction->IsInstanceOf() ||
+ instruction->IsInvokeInterface() ||
+ instruction->IsInvokeStaticOrDirect() ||
+ instruction->IsInvokeUnresolved() ||
+ instruction->IsInvokeVirtual() ||
+ instruction->IsLoadString() ||
+ instruction->IsNewArray() ||
+ instruction->IsNewInstance() ||
+ instruction->IsNullCheck() ||
+ instruction->IsPackedSwitch() ||
+ instruction->IsParameterValue() ||
+ instruction->IsPhi() ||
+ instruction->IsReturn() ||
+ instruction->IsReturnVoid() ||
+ instruction->IsSelect() ||
+ instruction->IsStaticFieldGet() ||
+ instruction->IsStaticFieldSet() ||
+ instruction->IsSuspendCheck() ||
+ instruction->IsTypeConversion() ||
+ instruction->IsUnresolvedInstanceFieldGet() ||
+ instruction->IsUnresolvedInstanceFieldSet() ||
+ instruction->IsUnresolvedStaticFieldGet() ||
+ instruction->IsUnresolvedStaticFieldSet();
+}
+
+// A block is schedulable when it passes the loop-only filter, is not part of a
+// try-catch region, and every instruction it contains is individually schedulable.
+bool HScheduler::IsSchedulable(const HBasicBlock* block) const {
+ // We may be only interested in loop blocks.
+ if (only_optimize_loop_blocks_ && !block->IsInLoop()) {
+ return false;
+ }
+ if (block->GetTryCatchInformation() != nullptr) {
+ // Do not schedule blocks that are part of try-catch.
+ // Because scheduler cannot see if catch block has assumptions on the instruction order in
+ // the try block. In following example, if we enable scheduler for the try block,
+ // MultiplyAccumulate may be scheduled before DivZeroCheck,
+ // which can result in an incorrect value in the catch block.
+ // try {
+ // a = a/b; // DivZeroCheck
+ // // Div
+ // c = c*d+e; // MultiplyAccumulate
+ // } catch {System.out.print(c); }
+ return false;
+ }
+ // Check whether all instructions in this block are schedulable.
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ if (!IsSchedulable(it.Current())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Default set of scheduling barriers; architecture-specific schedulers may
+// override this to add more.
+bool HScheduler::IsSchedulingBarrier(const HInstruction* instr) const {
+ return instr->IsControlFlow() ||
+ // Don't break calling convention.
+ instr->IsParameterValue() ||
+ // Code generation of goto relies on SuspendCheck's position.
+ instr->IsSuspendCheck();
+}
+
+// Dispatch to the architecture-specific scheduler. Currently only ARM64 is
+// supported; other instruction sets fall through and are left untouched.
+void HInstructionScheduling::Run(bool only_optimize_loop_blocks,
+ bool schedule_randomly) {
+ // Avoid compilation error when compiling for unsupported instruction set.
+ UNUSED(only_optimize_loop_blocks);
+ UNUSED(schedule_randomly);
+ switch (instruction_set_) {
+#ifdef ART_ENABLE_CODEGEN_arm64
+ case kArm64: {
+ // Phase-local allocator that allocates scheduler internal data structures like
+ // scheduling nodes, internal nodes map, dependencies, etc.
+ ArenaAllocator arena_allocator(graph_->GetArena()->GetArenaPool());
+
+ // The random selector exists for testing/fuzzing; the critical-path selector
+ // is the normal production heuristic.
+ CriticalPathSchedulingNodeSelector critical_path_selector;
+ RandomSchedulingNodeSelector random_selector;
+ SchedulingNodeSelector* selector = schedule_randomly
+ ? static_cast<SchedulingNodeSelector*>(&random_selector)
+ : static_cast<SchedulingNodeSelector*>(&critical_path_selector);
+
+ arm64::HSchedulerARM64 scheduler(&arena_allocator, selector);
+ scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
+ scheduler.Schedule(graph_);
+ break;
+ }
+#endif
+ default:
+ break;
+ }
+}
+
+} // namespace art
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
new file mode 100644
index 0000000..ab0dad4
--- /dev/null
+++ b/compiler/optimizing/scheduler.h
@@ -0,0 +1,487 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_SCHEDULER_H_
+#define ART_COMPILER_OPTIMIZING_SCHEDULER_H_
+
+#include <fstream>
+
+#include "base/time_utils.h"
+#include "driver/compiler_driver.h"
+#include "nodes.h"
+#include "optimization.h"
+
+namespace art {
+
+// General description of instruction scheduling.
+//
+// This pass tries to improve the quality of the generated code by reordering
+// instructions in the graph to avoid execution delays caused by execution
+// dependencies.
+// Currently, scheduling is performed at the block level, so no `HInstruction`
+// ever leaves its block in this pass.
+//
+// The scheduling process iterates through blocks in the graph. For blocks that
+// we can and want to schedule:
+// 1) Build a dependency graph for instructions.
+// It includes data dependencies (inputs/uses), but also environment
+// dependencies and side-effect dependencies.
+// 2) Schedule the dependency graph.
+// This is a topological sort of the dependency graph, using heuristics to
+// decide what node to schedule first when there are multiple candidates.
+//
+// A few factors impacting the quality of the scheduling are:
+// - The heuristics used to decide what node to schedule in the topological sort
+// when there are multiple valid candidates. There is a wide range of
+// complexity possible here, going from a simple model only considering
+// latencies, to a super detailed CPU pipeline model.
+// - Fewer dependencies in the dependency graph give more freedom for the
+// scheduling heuristics. For example de-aliasing can allow possibilities for
+// reordering of memory accesses.
+// - The level of abstraction of the IR. It is easier to evaluate scheduling for
+// IRs that translate to a single assembly instruction than for IRs
+// that generate multiple assembly instructions or generate different code
+// depending on properties of the IR.
+// - Scheduling is performed before register allocation, it is not aware of the
+// impact of moving instructions on register allocation.
+//
+//
+// The scheduling code uses the terms predecessors, successors, and dependencies.
+// This can be confusing at times, so here are clarifications.
+// These terms are used from the point of view of the program dependency graph. So
+// the inputs of an instruction are part of its dependencies, and hence part of its
+// predecessors. So the uses of an instruction are (part of) its successors.
+// (Side-effect dependencies can yield predecessors or successors that are not
+// inputs or uses.)
+//
+// Here is a trivial example. For the Java code:
+//
+// int a = 1 + 2;
+//
+// we would have the instructions
+//
+// i1 HIntConstant 1
+// i2 HIntConstant 2
+// i3 HAdd [i1,i2]
+//
+// `i1` and `i2` are predecessors of `i3`.
+// `i3` is a successor of `i1` and a successor of `i2`.
+// In a scheduling graph for this code we would have three nodes `n1`, `n2`,
+// and `n3` (respectively for instructions `i1`, `i2`, and `i3`).
+// Conceptually the program dependency graph for this would contain two edges
+//
+// n1 -> n3
+// n2 -> n3
+//
+// Since we schedule backwards (starting from the last instruction in each basic
+// block), the implementation of nodes keeps a list of pointers to their
+// predecessors. So `n3` would keep pointers to its predecessors `n1` and `n2`.
+//
+// Node dependencies are also referred to from the program dependency graph
+// point of view: we say that node `B` immediately depends on `A` if there is an
+// edge from `A` to `B` in the program dependency graph. `A` is a predecessor of
+// `B`, `B` is a successor of `A`. In the example above `n3` depends on `n1` and
+// `n2`.
+// Since nodes in the scheduling graph keep a list of their predecessors, node
+// `B` will have a pointer to its predecessor `A`.
+// As we schedule backwards, `B` will be selected for scheduling before `A` is.
+//
+// So the scheduling for the example above could happen as follows
+//
+// |---------------------------+------------------------|
+// | candidates for scheduling | instructions scheduled |
+// | --------------------------+------------------------|
+//
+// The only node without successors is `n3`, so it is the only initial
+// candidate.
+//
+// | n3 | (none) |
+//
+// We schedule `n3` as the last (and only) instruction. All its predecessors
+// that do not have any unscheduled successors become candidates. That is, `n1`
+// and `n2` become candidates.
+//
+// | n1, n2 | n3 |
+//
+// One of the candidates is selected. In practice this is where scheduling
+// heuristics kick in, to decide which of the candidates should be selected.
+// In this example, let it be `n1`. It is scheduled before previously scheduled
+// nodes (in program order). There are no other nodes to add to the list of
+// candidates.
+//
+// | n2 | n1 |
+// | | n3 |
+//
+// The only candidate available for scheduling is `n2`. Schedule it before
+// (in program order) the previously scheduled nodes.
+//
+// | (none) | n2 |
+// | | n1 |
+// | | n3 |
+// |---------------------------+------------------------|
+//
+// So finally the instructions will be executed in the order `i2`, `i1`, and `i3`.
+// In this trivial example, it does not matter which of `i1` and `i2` is
+// scheduled first since they are constants. However the same process would
+// apply if `i1` and `i2` were actual operations (for example `HMul` and `HDiv`).
+
+// Set to true to have instruction scheduling dump scheduling graphs to the file
+// `scheduling_graphs.dot`. See `SchedulingGraph::DumpAsDotGraph()`.
+static constexpr bool kDumpDotSchedulingGraphs = false;
+
+// Typically used as a default instruction latency.
+static constexpr uint32_t kGenericInstructionLatency = 1;
+
+class HScheduler;
+
+/**
+ * A node representing an `HInstruction` in the `SchedulingGraph`.
+ */
+class SchedulingNode : public ArenaObject<kArenaAllocScheduler> {
+ public:
+ SchedulingNode(HInstruction* instr, ArenaAllocator* arena, bool is_scheduling_barrier)
+ : latency_(0),
+ internal_latency_(0),
+ critical_path_(0),
+ instruction_(instr),
+ is_scheduling_barrier_(is_scheduling_barrier),
+ data_predecessors_(arena->Adapter(kArenaAllocScheduler)),
+ other_predecessors_(arena->Adapter(kArenaAllocScheduler)),
+ num_unscheduled_successors_(0) {
+ data_predecessors_.reserve(kPreallocatedPredecessors);
+ }
+
+ // Registering `predecessor` also records that `this` is one of its (not yet
+ // scheduled) successors.
+ void AddDataPredecessor(SchedulingNode* predecessor) {
+ data_predecessors_.push_back(predecessor);
+ predecessor->num_unscheduled_successors_++;
+ }
+
+ void AddOtherPredecessor(SchedulingNode* predecessor) {
+ other_predecessors_.push_back(predecessor);
+ predecessor->num_unscheduled_successors_++;
+ }
+
+ void DecrementNumberOfUnscheduledSuccessors() {
+ num_unscheduled_successors_--;
+ }
+
+ // Keep the maximum critical path seen so far across all scheduled successors.
+ void MaybeUpdateCriticalPath(uint32_t other_critical_path) {
+ critical_path_ = std::max(critical_path_, other_critical_path);
+ }
+
+ bool HasUnscheduledSuccessors() const {
+ return num_unscheduled_successors_ != 0;
+ }
+
+ HInstruction* GetInstruction() const { return instruction_; }
+ uint32_t GetLatency() const { return latency_; }
+ void SetLatency(uint32_t latency) { latency_ = latency; }
+ uint32_t GetInternalLatency() const { return internal_latency_; }
+ void SetInternalLatency(uint32_t internal_latency) { internal_latency_ = internal_latency; }
+ uint32_t GetCriticalPath() const { return critical_path_; }
+ bool IsSchedulingBarrier() const { return is_scheduling_barrier_; }
+ const ArenaVector<SchedulingNode*>& GetDataPredecessors() const { return data_predecessors_; }
+ const ArenaVector<SchedulingNode*>& GetOtherPredecessors() const { return other_predecessors_; }
+
+ private:
+ // The latency of this node. It represents the latency between the moment the
+ // last instruction for this node has executed to the moment the result
+ // produced by this node is available to users.
+ uint32_t latency_;
+ // This represents the time spent *within* the generated code for this node.
+ // It should be zero for nodes that only generate a single instruction.
+ uint32_t internal_latency_;
+
+ // The critical path from this instruction to the end of scheduling. It is
+ // used by the scheduling heuristics to measure the priority of this instruction.
+ // It is defined as
+ // critical_path_ = latency_ + max((use.internal_latency_ + use.critical_path_) for all uses)
+ // (Note that here 'uses' is equivalent to 'data successors'. Also see comments in
+ // `HScheduler::Schedule(SchedulingNode* scheduling_node)`).
+ uint32_t critical_path_;
+
+ // The instruction that this node represents.
+ HInstruction* const instruction_;
+
+ // If a node is scheduling barrier, other nodes cannot be scheduled before it.
+ const bool is_scheduling_barrier_;
+
+ // The lists of predecessors. They cannot be scheduled before this node. Once
+ // this node is scheduled, we check whether any of its predecessors has become a
+ // valid candidate for scheduling.
+ // Predecessors in `data_predecessors_` are data dependencies. Those in
+ // `other_predecessors_` contain side-effect dependencies, environment
+ // dependencies, and scheduling barrier dependencies.
+ ArenaVector<SchedulingNode*> data_predecessors_;
+ ArenaVector<SchedulingNode*> other_predecessors_;
+
+ // The number of unscheduled successors for this node. This number is
+ // decremented as successors are scheduled. When it reaches zero this node
+ // becomes a valid candidate to schedule.
+ uint32_t num_unscheduled_successors_;
+
+ static constexpr size_t kPreallocatedPredecessors = 4;
+};
+
+/*
+ * Directed acyclic graph for scheduling.
+ */
+class SchedulingGraph : public ValueObject {
+ public:
+ SchedulingGraph(const HScheduler* scheduler, ArenaAllocator* arena)
+ : scheduler_(scheduler),
+ arena_(arena),
+ contains_scheduling_barrier_(false),
+ nodes_map_(arena_->Adapter(kArenaAllocScheduler)) {}
+
+ // Create a node for `instr` and wire up its dependencies. Instructions must be
+ // added in reverse program order so that an instruction's uses are already in
+ // the map when `AddDependencies()` runs.
+ SchedulingNode* AddNode(HInstruction* instr, bool is_scheduling_barrier = false) {
+ SchedulingNode* node = new (arena_) SchedulingNode(instr, arena_, is_scheduling_barrier);
+ nodes_map_.Insert(std::make_pair(instr, node));
+ contains_scheduling_barrier_ |= is_scheduling_barrier;
+ AddDependencies(instr, is_scheduling_barrier);
+ return node;
+ }
+
+ void Clear() {
+ nodes_map_.Clear();
+ contains_scheduling_barrier_ = false;
+ }
+
+ // Return the node for `instr`, or nullptr if it is not part of this graph.
+ SchedulingNode* GetNode(const HInstruction* instr) const {
+ auto it = nodes_map_.Find(instr);
+ if (it == nodes_map_.end()) {
+ return nullptr;
+ } else {
+ return it->second;
+ }
+ }
+
+ bool IsSchedulingBarrier(const HInstruction* instruction) const;
+
+ bool HasImmediateDataDependency(const SchedulingNode* node, const SchedulingNode* other) const;
+ bool HasImmediateDataDependency(const HInstruction* node, const HInstruction* other) const;
+ bool HasImmediateOtherDependency(const SchedulingNode* node, const SchedulingNode* other) const;
+ bool HasImmediateOtherDependency(const HInstruction* node, const HInstruction* other) const;
+
+ size_t Size() const {
+ return nodes_map_.Size();
+ }
+
+ // Dump the scheduling graph, in dot file format, appending it to the file
+ // `scheduling_graphs.dot`.
+ void DumpAsDotGraph(const std::string& description,
+ const ArenaVector<SchedulingNode*>& initial_candidates);
+
+ protected:
+ void AddDependency(SchedulingNode* node, SchedulingNode* dependency, bool is_data_dependency);
+ void AddDataDependency(SchedulingNode* node, SchedulingNode* dependency) {
+ AddDependency(node, dependency, /*is_data_dependency*/true);
+ }
+ void AddOtherDependency(SchedulingNode* node, SchedulingNode* dependency) {
+ AddDependency(node, dependency, /*is_data_dependency*/false);
+ }
+
+ // Add dependencies nodes for the given `HInstruction`: inputs, environments, and side-effects.
+ void AddDependencies(HInstruction* instruction, bool is_scheduling_barrier = false);
+
+ const HScheduler* const scheduler_;
+
+ ArenaAllocator* const arena_;
+
+ // True once any node added to this graph was a scheduling barrier.
+ bool contains_scheduling_barrier_;
+
+ // Maps each instruction of the block being scheduled to its node.
+ ArenaHashMap<const HInstruction*, SchedulingNode*> nodes_map_;
+};
+
+/*
+ * The visitors derived from this base class are used by schedulers to evaluate
+ * the latencies of `HInstruction`s.
+ */
+class SchedulingLatencyVisitor : public HGraphDelegateVisitor {
+ public:
+ // This class and its sub-classes will never be used to drive a visit of an
+ // `HGraph` but only to visit `HInstructions` one at a time, so we do not need
+ // to pass a valid graph to `HGraphDelegateVisitor()`.
+ SchedulingLatencyVisitor() : HGraphDelegateVisitor(nullptr) {}
+
+ // Fallback for instructions a sub-class does not handle: this is a hard error,
+ // since returning a bogus latency would silently skew scheduling decisions.
+ void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ LOG(FATAL) << "Error visiting " << instruction->DebugName() << ". "
+ "Architecture-specific scheduling latency visitors must handle all instructions"
+ " (potentially by overriding the generic `VisitInstruction()`).";
+ UNREACHABLE();
+ }
+
+ void Visit(HInstruction* instruction) {
+ instruction->Accept(this);
+ }
+
+ // Compute the latencies for `node`; results are reported through the
+ // `GetLastVisited*Latency()` accessors below.
+ void CalculateLatency(SchedulingNode* node) {
+ // By default nodes have no internal latency.
+ last_visited_internal_latency_ = 0;
+ Visit(node->GetInstruction());
+ }
+
+ uint32_t GetLastVisitedLatency() const { return last_visited_latency_; }
+ uint32_t GetLastVisitedInternalLatency() const { return last_visited_internal_latency_; }
+
+ protected:
+ // The latency of the most recent visited SchedulingNode.
+ // This is for reporting the latency value to the user of this visitor.
+ uint32_t last_visited_latency_;
+ // This represents the time spent *within* the generated code for the most recent visited
+ // SchedulingNode. This is for reporting the internal latency value to the user of this visitor.
+ uint32_t last_visited_internal_latency_;
+};
+
+// Interface for the heuristic that picks which candidate to schedule next.
+class SchedulingNodeSelector : public ArenaObject<kArenaAllocScheduler> {
+ public:
+ virtual SchedulingNode* PopHighestPriorityNode(ArenaVector<SchedulingNode*>* nodes,
+ const SchedulingGraph& graph) = 0;
+ virtual ~SchedulingNodeSelector() {}
+ protected:
+ // O(1) removal by swapping with the last element; the order of `nodes` is not preserved.
+ static void DeleteNodeAtIndex(ArenaVector<SchedulingNode*>* nodes, size_t index) {
+ (*nodes)[index] = nodes->back();
+ nodes->pop_back();
+ }
+};
+
+/*
+ * Select a `SchedulingNode` at random within the candidates.
+ */
+class RandomSchedulingNodeSelector : public SchedulingNodeSelector {
+ public:
+ // NOTE(review): `explicit` has no effect on a zero-argument constructor.
+ explicit RandomSchedulingNodeSelector() : seed_(0) {
+ seed_ = static_cast<uint32_t>(NanoTime());
+ srand(seed_);
+ }
+
+ SchedulingNode* PopHighestPriorityNode(ArenaVector<SchedulingNode*>* nodes,
+ const SchedulingGraph& graph) OVERRIDE {
+ UNUSED(graph);
+ DCHECK(!nodes->empty());
+ size_t select = rand_r(&seed_) % nodes->size();
+ SchedulingNode* select_node = (*nodes)[select];
+ DeleteNodeAtIndex(nodes, select);
+ return select_node;
+ }
+
+ // Seed for `rand_r()`; updated in place on every call.
+ uint32_t seed_;
+};
+
+/*
+ * Select a `SchedulingNode` according to critical path information,
+ * with heuristics to favor certain instruction patterns like materialized condition.
+ */
+class CriticalPathSchedulingNodeSelector : public SchedulingNodeSelector {
+ public:
+ CriticalPathSchedulingNodeSelector() : prev_select_(nullptr) {}
+
+ SchedulingNode* PopHighestPriorityNode(ArenaVector<SchedulingNode*>* nodes,
+ const SchedulingGraph& graph) OVERRIDE;
+
+ protected:
+ SchedulingNode* GetHigherPrioritySchedulingNode(SchedulingNode* candidate,
+ SchedulingNode* check) const;
+
+ SchedulingNode* SelectMaterializedCondition(ArenaVector<SchedulingNode*>* nodes,
+ const SchedulingGraph& graph) const;
+
+ private:
+ // The node returned by the previous `PopHighestPriorityNode()` call; used to
+ // pair a materialized condition with the HIf/HSelect that consumes it.
+ const SchedulingNode* prev_select_;
+};
+
+// Base class for architecture-specific instruction schedulers. Drives the
+// per-block list scheduling; latency evaluation and schedulability filtering
+// are delegated to the visitor/selector and to virtual hooks.
+class HScheduler {
+ public:
+ HScheduler(ArenaAllocator* arena,
+ SchedulingLatencyVisitor* latency_visitor,
+ SchedulingNodeSelector* selector)
+ : arena_(arena),
+ latency_visitor_(latency_visitor),
+ selector_(selector),
+ only_optimize_loop_blocks_(true),
+ scheduling_graph_(this, arena),
+ candidates_(arena_->Adapter(kArenaAllocScheduler)) {}
+ virtual ~HScheduler() {}
+
+ void Schedule(HGraph* graph);
+
+ void SetOnlyOptimizeLoopBlocks(bool loop_only) { only_optimize_loop_blocks_ = loop_only; }
+
+ // Instructions can not be rescheduled across a scheduling barrier.
+ virtual bool IsSchedulingBarrier(const HInstruction* instruction) const;
+
+ protected:
+ void Schedule(HBasicBlock* block);
+ void Schedule(SchedulingNode* scheduling_node);
+ void Schedule(HInstruction* instruction);
+
+ // Any instruction returning `false` via this method will prevent its
+ // containing basic block from being scheduled.
+ // This method is used to restrict scheduling to instructions that we know are
+ // safe to handle.
+ virtual bool IsSchedulable(const HInstruction* instruction) const;
+ bool IsSchedulable(const HBasicBlock* block) const;
+
+ // Evaluate `node`'s latencies via the visitor and cache them on the node.
+ void CalculateLatency(SchedulingNode* node) {
+ latency_visitor_->CalculateLatency(node);
+ node->SetLatency(latency_visitor_->GetLastVisitedLatency());
+ node->SetInternalLatency(latency_visitor_->GetLastVisitedInternalLatency());
+ }
+
+ ArenaAllocator* const arena_;
+ SchedulingLatencyVisitor* const latency_visitor_;
+ SchedulingNodeSelector* const selector_;
+ bool only_optimize_loop_blocks_;
+
+ // We instantiate the members below as part of this class to avoid
+ // instantiating them locally for every chunk scheduled.
+ SchedulingGraph scheduling_graph_;
+ // A pointer indicating where the next instruction to be scheduled will be inserted.
+ // Initialized in `Schedule(HBasicBlock*)` before any use.
+ HInstruction* cursor_;
+ // The list of candidates for scheduling. A node becomes a candidate when all
+ // its predecessors have been scheduled.
+ ArenaVector<SchedulingNode*> candidates_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HScheduler);
+};
+
+// Defined out of line because it needs the complete `HScheduler` type.
+inline bool SchedulingGraph::IsSchedulingBarrier(const HInstruction* instruction) const {
+ return scheduler_->IsSchedulingBarrier(instruction);
+}
+
+// The optimization-pass wrapper that plugs instruction scheduling into the
+// optimizing compiler pipeline.
+class HInstructionScheduling : public HOptimization {
+ public:
+ HInstructionScheduling(HGraph* graph, InstructionSet instruction_set)
+ : HOptimization(graph, kInstructionScheduling),
+ instruction_set_(instruction_set) {}
+
+ void Run() {
+ Run(/*only_optimize_loop_blocks*/ true, /*schedule_randomly*/ false);
+ }
+ void Run(bool only_optimize_loop_blocks, bool schedule_randomly);
+
+ static constexpr const char* kInstructionScheduling = "scheduler";
+
+ const InstructionSet instruction_set_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HInstructionScheduling);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_SCHEDULER_H_
diff --git a/compiler/optimizing/scheduler_arm64.cc b/compiler/optimizing/scheduler_arm64.cc
new file mode 100644
index 0000000..e3701fb
--- /dev/null
+++ b/compiler/optimizing/scheduler_arm64.cc
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "scheduler_arm64.h"
+#include "code_generator_utils.h"
+
+namespace art {
+namespace arm64 {
+
+void SchedulingLatencyVisitorARM64::VisitBinaryOperation(HBinaryOperation* instr) {
+ last_visited_latency_ = Primitive::IsFloatingPointType(instr->GetResultType())
+ ? kArm64FloatingPointOpLatency
+ : kArm64IntegerOpLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitBitwiseNegatedRight(
+ HBitwiseNegatedRight* ATTRIBUTE_UNUSED) {
+ last_visited_latency_ = kArm64IntegerOpLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitArm64DataProcWithShifterOp(
+ HArm64DataProcWithShifterOp* ATTRIBUTE_UNUSED) {
+ last_visited_latency_ = kArm64DataProcWithShifterOpLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitIntermediateAddress(
+ HIntermediateAddress* ATTRIBUTE_UNUSED) {
+ // Although the code generated is a simple `add` instruction, we found through empirical results
+ // that spacing it from its use in memory accesses was beneficial.
+ last_visited_latency_ = kArm64IntegerOpLatency + 2;
+}
+
+void SchedulingLatencyVisitorARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* ATTRIBUTE_UNUSED) {
+ last_visited_latency_ = kArm64MulIntegerLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitArrayGet(HArrayGet* instruction) {
+ if (!instruction->GetArray()->IsIntermediateAddress()) {
+ // Take the intermediate address computation into account.
+ last_visited_internal_latency_ = kArm64IntegerOpLatency;
+ }
+ last_visited_latency_ = kArm64MemoryLoadLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitArrayLength(HArrayLength* ATTRIBUTE_UNUSED) {
+ last_visited_latency_ = kArm64MemoryLoadLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitArraySet(HArraySet* ATTRIBUTE_UNUSED) {
+ last_visited_latency_ = kArm64MemoryStoreLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitBoundsCheck(HBoundsCheck* ATTRIBUTE_UNUSED) {
+ last_visited_internal_latency_ = kArm64IntegerOpLatency;
+ // Users do not use any data results.
+ last_visited_latency_ = 0;
+}
+
+void SchedulingLatencyVisitorARM64::VisitDiv(HDiv* instr) {
+ Primitive::Type type = instr->GetResultType();
+ switch (type) {
+ case Primitive::kPrimFloat:
+ last_visited_latency_ = kArm64DivFloatLatency;
+ break;
+ case Primitive::kPrimDouble:
+ last_visited_latency_ = kArm64DivDoubleLatency;
+ break;
+ default:
+ // Follow the code path used by code generation.
+ if (instr->GetRight()->IsConstant()) {
+ int64_t imm = Int64FromConstant(instr->GetRight()->AsConstant());
+ if (imm == 0) {
+ last_visited_internal_latency_ = 0;
+ last_visited_latency_ = 0;
+ } else if (imm == 1 || imm == -1) {
+ last_visited_internal_latency_ = 0;
+ last_visited_latency_ = kArm64IntegerOpLatency;
+ } else if (IsPowerOfTwo(AbsOrMin(imm))) {
+ last_visited_internal_latency_ = 4 * kArm64IntegerOpLatency;
+ last_visited_latency_ = kArm64IntegerOpLatency;
+ } else {
+ DCHECK(imm <= -2 || imm >= 2);
+ last_visited_internal_latency_ = 4 * kArm64IntegerOpLatency;
+ last_visited_latency_ = kArm64MulIntegerLatency;
+ }
+ } else {
+ last_visited_latency_ = kArm64DivIntegerLatency;
+ }
+ break;
+ }
+}
+
+void SchedulingLatencyVisitorARM64::VisitInstanceFieldGet(HInstanceFieldGet* ATTRIBUTE_UNUSED) {
+ last_visited_latency_ = kArm64MemoryLoadLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitInstanceOf(HInstanceOf* ATTRIBUTE_UNUSED) {
+ last_visited_internal_latency_ = kArm64CallInternalLatency;
+ last_visited_latency_ = kArm64IntegerOpLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitInvoke(HInvoke* ATTRIBUTE_UNUSED) {
+ last_visited_internal_latency_ = kArm64CallInternalLatency;
+ last_visited_latency_ = kArm64CallLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitLoadString(HLoadString* ATTRIBUTE_UNUSED) {
+ last_visited_internal_latency_ = kArm64LoadStringInternalLatency;
+ last_visited_latency_ = kArm64MemoryLoadLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitMul(HMul* instr) {
+ last_visited_latency_ = Primitive::IsFloatingPointType(instr->GetResultType())
+ ? kArm64MulFloatingPointLatency
+ : kArm64MulIntegerLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitNewArray(HNewArray* ATTRIBUTE_UNUSED) {
+ last_visited_internal_latency_ = kArm64IntegerOpLatency + kArm64CallInternalLatency;
+ last_visited_latency_ = kArm64CallLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitNewInstance(HNewInstance* instruction) {
+ if (instruction->IsStringAlloc()) {
+ last_visited_internal_latency_ = 2 + kArm64MemoryLoadLatency + kArm64CallInternalLatency;
+ } else {
+ last_visited_internal_latency_ = kArm64CallInternalLatency;
+ }
+ last_visited_latency_ = kArm64CallLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitRem(HRem* instruction) {
+ if (Primitive::IsFloatingPointType(instruction->GetResultType())) {
+ last_visited_internal_latency_ = kArm64CallInternalLatency;
+ last_visited_latency_ = kArm64CallLatency;
+ } else {
+ // Follow the code path used by code generation.
+ if (instruction->GetRight()->IsConstant()) {
+ int64_t imm = Int64FromConstant(instruction->GetRight()->AsConstant());
+ if (imm == 0) {
+ last_visited_internal_latency_ = 0;
+ last_visited_latency_ = 0;
+ } else if (imm == 1 || imm == -1) {
+ last_visited_internal_latency_ = 0;
+ last_visited_latency_ = kArm64IntegerOpLatency;
+ } else if (IsPowerOfTwo(AbsOrMin(imm))) {
+ last_visited_internal_latency_ = 4 * kArm64IntegerOpLatency;
+ last_visited_latency_ = kArm64IntegerOpLatency;
+ } else {
+ DCHECK(imm <= -2 || imm >= 2);
+ last_visited_internal_latency_ = 4 * kArm64IntegerOpLatency;
+ last_visited_latency_ = kArm64MulIntegerLatency;
+ }
+ } else {
+ last_visited_internal_latency_ = kArm64DivIntegerLatency;
+ last_visited_latency_ = kArm64MulIntegerLatency;
+ }
+ }
+}
+
+void SchedulingLatencyVisitorARM64::VisitStaticFieldGet(HStaticFieldGet* ATTRIBUTE_UNUSED) {
+ last_visited_latency_ = kArm64MemoryLoadLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
+ HBasicBlock* block = instruction->GetBlock();
+ DCHECK((block->GetLoopInformation() != nullptr) ||
+ (block->IsEntryBlock() && instruction->GetNext()->IsGoto()));
+ // Users do not use any data results.
+ last_visited_latency_ = 0;
+}
+
+void SchedulingLatencyVisitorARM64::VisitTypeConversion(HTypeConversion* instr) {
+ if (Primitive::IsFloatingPointType(instr->GetResultType()) ||
+ Primitive::IsFloatingPointType(instr->GetInputType())) {
+ last_visited_latency_ = kArm64TypeConversionFloatingPointIntegerLatency;
+ } else {
+ last_visited_latency_ = kArm64IntegerOpLatency;
+ }
+}
+
+} // namespace arm64
+} // namespace art
diff --git a/compiler/optimizing/scheduler_arm64.h b/compiler/optimizing/scheduler_arm64.h
new file mode 100644
index 0000000..702027c
--- /dev/null
+++ b/compiler/optimizing/scheduler_arm64.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_SCHEDULER_ARM64_H_
+#define ART_COMPILER_OPTIMIZING_SCHEDULER_ARM64_H_
+
+#include "scheduler.h"
+
+namespace art {
+namespace arm64 {
+
+static constexpr uint32_t kArm64MemoryLoadLatency = 5;
+static constexpr uint32_t kArm64MemoryStoreLatency = 3;
+
+static constexpr uint32_t kArm64CallInternalLatency = 10;
+static constexpr uint32_t kArm64CallLatency = 5;
+
+// AArch64 instruction latency.
+// We currently assume that all arm64 CPUs share the same instruction latency list.
+static constexpr uint32_t kArm64IntegerOpLatency = 2;
+static constexpr uint32_t kArm64FloatingPointOpLatency = 5;
+
+
+static constexpr uint32_t kArm64DataProcWithShifterOpLatency = 3;
+static constexpr uint32_t kArm64DivDoubleLatency = 30;
+static constexpr uint32_t kArm64DivFloatLatency = 15;
+static constexpr uint32_t kArm64DivIntegerLatency = 5;
+static constexpr uint32_t kArm64LoadStringInternalLatency = 7;
+static constexpr uint32_t kArm64MulFloatingPointLatency = 6;
+static constexpr uint32_t kArm64MulIntegerLatency = 6;
+static constexpr uint32_t kArm64TypeConversionFloatingPointIntegerLatency = 5;
+
+class SchedulingLatencyVisitorARM64 : public SchedulingLatencyVisitor {
+ public:
+ // Default visitor for instructions not handled specifically below.
+ void VisitInstruction(HInstruction* ATTRIBUTE_UNUSED) {
+ last_visited_latency_ = kArm64IntegerOpLatency;
+ }
+
+// We add a second unused parameter to be able to use this macro like the others
+// defined in `nodes.h`.
+#define FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(M) \
+ M(ArrayGet , unused) \
+ M(ArrayLength , unused) \
+ M(ArraySet , unused) \
+ M(BinaryOperation , unused) \
+ M(BoundsCheck , unused) \
+ M(Div , unused) \
+ M(InstanceFieldGet , unused) \
+ M(InstanceOf , unused) \
+ M(Invoke , unused) \
+ M(LoadString , unused) \
+ M(Mul , unused) \
+ M(NewArray , unused) \
+ M(NewInstance , unused) \
+ M(Rem , unused) \
+ M(StaticFieldGet , unused) \
+ M(SuspendCheck , unused) \
+ M(TypeConversion , unused)
+
+#define FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(M) \
+ M(BitwiseNegatedRight, unused) \
+ M(MultiplyAccumulate, unused) \
+ M(IntermediateAddress, unused)
+
+#define DECLARE_VISIT_INSTRUCTION(type, unused) \
+ void Visit##type(H##type* instruction) OVERRIDE;
+
+ FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
+
+#undef DECLARE_VISIT_INSTRUCTION
+};
+
+class HSchedulerARM64 : public HScheduler {
+ public:
+ HSchedulerARM64(ArenaAllocator* arena, SchedulingNodeSelector* selector)
+ : HScheduler(arena, &arm64_latency_visitor_, selector) {}
+ ~HSchedulerARM64() OVERRIDE {}
+
+ bool IsSchedulable(const HInstruction* instruction) const OVERRIDE {
+#define CASE_INSTRUCTION_KIND(type, unused) case \
+ HInstruction::InstructionKind::k##type:
+ switch (instruction->GetKind()) {
+ FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(CASE_INSTRUCTION_KIND)
+ return true;
+ FOR_EACH_CONCRETE_INSTRUCTION_ARM64(CASE_INSTRUCTION_KIND)
+ return true;
+ default:
+ return HScheduler::IsSchedulable(instruction);
+ }
+#undef CASE_INSTRUCTION_KIND
+ }
+
+ private:
+ SchedulingLatencyVisitorARM64 arm64_latency_visitor_;
+ DISALLOW_COPY_AND_ASSIGN(HSchedulerARM64);
+};
+
+} // namespace arm64
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_SCHEDULER_ARM64_H_
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
new file mode 100644
index 0000000..31d13e2
--- /dev/null
+++ b/compiler/optimizing/scheduler_test.cc
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/arena_allocator.h"
+#include "builder.h"
+#include "codegen_test_utils.h"
+#include "common_compiler_test.h"
+#include "nodes.h"
+#include "optimizing_unit_test.h"
+#include "pc_relative_fixups_x86.h"
+#include "register_allocator.h"
+#include "scheduler.h"
+
+#ifdef ART_ENABLE_CODEGEN_arm64
+#include "scheduler_arm64.h"
+#endif
+
+namespace art {
+
+// Return all combinations of ISA and code generator that are executable on
+// hardware, or on simulator, and that we'd like to test.
+static ::std::vector<CodegenTargetConfig> GetTargetConfigs() {
+ ::std::vector<CodegenTargetConfig> v;
+ ::std::vector<CodegenTargetConfig> test_config_candidates = {
+#ifdef ART_ENABLE_CODEGEN_arm
+ CodegenTargetConfig(kArm, create_codegen_arm),
+ CodegenTargetConfig(kThumb2, create_codegen_arm),
+#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
+ CodegenTargetConfig(kArm64, create_codegen_arm64),
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+ CodegenTargetConfig(kX86, create_codegen_x86),
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
+ CodegenTargetConfig(kX86_64, create_codegen_x86_64),
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips
+ CodegenTargetConfig(kMips, create_codegen_mips),
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips64
+ CodegenTargetConfig(kMips64, create_codegen_mips64)
+#endif
+ };
+
+ for (auto test_config : test_config_candidates) {
+ if (CanExecute(test_config.GetInstructionSet())) {
+ v.push_back(test_config);
+ }
+ }
+
+ return v;
+}
+
+class SchedulerTest : public CommonCompilerTest {};
+
+#ifdef ART_ENABLE_CODEGEN_arm64
+TEST_F(SchedulerTest, DependencyGraph) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraph* graph = CreateGraph(&allocator);
+ HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* block1 = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(entry);
+ graph->AddBlock(block1);
+ graph->SetEntryBlock(entry);
+
+ // entry:
+ // array ParameterValue
+ // c1 IntConstant
+ // c2 IntConstant
+ // block1:
+ // add1 Add [c1, c2]
+ // add2 Add [add1, c2]
+ // mul Mul [add1, add2]
+ // div_check DivZeroCheck [add2] (env: add2, mul)
+ // div Div [add1, div_check]
+ // array_get1 ArrayGet [array, add1]
+ // array_set1 ArraySet [array, add1, add2]
+ // array_get2 ArrayGet [array, add1]
+ // array_set2 ArraySet [array, add1, add2]
+
+ HInstruction* array = new (&allocator) HParameterValue(graph->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ Primitive::kPrimNot);
+ HInstruction* c1 = graph->GetIntConstant(1);
+ HInstruction* c2 = graph->GetIntConstant(10);
+ HInstruction* add1 = new (&allocator) HAdd(Primitive::kPrimInt, c1, c2);
+ HInstruction* add2 = new (&allocator) HAdd(Primitive::kPrimInt, add1, c2);
+ HInstruction* mul = new (&allocator) HMul(Primitive::kPrimInt, add1, add2);
+ HInstruction* div_check = new (&allocator) HDivZeroCheck(add2, 0);
+ HInstruction* div = new (&allocator) HDiv(Primitive::kPrimInt, add1, div_check, 0);
+ HInstruction* array_get1 = new (&allocator) HArrayGet(array, add1, Primitive::kPrimInt, 0);
+ HInstruction* array_set1 = new (&allocator) HArraySet(array, add1, add2, Primitive::kPrimInt, 0);
+ HInstruction* array_get2 = new (&allocator) HArrayGet(array, add1, Primitive::kPrimInt, 0);
+ HInstruction* array_set2 = new (&allocator) HArraySet(array, add1, add2, Primitive::kPrimInt, 0);
+
+ DCHECK(div_check->CanThrow());
+
+ entry->AddInstruction(array);
+
+ HInstruction* block_instructions[] = {add1,
+ add2,
+ mul,
+ div_check,
+ div,
+ array_get1,
+ array_set1,
+ array_get2,
+ array_set2};
+ for (auto instr : block_instructions) {
+ block1->AddInstruction(instr);
+ }
+
+ HEnvironment* environment = new (&allocator) HEnvironment(&allocator,
+ 2,
+ graph->GetArtMethod(),
+ 0,
+ div_check);
+ div_check->SetRawEnvironment(environment);
+ environment->SetRawEnvAt(0, add2);
+ add2->AddEnvUseAt(div_check->GetEnvironment(), 0);
+ environment->SetRawEnvAt(1, mul);
+ mul->AddEnvUseAt(div_check->GetEnvironment(), 1);
+
+ ArenaAllocator* arena = graph->GetArena();
+ CriticalPathSchedulingNodeSelector critical_path_selector;
+ arm64::HSchedulerARM64 scheduler(arena, &critical_path_selector);
+ SchedulingGraph scheduling_graph(&scheduler, arena);
+ // Instructions must be inserted in reverse order into the scheduling graph.
+ for (auto instr : ReverseRange(block_instructions)) {
+ scheduling_graph.AddNode(instr);
+ }
+
+ // Should not have dependencies across basic blocks.
+ ASSERT_FALSE(scheduling_graph.HasImmediateDataDependency(add1, c1));
+ ASSERT_FALSE(scheduling_graph.HasImmediateDataDependency(add2, c2));
+
+ // Define-use dependency.
+ ASSERT_TRUE(scheduling_graph.HasImmediateDataDependency(add2, add1));
+ ASSERT_FALSE(scheduling_graph.HasImmediateDataDependency(add1, add2));
+ ASSERT_TRUE(scheduling_graph.HasImmediateDataDependency(div_check, add2));
+ ASSERT_FALSE(scheduling_graph.HasImmediateDataDependency(div_check, add1));
+ ASSERT_TRUE(scheduling_graph.HasImmediateDataDependency(div, div_check));
+ ASSERT_TRUE(scheduling_graph.HasImmediateDataDependency(array_set1, add1));
+ ASSERT_TRUE(scheduling_graph.HasImmediateDataDependency(array_set1, add2));
+
+ // Read and write dependencies
+ ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(array_set1, array_get1));
+ ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(array_set2, array_get2));
+ ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(array_get2, array_set1));
+ ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(array_set2, array_set1));
+
+ // Env dependency.
+ ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(div_check, mul));
+ ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(mul, div_check));
+
+ // CanThrow.
+ ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(array_set1, div_check));
+}
+#endif
+
+static void CompileWithRandomSchedulerAndRun(const uint16_t* data,
+ bool has_result,
+ int expected) {
+ for (CodegenTargetConfig target_config : GetTargetConfigs()) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ HGraph* graph = CreateCFG(&arena, data);
+
+ // Schedule the graph randomly.
+ HInstructionScheduling scheduling(graph, target_config.GetInstructionSet());
+ scheduling.Run(/*only_optimize_loop_blocks*/ false, /*schedule_randomly*/ true);
+
+ RunCode(target_config,
+ graph,
+ [](HGraph* graph_arg) { RemoveSuspendChecks(graph_arg); },
+ has_result, expected);
+ }
+}
+
+TEST_F(SchedulerTest, RandomScheduling) {
+ //
+ // Java source: crafted code to make sure (random) scheduling gets the correct result.
+ //
+ // int result = 0;
+ // float fr = 10.0f;
+ // for (int i = 1; i < 10; i++) {
+ // fr ++;
+ // int t1 = result >> i;
+ // int t2 = result * i;
+ // result = result + t1 - t2;
+ // fr = fr / i;
+ // result += (int)fr;
+ // }
+ // return result;
+ //
+ const uint16_t data[] = SIX_REGISTERS_CODE_ITEM(
+ Instruction::CONST_4 | 0 << 12 | 2 << 8, // const/4 v2, #int 0
+ Instruction::CONST_HIGH16 | 0 << 8, 0x4120, // const/high16 v0, #float 10.0 // #41200000
+ Instruction::CONST_4 | 1 << 12 | 1 << 8, // const/4 v1, #int 1
+ Instruction::CONST_16 | 5 << 8, 0x000a, // const/16 v5, #int 10
+ Instruction::IF_GE | 5 << 12 | 1 << 8, 0x0014, // if-ge v1, v5, 001a // +0014
+ Instruction::CONST_HIGH16 | 5 << 8, 0x3f80, // const/high16 v5, #float 1.0 // #3f800000
+ Instruction::ADD_FLOAT_2ADDR | 5 << 12 | 0 << 8, // add-float/2addr v0, v5
+ Instruction::SHR_INT | 3 << 8, 1 << 8 | 2 , // shr-int v3, v2, v1
+ Instruction::MUL_INT | 4 << 8, 1 << 8 | 2, // mul-int v4, v2, v1
+ Instruction::ADD_INT | 5 << 8, 3 << 8 | 2, // add-int v5, v2, v3
+ Instruction::SUB_INT | 2 << 8, 4 << 8 | 5, // sub-int v2, v5, v4
+ Instruction::INT_TO_FLOAT | 1 << 12 | 5 << 8, // int-to-float v5, v1
+ Instruction::DIV_FLOAT_2ADDR | 5 << 12 | 0 << 8, // div-float/2addr v0, v5
+ Instruction::FLOAT_TO_INT | 0 << 12 | 5 << 8, // float-to-int v5, v0
+ Instruction::ADD_INT_2ADDR | 5 << 12 | 2 << 8, // add-int/2addr v2, v5
+ Instruction::ADD_INT_LIT8 | 1 << 8, 1 << 8 | 1, // add-int/lit8 v1, v1, #int 1 // #01
+ Instruction::GOTO | 0xeb << 8, // goto 0004 // -0015
+ Instruction::RETURN | 2 << 8); // return v2
+
+ constexpr int kNumberOfRuns = 10;
+ for (int i = 0; i < kNumberOfRuns; ++i) {
+ CompileWithRandomSchedulerAndRun(data, true, 138774);
+ }
+}
+
+} // namespace art
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index c529410..f07f02a 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -42,8 +42,6 @@
HInstruction* instruction = it.Current();
if (instruction->IsInvokeStaticOrDirect()) {
ProcessInvokeStaticOrDirect(instruction->AsInvokeStaticOrDirect());
- } else if (instruction->IsLoadClass()) {
- ProcessLoadClass(instruction->AsLoadClass());
} else if (instruction->IsLoadString()) {
ProcessLoadString(instruction->AsLoadString());
}
@@ -97,7 +95,9 @@
// class is initialized already or being initialized, and the call will not
// be invoked once the method is deoptimized.
- if (callee == codegen_->GetGraph()->GetArtMethod()) {
+ // We don't optimize for debuggable as it would prevent us from obsoleting the method in some
+ // situations.
+ if (callee == codegen_->GetGraph()->GetArtMethod() && !codegen_->GetGraph()->IsDebuggable()) {
// Recursive call.
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRecursive;
code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallSelf;
@@ -131,104 +131,93 @@
invoke->SetDispatchInfo(dispatch_info);
}
-void HSharpening::ProcessLoadClass(HLoadClass* load_class) {
- ScopedObjectAccess soa(Thread::Current());
- SharpenClass(load_class, codegen_, compiler_driver_);
-}
-
-void HSharpening::SharpenClass(HLoadClass* load_class,
- CodeGenerator* codegen,
- CompilerDriver* compiler_driver) {
+HLoadClass::LoadKind HSharpening::SharpenClass(HLoadClass* load_class,
+ CodeGenerator* codegen,
+ CompilerDriver* compiler_driver,
+ const DexCompilationUnit& dex_compilation_unit) {
Handle<mirror::Class> klass = load_class->GetClass();
DCHECK(load_class->GetLoadKind() == HLoadClass::LoadKind::kDexCacheViaMethod ||
load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass)
<< load_class->GetLoadKind();
DCHECK(!load_class->IsInBootImage()) << "HLoadClass should not be optimized before sharpening.";
+ HLoadClass::LoadKind load_kind = load_class->GetLoadKind();
+
if (load_class->NeedsAccessCheck()) {
// We need to call the runtime anyway, so we simply get the class as that call's return value.
- return;
- }
-
- if (load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass) {
+ } else if (load_kind == HLoadClass::LoadKind::kReferrersClass) {
// Loading from the ArtMethod* is the most efficient retrieval in code size.
// TODO: This may not actually be true for all architectures and
// locations of target classes. The additional register pressure
// for using the ArtMethod* should be considered.
- return;
- }
-
- const DexFile& dex_file = load_class->GetDexFile();
- dex::TypeIndex type_index = load_class->GetTypeIndex();
-
- bool is_in_boot_image = false;
- HLoadClass::LoadKind desired_load_kind = static_cast<HLoadClass::LoadKind>(-1);
- Runtime* runtime = Runtime::Current();
- if (codegen->GetCompilerOptions().IsBootImage()) {
- // Compiling boot image. Check if the class is a boot image class.
- DCHECK(!runtime->UseJitCompilation());
- if (!compiler_driver->GetSupportBootImageFixup()) {
- // compiler_driver_test. Do not sharpen.
- desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
- } else if ((klass.Get() != nullptr) && compiler_driver->IsImageClass(
- dex_file.StringDataByIdx(dex_file.GetTypeId(type_index).descriptor_idx_))) {
- is_in_boot_image = true;
- desired_load_kind = codegen->GetCompilerOptions().GetCompilePic()
- ? HLoadClass::LoadKind::kBootImageLinkTimePcRelative
- : HLoadClass::LoadKind::kBootImageLinkTimeAddress;
- } else {
- // Not a boot image class.
- DCHECK(ContainsElement(compiler_driver->GetDexFilesForOatFile(), &dex_file));
- desired_load_kind = HLoadClass::LoadKind::kBssEntry;
- }
} else {
- is_in_boot_image = (klass.Get() != nullptr) &&
- runtime->GetHeap()->ObjectIsInBootImageSpace(klass.Get());
- if (runtime->UseJitCompilation()) {
- // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
- // DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
- if (is_in_boot_image) {
- // TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
- desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
- } else if (klass.Get() != nullptr) {
- desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
- } else {
- // Class not loaded yet. This happens when the dex code requesting
- // this `HLoadClass` hasn't been executed in the interpreter.
- // Fallback to the dex cache.
- // TODO(ngeoffray): Generate HDeoptimize instead.
+ const DexFile& dex_file = load_class->GetDexFile();
+ dex::TypeIndex type_index = load_class->GetTypeIndex();
+
+ bool is_in_boot_image = false;
+ HLoadClass::LoadKind desired_load_kind = HLoadClass::LoadKind::kInvalid;
+ Runtime* runtime = Runtime::Current();
+ if (codegen->GetCompilerOptions().IsBootImage()) {
+ // Compiling boot image. Check if the class is a boot image class.
+ DCHECK(!runtime->UseJitCompilation());
+ if (!compiler_driver->GetSupportBootImageFixup()) {
+ // compiler_driver_test. Do not sharpen.
desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
+ } else if ((klass.Get() != nullptr) && compiler_driver->IsImageClass(
+ dex_file.StringDataByIdx(dex_file.GetTypeId(type_index).descriptor_idx_))) {
+ is_in_boot_image = true;
+ desired_load_kind = codegen->GetCompilerOptions().GetCompilePic()
+ ? HLoadClass::LoadKind::kBootImageLinkTimePcRelative
+ : HLoadClass::LoadKind::kBootImageLinkTimeAddress;
+ } else {
+ // Not a boot image class.
+ DCHECK(ContainsElement(compiler_driver->GetDexFilesForOatFile(), &dex_file));
+ desired_load_kind = HLoadClass::LoadKind::kBssEntry;
}
- } else if (is_in_boot_image && !codegen->GetCompilerOptions().GetCompilePic()) {
- // AOT app compilation. Check if the class is in the boot image.
- desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
} else {
- // Not JIT and either the klass is not in boot image or we are compiling in PIC mode.
- desired_load_kind = HLoadClass::LoadKind::kBssEntry;
+ is_in_boot_image = (klass.Get() != nullptr) &&
+ runtime->GetHeap()->ObjectIsInBootImageSpace(klass.Get());
+ if (runtime->UseJitCompilation()) {
+ // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
+ // DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
+ if (is_in_boot_image) {
+ // TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
+ desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
+ } else if (klass.Get() != nullptr) {
+ desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
+ } else {
+ // Class not loaded yet. This happens when the dex code requesting
+ // this `HLoadClass` hasn't been executed in the interpreter.
+ // Fallback to the dex cache.
+ // TODO(ngeoffray): Generate HDeoptimize instead.
+ desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
+ }
+ } else if (is_in_boot_image && !codegen->GetCompilerOptions().GetCompilePic()) {
+ // AOT app compilation. Check if the class is in the boot image.
+ desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
+ } else {
+ // Not JIT and either the klass is not in boot image or we are compiling in PIC mode.
+ desired_load_kind = HLoadClass::LoadKind::kBssEntry;
+ }
+ }
+ DCHECK_NE(desired_load_kind, HLoadClass::LoadKind::kInvalid);
+
+ if (is_in_boot_image) {
+ load_class->MarkInBootImage();
+ }
+ load_kind = codegen->GetSupportedLoadClassKind(desired_load_kind);
+ }
+
+ if (!IsSameDexFile(load_class->GetDexFile(), *dex_compilation_unit.GetDexFile())) {
+ if ((load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) ||
+ (load_kind == HLoadClass::LoadKind::kBssEntry)) {
+ // We actually cannot reference this class, we're forced to bail.
+ // We cannot reference this class with Bss, as the entrypoint will lookup the class
+ // in the caller's dex file, but that dex file does not reference the class.
+ return HLoadClass::LoadKind::kInvalid;
}
}
- DCHECK_NE(desired_load_kind, static_cast<HLoadClass::LoadKind>(-1));
-
- if (is_in_boot_image) {
- load_class->MarkInBootImage();
- }
-
- HLoadClass::LoadKind load_kind = codegen->GetSupportedLoadClassKind(desired_load_kind);
- switch (load_kind) {
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
- case HLoadClass::LoadKind::kBssEntry:
- case HLoadClass::LoadKind::kDexCacheViaMethod:
- load_class->SetLoadKindWithTypeReference(load_kind, dex_file, type_index);
- break;
- case HLoadClass::LoadKind::kBootImageAddress:
- case HLoadClass::LoadKind::kJitTableAddress:
- load_class->SetLoadKind(load_kind);
- break;
- default:
- LOG(FATAL) << "Unexpected load kind: " << load_kind;
- UNREACHABLE();
- }
+ return load_kind;
}
void HSharpening::ProcessLoadString(HLoadString* load_string) {
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index ae3d83e..4240b2f 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_SHARPENING_H_
#define ART_COMPILER_OPTIMIZING_SHARPENING_H_
+#include "nodes.h"
#include "optimization.h"
namespace art {
@@ -24,7 +25,6 @@
class CodeGenerator;
class CompilerDriver;
class DexCompilationUnit;
-class HInvokeStaticOrDirect;
// Optimization that tries to improve the way we dispatch methods and access types,
// fields, etc. Besides actual method sharpening based on receiver type (for example
@@ -47,15 +47,15 @@
static constexpr const char* kSharpeningPassName = "sharpening";
- // Used internally but also by the inliner.
- static void SharpenClass(HLoadClass* load_class,
- CodeGenerator* codegen,
- CompilerDriver* compiler_driver)
+ // Used by the builder and the inliner.
+ static HLoadClass::LoadKind SharpenClass(HLoadClass* load_class,
+ CodeGenerator* codegen,
+ CompilerDriver* compiler_driver,
+ const DexCompilationUnit& dex_compilation_unit)
REQUIRES_SHARED(Locks::mutator_lock_);
private:
void ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke);
- void ProcessLoadClass(HLoadClass* load_class);
void ProcessLoadString(HLoadString* load_string);
CodeGenerator* codegen_;
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index ae1e369..487e4dd 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -17,8 +17,10 @@
#include "ssa_builder.h"
#include "bytecode_utils.h"
+#include "mirror/class-inl.h"
#include "nodes.h"
#include "reference_type_propagation.h"
+#include "scoped_thread_state_change-inl.h"
#include "ssa_phi_elimination.h"
namespace art {
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index a9a1e6f..10f5cab 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -16,7 +16,11 @@
#include "stack_map_stream.h"
-#include "art_method.h"
+#include <unordered_map>
+
+#include "art_method-inl.h"
+#include "base/stl_util.h"
+#include "optimizing/optimizing_compiler.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
@@ -40,6 +44,7 @@
current_entry_.inline_infos_start_index = inline_infos_.size();
current_entry_.dex_register_map_hash = 0;
current_entry_.same_dex_register_map_as_ = kNoSameDexMapFound;
+ current_entry_.stack_mask_index = 0;
if (num_dex_registers != 0) {
current_entry_.live_dex_registers_mask =
ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream);
@@ -103,11 +108,6 @@
current_dex_register_++;
}
-static bool EncodeArtMethodInInlineInfo(ArtMethod* method ATTRIBUTE_UNUSED) {
- // Note: the runtime is null only for unit testing.
- return Runtime::Current() == nullptr || !Runtime::Current()->IsAotCompiler();
-}
-
void StackMapStream::BeginInlineInfoEntry(ArtMethod* method,
uint32_t dex_pc,
uint32_t num_dex_registers,
@@ -153,32 +153,43 @@
}
size_t StackMapStream::PrepareForFillIn() {
- int stack_mask_number_of_bits = stack_mask_max_ + 1; // Need room for max element too.
+ const size_t stack_mask_size_in_bits = stack_mask_max_ + 1; // Need room for max element too.
+ const size_t number_of_stack_masks = PrepareStackMasks(stack_mask_size_in_bits);
+ const size_t register_mask_size_in_bits = MinimumBitsToStore(register_mask_max_);
+ const size_t number_of_register_masks = PrepareRegisterMasks();
dex_register_maps_size_ = ComputeDexRegisterMapsSize();
ComputeInlineInfoEncoding(); // needs dex_register_maps_size_.
inline_info_size_ = inline_infos_.size() * inline_info_encoding_.GetEntrySize();
CodeOffset max_native_pc_offset = ComputeMaxNativePcCodeOffset();
- // The stack map contains compressed native offsets.
- size_t stack_map_size = stack_map_encoding_.SetFromSizes(max_native_pc_offset.CompressedValue(),
- dex_pc_max_,
- dex_register_maps_size_,
- inline_info_size_,
- register_mask_max_,
- stack_mask_number_of_bits);
- stack_maps_size_ = stack_maps_.size() * stack_map_size;
+ // The stack map contains compressed native PC offsets.
+ const size_t stack_map_size = stack_map_encoding_.SetFromSizes(
+ max_native_pc_offset.CompressedValue(),
+ dex_pc_max_,
+ dex_register_maps_size_,
+ inline_info_size_,
+ number_of_register_masks,
+ number_of_stack_masks);
+ stack_maps_size_ = RoundUp(stack_maps_.size() * stack_map_size, kBitsPerByte) / kBitsPerByte;
dex_register_location_catalog_size_ = ComputeDexRegisterLocationCatalogSize();
-
- size_t non_header_size =
+ const size_t stack_masks_bits = number_of_stack_masks * stack_mask_size_in_bits;
+ const size_t register_masks_bits = number_of_register_masks * register_mask_size_in_bits;
+ // Register masks are last, stack masks are right before that last.
+ // They are both bit packed / aligned.
+ const size_t non_header_size =
stack_maps_size_ +
dex_register_location_catalog_size_ +
dex_register_maps_size_ +
- inline_info_size_;
+ inline_info_size_ +
+ RoundUp(stack_masks_bits + register_masks_bits, kBitsPerByte) / kBitsPerByte;
// Prepare the CodeInfo variable-sized encoding.
CodeInfoEncoding code_info_encoding;
code_info_encoding.non_header_size = non_header_size;
code_info_encoding.number_of_stack_maps = stack_maps_.size();
- code_info_encoding.stack_map_size_in_bytes = stack_map_size;
+ code_info_encoding.number_of_stack_masks = number_of_stack_masks;
+ code_info_encoding.number_of_register_masks = number_of_register_masks;
+ code_info_encoding.stack_mask_size_in_bits = stack_mask_size_in_bits;
+ code_info_encoding.register_mask_size_in_bits = register_mask_size_in_bits;
code_info_encoding.stack_map_encoding = stack_map_encoding_;
code_info_encoding.inline_info_encoding = inline_info_encoding_;
code_info_encoding.number_of_location_catalog_entries = location_catalog_entries_.size();
@@ -321,18 +332,8 @@
stack_map.SetDexPc(stack_map_encoding_, entry.dex_pc);
stack_map.SetNativePcCodeOffset(stack_map_encoding_, entry.native_pc_code_offset);
- stack_map.SetRegisterMask(stack_map_encoding_, entry.register_mask);
- size_t number_of_stack_mask_bits = stack_map.GetNumberOfStackMaskBits(stack_map_encoding_);
- if (entry.sp_mask != nullptr) {
- for (size_t bit = 0; bit < number_of_stack_mask_bits; bit++) {
- stack_map.SetStackMaskBit(stack_map_encoding_, bit, entry.sp_mask->IsBitSet(bit));
- }
- } else {
- // The MemoryRegion does not have to be zeroed, so make sure we clear the bits.
- for (size_t bit = 0; bit < number_of_stack_mask_bits; bit++) {
- stack_map.SetStackMaskBit(stack_map_encoding_, bit, false);
- }
- }
+ stack_map.SetRegisterMaskIndex(stack_map_encoding_, entry.register_mask_index);
+ stack_map.SetStackMaskIndex(stack_map_encoding_, entry.stack_mask_index);
if (entry.num_dex_registers == 0 || (entry.live_dex_registers_mask->NumSetBits() == 0)) {
// No dex map available.
@@ -353,7 +354,7 @@
next_dex_register_map_offset += register_region.size();
DexRegisterMap dex_register_map(register_region);
stack_map.SetDexRegisterMapOffset(
- stack_map_encoding_, register_region.start() - dex_register_locations_region.start());
+ stack_map_encoding_, register_region.begin() - dex_register_locations_region.begin());
// Set the dex register location.
FillInDexRegisterMap(dex_register_map,
@@ -373,7 +374,7 @@
// Currently relative to the dex register map.
stack_map.SetInlineDescriptorOffset(
- stack_map_encoding_, inline_region.start() - dex_register_locations_region.start());
+ stack_map_encoding_, inline_region.begin() - dex_register_locations_region.begin());
inline_info.SetDepth(inline_info_encoding_, entry.inlining_depth);
DCHECK_LE(entry.inline_infos_start_index + entry.inlining_depth, inline_infos_.size());
@@ -408,7 +409,7 @@
DexRegisterMap dex_register_map(register_region);
inline_info.SetDexRegisterMapOffsetAtDepth(
inline_info_encoding_,
- depth, register_region.start() - dex_register_locations_region.start());
+ depth, register_region.begin() - dex_register_locations_region.begin());
FillInDexRegisterMap(dex_register_map,
inline_entry.num_dex_registers,
@@ -423,6 +424,25 @@
}
}
+ // Write stack masks table.
+ size_t stack_mask_bits = encoding.stack_mask_size_in_bits;
+ if (stack_mask_bits > 0) {
+ size_t stack_mask_bytes = RoundUp(stack_mask_bits, kBitsPerByte) / kBitsPerByte;
+ for (size_t i = 0; i < encoding.number_of_stack_masks; ++i) {
+ MemoryRegion source(&stack_masks_[i * stack_mask_bytes], stack_mask_bytes);
+ BitMemoryRegion stack_mask = code_info.GetStackMask(encoding, i);
+ for (size_t bit_index = 0; bit_index < encoding.stack_mask_size_in_bits; ++bit_index) {
+ stack_mask.StoreBit(bit_index, source.LoadBit(bit_index));
+ }
+ }
+ }
+
+ // Write register masks table.
+ for (size_t i = 0; i < encoding.number_of_register_masks; ++i) {
+ BitMemoryRegion register_mask = code_info.GetRegisterMask(encoding, i);
+ register_mask.StoreBits(0, register_masks_[i], encoding.register_mask_size_in_bits);
+ }
+
// Verify all written data in debug build.
if (kIsDebugBuild) {
CheckCodeInfo(region);
@@ -536,6 +556,38 @@
}
}
+size_t StackMapStream::PrepareRegisterMasks() {
+ register_masks_.resize(stack_maps_.size(), 0u);
+ std::unordered_map<uint32_t, size_t> dedupe;
+ for (StackMapEntry& stack_map : stack_maps_) {
+ const size_t index = dedupe.size();
+ stack_map.register_mask_index = dedupe.emplace(stack_map.register_mask, index).first->second;
+ register_masks_[index] = stack_map.register_mask;
+ }
+ return dedupe.size();
+}
+
+size_t StackMapStream::PrepareStackMasks(size_t entry_size_in_bits) {
+ // Preallocate memory since we do not want it to move (the dedup map will point into it).
+ const size_t byte_entry_size = RoundUp(entry_size_in_bits, kBitsPerByte) / kBitsPerByte;
+ stack_masks_.resize(byte_entry_size * stack_maps_.size(), 0u);
+ // For deduplicating we store the stack masks as byte packed for simplicity. We can bit pack later
+ // when copying out from stack_masks_.
+ std::unordered_map<MemoryRegion,
+ size_t,
+ FNVHash<MemoryRegion>,
+ MemoryRegion::ContentEquals> dedup(stack_maps_.size());
+ for (StackMapEntry& stack_map : stack_maps_) {
+ size_t index = dedup.size();
+ MemoryRegion stack_mask(stack_masks_.data() + index * byte_entry_size, byte_entry_size);
+ for (size_t i = 0; i < entry_size_in_bits; i++) {
+ stack_mask.StoreBit(i, stack_map.sp_mask != nullptr && stack_map.sp_mask->IsBitSet(i));
+ }
+ stack_map.stack_mask_index = dedup.emplace(stack_mask, index).first->second;
+ }
+ return dedup.size();
+}
+
// Check that all StackMapStream inputs are correctly encoded by trying to read them back.
void StackMapStream::CheckCodeInfo(MemoryRegion region) const {
CodeInfo code_info(region);
@@ -550,16 +602,19 @@
DCHECK_EQ(stack_map.GetNativePcOffset(stack_map_encoding, instruction_set_),
entry.native_pc_code_offset.Uint32Value(instruction_set_));
DCHECK_EQ(stack_map.GetDexPc(stack_map_encoding), entry.dex_pc);
- DCHECK_EQ(stack_map.GetRegisterMask(stack_map_encoding), entry.register_mask);
- size_t num_stack_mask_bits = stack_map.GetNumberOfStackMaskBits(stack_map_encoding);
+ DCHECK_EQ(stack_map.GetRegisterMaskIndex(stack_map_encoding), entry.register_mask_index);
+ DCHECK_EQ(code_info.GetRegisterMaskOf(encoding, stack_map), entry.register_mask);
+ const size_t num_stack_mask_bits = code_info.GetNumberOfStackMaskBits(encoding);
+ DCHECK_EQ(stack_map.GetStackMaskIndex(stack_map_encoding), entry.stack_mask_index);
+ BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map);
if (entry.sp_mask != nullptr) {
- DCHECK_GE(num_stack_mask_bits, entry.sp_mask->GetNumberOfBits());
+ DCHECK_GE(stack_mask.size_in_bits(), entry.sp_mask->GetNumberOfBits());
for (size_t b = 0; b < num_stack_mask_bits; b++) {
- DCHECK_EQ(stack_map.GetStackMaskBit(stack_map_encoding, b), entry.sp_mask->IsBitSet(b));
+ DCHECK_EQ(stack_mask.LoadBit(b), entry.sp_mask->IsBitSet(b));
}
} else {
for (size_t b = 0; b < num_stack_mask_bits; b++) {
- DCHECK_EQ(stack_map.GetStackMaskBit(stack_map_encoding, b), 0u);
+ DCHECK_EQ(stack_mask.LoadBit(b), 0u);
}
}
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 8fec472..b1069a1 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -68,6 +68,8 @@
location_catalog_entries_indices_(allocator->Adapter(kArenaAllocStackMapStream)),
dex_register_locations_(allocator->Adapter(kArenaAllocStackMapStream)),
inline_infos_(allocator->Adapter(kArenaAllocStackMapStream)),
+ stack_masks_(allocator->Adapter(kArenaAllocStackMapStream)),
+ register_masks_(allocator->Adapter(kArenaAllocStackMapStream)),
stack_mask_max_(-1),
dex_pc_max_(0),
register_mask_max_(0),
@@ -107,6 +109,8 @@
BitVector* live_dex_registers_mask;
uint32_t dex_register_map_hash;
size_t same_dex_register_map_as_;
+ uint32_t stack_mask_index;
+ uint32_t register_mask_index;
};
struct InlineInfoEntry {
@@ -160,6 +164,12 @@
CodeOffset ComputeMaxNativePcCodeOffset() const;
+ // Returns the number of unique stack masks.
+ size_t PrepareStackMasks(size_t entry_size_in_bits);
+
+ // Returns the number of unique register masks.
+ size_t PrepareRegisterMasks();
+
// Returns the index of an entry with the same dex register map as the current_entry,
// or kNoSameDexMapFound if no such entry exists.
size_t FindEntryWithTheSameDexMap();
@@ -193,6 +203,8 @@
// A set of concatenated maps of Dex register locations indices to `location_catalog_entries_`.
ArenaVector<size_t> dex_register_locations_;
ArenaVector<InlineInfoEntry> inline_infos_;
+ ArenaVector<uint8_t> stack_masks_;
+ ArenaVector<uint32_t> register_masks_;
int stack_mask_max_;
uint32_t dex_pc_max_;
uint32_t register_mask_max_;
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index f68695b..ce6d5c2 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -27,15 +27,16 @@
// Check that the stack mask of given stack map is identical
// to the given bit vector. Returns true if they are same.
static bool CheckStackMask(
+ const CodeInfo& code_info,
+ const CodeInfoEncoding& encoding,
const StackMap& stack_map,
- StackMapEncoding& encoding,
const BitVector& bit_vector) {
- int number_of_bits = stack_map.GetNumberOfStackMaskBits(encoding);
- if (bit_vector.GetHighestBitSet() >= number_of_bits) {
+ BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map);
+ if (bit_vector.GetNumberOfBits() > encoding.stack_mask_size_in_bits) {
return false;
}
- for (int i = 0; i < number_of_bits; ++i) {
- if (stack_map.GetStackMaskBit(encoding, i) != bit_vector.IsBitSet(i)) {
+ for (size_t i = 0; i < encoding.stack_mask_size_in_bits; ++i) {
+ if (stack_mask.LoadBit(i) != bit_vector.IsBitSet(i)) {
return false;
}
}
@@ -79,9 +80,9 @@
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
- ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding.stack_map_encoding));
+ ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
- ASSERT_TRUE(CheckStackMask(stack_map, encoding.stack_map_encoding, sp_mask));
+ ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask));
ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
DexRegisterMap dex_register_map =
@@ -194,9 +195,9 @@
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
- ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding.stack_map_encoding));
+ ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
- ASSERT_TRUE(CheckStackMask(stack_map, encoding.stack_map_encoding, sp_mask1));
+ ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask1));
ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
DexRegisterMap dex_register_map =
@@ -253,9 +254,9 @@
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(128u, encoding)));
ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map_encoding));
ASSERT_EQ(128u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
- ASSERT_EQ(0xFFu, stack_map.GetRegisterMask(encoding.stack_map_encoding));
+ ASSERT_EQ(0xFFu, code_info.GetRegisterMaskOf(encoding, stack_map));
- ASSERT_TRUE(CheckStackMask(stack_map, encoding.stack_map_encoding, sp_mask2));
+ ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask2));
ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
DexRegisterMap dex_register_map =
@@ -307,9 +308,9 @@
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(192u, encoding)));
ASSERT_EQ(2u, stack_map.GetDexPc(encoding.stack_map_encoding));
ASSERT_EQ(192u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
- ASSERT_EQ(0xABu, stack_map.GetRegisterMask(encoding.stack_map_encoding));
+ ASSERT_EQ(0xABu, code_info.GetRegisterMaskOf(encoding, stack_map));
- ASSERT_TRUE(CheckStackMask(stack_map, encoding.stack_map_encoding, sp_mask3));
+ ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask3));
ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
DexRegisterMap dex_register_map =
@@ -361,9 +362,9 @@
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(256u, encoding)));
ASSERT_EQ(3u, stack_map.GetDexPc(encoding.stack_map_encoding));
ASSERT_EQ(256u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
- ASSERT_EQ(0xCDu, stack_map.GetRegisterMask(encoding.stack_map_encoding));
+ ASSERT_EQ(0xCDu, code_info.GetRegisterMaskOf(encoding, stack_map));
- ASSERT_TRUE(CheckStackMask(stack_map, encoding.stack_map_encoding, sp_mask4));
+ ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask4));
ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
DexRegisterMap dex_register_map =
@@ -443,7 +444,7 @@
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
- ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding.stack_map_encoding));
+ ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
DexRegisterMap dex_register_map =
@@ -642,7 +643,7 @@
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
- ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding.stack_map_encoding));
+ ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
@@ -652,7 +653,7 @@
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(68, encoding)));
ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map_encoding));
ASSERT_EQ(68u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
- ASSERT_EQ(0x4u, stack_map.GetRegisterMask(encoding.stack_map_encoding));
+ ASSERT_EQ(0x4u, code_info.GetRegisterMaskOf(encoding, stack_map));
ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
@@ -839,4 +840,33 @@
EXPECT_EQ(offset_mips64.Uint32Value(kMips64), kMips64InstructionAlignment);
}
+
+TEST(StackMapTest, TestDeduplicateStackMask) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ StackMapStream stream(&arena, kRuntimeISA);
+
+ ArenaBitVector sp_mask(&arena, 0, true);
+ sp_mask.SetBit(1);
+ sp_mask.SetBit(4);
+ stream.BeginStackMapEntry(0, 4, 0x3, &sp_mask, 0, 0);
+ stream.EndStackMapEntry();
+ stream.BeginStackMapEntry(0, 8, 0x3, &sp_mask, 0, 0);
+ stream.EndStackMapEntry();
+
+ size_t size = stream.PrepareForFillIn();
+ void* memory = arena.Alloc(size, kArenaAllocMisc);
+ MemoryRegion region(memory, size);
+ stream.FillIn(region);
+
+ CodeInfo code_info(region);
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ ASSERT_EQ(2u, code_info.GetNumberOfStackMaps(encoding));
+
+ StackMap stack_map1 = code_info.GetStackMapForNativePcOffset(4, encoding);
+ StackMap stack_map2 = code_info.GetStackMapForNativePcOffset(8, encoding);
+ EXPECT_EQ(stack_map1.GetStackMaskIndex(encoding.stack_map_encoding),
+ stack_map2.GetStackMaskIndex(encoding.stack_map_encoding));
+}
+
} // namespace art
diff --git a/compiler/utils/assembler_test_base.h b/compiler/utils/assembler_test_base.h
index e7edf96..d76cb1c 100644
--- a/compiler/utils/assembler_test_base.h
+++ b/compiler/utils/assembler_test_base.h
@@ -26,6 +26,7 @@
#include "android-base/strings.h"
#include "common_runtime_test.h" // For ScratchFile
+#include "exec_utils.h"
#include "utils.h"
namespace art {
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index f132e27..071cd57 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -5610,7 +5610,7 @@
" 214: ecbd 8a10 vpop {s16-s31}\n",
" 218: e8bd 8de0 ldmia.w sp!, {r5, r6, r7, r8, sl, fp, pc}\n",
" 21c: 4660 mov r0, ip\n",
- " 21e: f8d9 c2a4 ldr.w ip, [r9, #676] ; 0x2a4\n",
+ " 21e: f8d9 c2b4 ldr.w ip, [r9, #692] ; 0x2b4\n",
" 222: 47e0 blx ip\n",
nullptr
};
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index d3b15ac..a24d49e 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1057,6 +1057,25 @@
}
+void X86Assembler::shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0xC6);
+ EmitXmmRegisterOperand(dst, src);
+ EmitUint8(imm.value());
+}
+
+
+void X86Assembler::shufps(XmmRegister dst, XmmRegister src, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xC6);
+ EmitXmmRegisterOperand(dst, src);
+ EmitUint8(imm.value());
+}
+
+
void X86Assembler::fldl(const Address& src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xDD);
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index a93616c..4056ca6 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -472,6 +472,9 @@
void orpd(XmmRegister dst, XmmRegister src);
void orps(XmmRegister dst, XmmRegister src);
+ void shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm);
+ void shufps(XmmRegister dst, XmmRegister src, const Immediate& imm);
+
void flds(const Address& src);
void fstps(const Address& dst);
void fsts(const Address& dst);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index 4d60a12..1768d8b 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -468,51 +468,43 @@
}
TEST_F(AssemblerX86Test, AddPS) {
- GetAssembler()->addps(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1));
- const char* expected = "addps %xmm1, %xmm0\n";
- DriverStr(expected, "addps");
+ DriverStr(RepeatFF(&x86::X86Assembler::addps, "addps %{reg2}, %{reg1}"), "addps");
}
TEST_F(AssemblerX86Test, AddPD) {
- GetAssembler()->addpd(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1));
- const char* expected = "addpd %xmm1, %xmm0\n";
- DriverStr(expected, "addpd");
+ DriverStr(RepeatFF(&x86::X86Assembler::addpd, "addpd %{reg2}, %{reg1}"), "addpd");
}
TEST_F(AssemblerX86Test, SubPS) {
- GetAssembler()->subps(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1));
- const char* expected = "subps %xmm1, %xmm0\n";
- DriverStr(expected, "subps");
+ DriverStr(RepeatFF(&x86::X86Assembler::subps, "subps %{reg2}, %{reg1}"), "subps");
}
TEST_F(AssemblerX86Test, SubPD) {
- GetAssembler()->subpd(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1));
- const char* expected = "subpd %xmm1, %xmm0\n";
- DriverStr(expected, "subpd");
+ DriverStr(RepeatFF(&x86::X86Assembler::subpd, "subpd %{reg2}, %{reg1}"), "subpd");
}
TEST_F(AssemblerX86Test, MulPS) {
- GetAssembler()->mulps(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1));
- const char* expected = "mulps %xmm1, %xmm0\n";
- DriverStr(expected, "mulps");
+ DriverStr(RepeatFF(&x86::X86Assembler::mulps, "mulps %{reg2}, %{reg1}"), "mulps");
}
TEST_F(AssemblerX86Test, MulPD) {
- GetAssembler()->mulpd(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1));
- const char* expected = "mulpd %xmm1, %xmm0\n";
- DriverStr(expected, "mulpd");
+ DriverStr(RepeatFF(&x86::X86Assembler::mulpd, "mulpd %{reg2}, %{reg1}"), "mulpd");
}
TEST_F(AssemblerX86Test, DivPS) {
- GetAssembler()->divps(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1));
- const char* expected = "divps %xmm1, %xmm0\n";
- DriverStr(expected, "divps");
+ DriverStr(RepeatFF(&x86::X86Assembler::divps, "divps %{reg2}, %{reg1}"), "divps");
}
TEST_F(AssemblerX86Test, DivPD) {
- GetAssembler()->divpd(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1));
- const char* expected = "divpd %xmm1, %xmm0\n";
- DriverStr(expected, "divpd");
+ DriverStr(RepeatFF(&x86::X86Assembler::divpd, "divpd %{reg2}, %{reg1}"), "divpd");
+}
+
+TEST_F(AssemblerX86Test, ShufPS) {
+ DriverStr(RepeatFFI(&x86::X86Assembler::shufps, 1, "shufps ${imm}, %{reg2}, %{reg1}"), "shufps");
+}
+
+TEST_F(AssemblerX86Test, ShufPD) {
+ DriverStr(RepeatFFI(&x86::X86Assembler::shufpd, 1, "shufpd ${imm}, %{reg2}, %{reg1}"), "shufpd");
}
/////////////////
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 2366b68..c2c44ab 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1213,6 +1213,28 @@
EmitXmmRegisterOperand(dst.LowBits(), src);
}
+
+void X86_64Assembler::shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0xC6);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+ EmitUint8(imm.value());
+}
+
+
+void X86_64Assembler::shufps(XmmRegister dst, XmmRegister src, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0xC6);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+ EmitUint8(imm.value());
+}
+
+
void X86_64Assembler::fldl(const Address& src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xDD);
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 5923a41..e140b45 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -495,6 +495,9 @@
void orpd(XmmRegister dst, XmmRegister src);
void orps(XmmRegister dst, XmmRegister src);
+ void shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm);
+ void shufps(XmmRegister dst, XmmRegister src, const Immediate& imm);
+
void flds(const Address& src);
void fstps(const Address& dst);
void fsts(const Address& dst);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 2812c34..efa5cc9 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -1203,6 +1203,14 @@
DriverStr(RepeatFF(&x86_64::X86_64Assembler::orpd, "orpd %{reg2}, %{reg1}"), "orpd");
}
+TEST_F(AssemblerX86_64Test, Shufps) {
+ DriverStr(RepeatFFI(&x86_64::X86_64Assembler::shufps, 1, "shufps ${imm}, %{reg2}, %{reg1}"), "shufps");
+}
+
+TEST_F(AssemblerX86_64Test, Shufpd) {
+ DriverStr(RepeatFFI(&x86_64::X86_64Assembler::shufpd, 1, "shufpd ${imm}, %{reg2}, %{reg1}"), "shufpd");
+}
+
TEST_F(AssemblerX86_64Test, UcomissAddress) {
GetAssembler()->ucomiss(x86_64::XmmRegister(x86_64::XMM0), x86_64::Address(
x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12));
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index e8a92c1..196d8d4 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -277,7 +277,6 @@
"|balanced"
"|speed-profile"
"|speed"
- "|layout-profile"
"|everything-profile"
"|everything):");
UsageError(" select compiler filter.");
@@ -1283,13 +1282,10 @@
DCHECK_EQ(input_vdex_fd_, -1);
if (!input_vdex_.empty()) {
std::string error_msg;
- input_vdex_file_.reset(VdexFile::Open(input_vdex_,
- /* writable */ false,
- /* low_4gb */ false,
- &error_msg));
- if (input_vdex_file_ != nullptr && !input_vdex_file_->IsValid()) {
- input_vdex_file_.reset(nullptr);
- }
+ input_vdex_file_ = VdexFile::Open(input_vdex_,
+ /* writable */ false,
+ /* low_4gb */ false,
+ &error_msg);
}
DCHECK_EQ(output_vdex_fd_, -1);
@@ -1331,19 +1327,16 @@
PLOG(WARNING) << "Failed getting length of vdex file";
} else {
std::string error_msg;
- input_vdex_file_.reset(VdexFile::Open(input_vdex_fd_,
- s.st_size,
- "vdex",
- /* writable */ false,
- /* low_4gb */ false,
- &error_msg));
+ input_vdex_file_ = VdexFile::Open(input_vdex_fd_,
+ s.st_size,
+ "vdex",
+ /* writable */ false,
+ /* low_4gb */ false,
+ &error_msg);
// If there's any problem with the passed vdex, just warn and proceed
// without it.
if (input_vdex_file_ == nullptr) {
- PLOG(WARNING) << "Failed opening vdex file " << error_msg;
- } else if (!input_vdex_file_->IsValid()) {
- PLOG(WARNING) << "Existing vdex file is invalid";
- input_vdex_file_.reset(nullptr);
+ PLOG(WARNING) << "Failed opening vdex file: " << error_msg;
}
}
}
@@ -1540,9 +1533,9 @@
std::unique_ptr<MemMap> opened_dex_files_map;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
// No need to verify the dex file for:
- // 1) dexlayout, which already verified it
+ // 1) kSpeedProfile, since it includes dexlayout, which does the verification.
// 2) when we have a vdex file, which means it was already verified.
- bool verify = compiler_options_->GetCompilerFilter() != CompilerFilter::kLayoutProfile &&
+ bool verify = compiler_options_->GetCompilerFilter() != CompilerFilter::kSpeedProfile &&
(input_vdex_file_ == nullptr);
if (!oat_writers_[i]->WriteAndOpenDexFiles(
kIsVdexEnabled ? vdex_files_[i].get() : oat_files_[i].get(),
@@ -2349,7 +2342,7 @@
compiler_options_.get(),
oat_file.get()));
elf_writers_.back()->Start();
- bool do_dexlayout = compiler_options_->GetCompilerFilter() == CompilerFilter::kLayoutProfile;
+ bool do_dexlayout = compiler_options_->GetCompilerFilter() == CompilerFilter::kSpeedProfile;
oat_writers_.emplace_back(new OatWriter(
IsBootImage(), timings_, do_dexlayout ? profile_compilation_info_.get() : nullptr));
}
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index e86e560..c2275ac 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -125,7 +125,7 @@
class_path = OatFile::kSpecialSharedLibrary;
}
argv.push_back(class_path);
- if (runtime->IsDebuggable()) {
+ if (runtime->IsJavaDebuggable()) {
argv.push_back("--debuggable");
}
runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
@@ -591,7 +591,7 @@
GenerateProfile(profile_location, dex_location, dex_file->GetLocationChecksum());
const std::vector<std::string>& extra_args = { "--profile-file=" + profile_location };
- GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kLayoutProfile, extra_args);
+ GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeedProfile, extra_args);
CheckValidity();
ASSERT_TRUE(success_);
@@ -632,7 +632,7 @@
EXPECT_EQ(old_class1, new_class0);
}
- EXPECT_EQ(odex_file->GetCompilerFilter(), CompilerFilter::kLayoutProfile);
+ EXPECT_EQ(odex_file->GetCompilerFilter(), CompilerFilter::kSpeedProfile);
}
// Check whether the dex2oat run was really successful.
diff --git a/dexdump/dexdump_test.cc b/dexdump/dexdump_test.cc
index 53dda6a..640f387 100644
--- a/dexdump/dexdump_test.cc
+++ b/dexdump/dexdump_test.cc
@@ -23,6 +23,7 @@
#include "common_runtime_test.h"
#include "runtime/arch/instruction_set.h"
+#include "runtime/exec_utils.h"
#include "runtime/os.h"
#include "runtime/utils.h"
#include "utils.h"
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index 46a1c43..da1e1d2 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -23,6 +23,7 @@
#include "base/unix_file/fd_file.h"
#include "common_runtime_test.h"
+#include "exec_utils.h"
#include "utils.h"
namespace art {
diff --git a/dexlist/dexlist_test.cc b/dexlist/dexlist_test.cc
index 1320942..173a456 100644
--- a/dexlist/dexlist_test.cc
+++ b/dexlist/dexlist_test.cc
@@ -23,6 +23,7 @@
#include "common_runtime_test.h"
#include "runtime/arch/instruction_set.h"
+#include "runtime/exec_utils.h"
#include "runtime/gc/heap.h"
#include "runtime/gc/space/image_space.h"
#include "runtime/os.h"
diff --git a/dexoptanalyzer/Android.bp b/dexoptanalyzer/Android.bp
new file mode 100644
index 0000000..cf4c99e
--- /dev/null
+++ b/dexoptanalyzer/Android.bp
@@ -0,0 +1,68 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_defaults {
+ name: "dexoptanalyzer-defaults",
+ host_supported: true,
+ defaults: ["art_defaults"],
+ srcs: [
+ "dexoptanalyzer.cc",
+ ],
+
+ target: {
+ android: {
+ compile_multilib: "prefer32",
+ },
+ },
+
+ include_dirs: [
+ "art/cmdline",
+ ],
+
+ shared_libs: [
+ "libbase",
+ ],
+}
+
+art_cc_binary {
+ name: "dexoptanalyzer",
+ defaults: ["dexoptanalyzer-defaults"],
+ shared_libs: [
+ "libart",
+ ],
+}
+
+art_cc_binary {
+ name: "dexoptanalyzerd",
+ defaults: [
+ "dexoptanalyzer-defaults",
+ "art_debug_defaults",
+ ],
+ shared_libs: [
+ "libartd",
+ ],
+}
+
+art_cc_test {
+ name: "art_dexoptanalyzer_tests",
+ defaults: [
+ "art_gtest_defaults",
+ ],
+ shared_libs: [
+ "libbacktrace"
+ ],
+ srcs: ["dexoptanalyzer_test.cc"],
+}
diff --git a/dexoptanalyzer/dexoptanalyzer.cc b/dexoptanalyzer/dexoptanalyzer.cc
new file mode 100644
index 0000000..965e407
--- /dev/null
+++ b/dexoptanalyzer/dexoptanalyzer.cc
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string>
+
+#include "android-base/stringprintf.h"
+#include "android-base/strings.h"
+#include "compiler_filter.h"
+#include "dex_file.h"
+#include "noop_compiler_callbacks.h"
+#include "oat_file_assistant.h"
+#include "os.h"
+#include "runtime.h"
+#include "thread-inl.h"
+#include "utils.h"
+
+namespace art {
+
+// See OatFileAssistant docs for the meaning of the valid return codes.
+enum ReturnCodes {
+ kNoDexOptNeeded = 0,
+ kDex2OatFromScratch = 1,
+ kDex2OatForBootImageOat = 2,
+ kDex2OatForFilterOat = 3,
+ kDex2OatForRelocationOat = 4,
+ kDex2OatForBootImageOdex = 5,
+ kDex2OatForFilterOdex = 6,
+ kDex2OatForRelocationOdex = 7,
+
+ kErrorInvalidArguments = 101,
+ kErrorCannotCreateRuntime = 102,
+ kErrorUnknownDexOptNeeded = 103
+};
+
+static int original_argc;
+static char** original_argv;
+
+static std::string CommandLine() {
+ std::vector<std::string> command;
+ for (int i = 0; i < original_argc; ++i) {
+ command.push_back(original_argv[i]);
+ }
+ return android::base::Join(command, ' ');
+}
+
+static void UsageErrorV(const char* fmt, va_list ap) {
+ std::string error;
+ android::base::StringAppendV(&error, fmt, ap);
+ LOG(ERROR) << error;
+}
+
+static void UsageError(const char* fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ UsageErrorV(fmt, ap);
+ va_end(ap);
+}
+
+NO_RETURN static void Usage(const char *fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ UsageErrorV(fmt, ap);
+ va_end(ap);
+
+ UsageError("Command: %s", CommandLine().c_str());
+ UsageError(" Performs a dexopt analysis on the given dex file and returns whether or not");
+ UsageError(" the dex file needs to be dexopted.");
+ UsageError("Usage: dexoptanalyzer [options]...");
+ UsageError("");
+ UsageError(" --dex-file=<filename>: the dex file which should be analyzed.");
+ UsageError("");
+ UsageError(" --isa=<string>: the instruction set for which the analysis should be performed.");
+ UsageError("");
+ UsageError(" --compiler-filter=<string>: the target compiler filter to be used as reference");
+ UsageError(" when deciding if the dex file needs to be optimized.");
+ UsageError("");
+ UsageError(" --assume-profile-changed: assumes the profile information has changed");
+ UsageError(" when deciding if the dex file needs to be optimized.");
+ UsageError("");
+ UsageError(" --image=<filename>: optional, the image to be used to decide if the associated");
+ UsageError(" oat file is up to date. Defaults to $ANDROID_ROOT/framework/boot.art.");
+ UsageError(" Example: --image=/system/framework/boot.art");
+ UsageError("");
+ UsageError(" --android-data=<directory>: optional, the directory which should be used as");
+ UsageError(" android-data. By default ANDROID_DATA env variable is used.");
+ UsageError("");
+ UsageError("Return code:");
+ UsageError(" To make it easier to integrate with the internal tools this command will make");
+ UsageError(" available its result (dexoptNeeded) as the exit/return code. i.e. it will not");
+ UsageError(" return 0 for success and a non zero values for errors as the conventional");
+ UsageError(" commands. The following return codes are possible:");
+ UsageError(" kNoDexOptNeeded = 0");
+ UsageError(" kDex2OatFromScratch = 1");
+ UsageError(" kDex2OatForBootImageOat = 2");
+ UsageError(" kDex2OatForFilterOat = 3");
+ UsageError(" kDex2OatForRelocationOat = 4");
+ UsageError(" kDex2OatForBootImageOdex = 5");
+ UsageError(" kDex2OatForFilterOdex = 6");
+ UsageError(" kDex2OatForRelocationOdex = 7");
+
+ UsageError(" kErrorInvalidArguments = 101");
+ UsageError(" kErrorCannotCreateRuntime = 102");
+ UsageError(" kErrorUnknownDexOptNeeded = 103");
+ UsageError("");
+
+ exit(kErrorInvalidArguments);
+}
+
+class DexoptAnalyzer FINAL {
+ public:
+ DexoptAnalyzer() : assume_profile_changed_(false) {}
+
+ void ParseArgs(int argc, char **argv) {
+ original_argc = argc;
+ original_argv = argv;
+
+ InitLogging(argv, Runtime::Aborter);
+ // Skip over the command name.
+ argv++;
+ argc--;
+
+ if (argc == 0) {
+ Usage("No arguments specified");
+ }
+
+ for (int i = 0; i < argc; ++i) {
+ const StringPiece option(argv[i]);
+ if (option == "--assume-profile-changed") {
+ assume_profile_changed_ = true;
+ } else if (option.starts_with("--dex-file=")) {
+ dex_file_ = option.substr(strlen("--dex-file=")).ToString();
+ } else if (option.starts_with("--compiler-filter=")) {
+ std::string filter_str = option.substr(strlen("--compiler-filter=")).ToString();
+ if (!CompilerFilter::ParseCompilerFilter(filter_str.c_str(), &compiler_filter_)) {
+ Usage("Invalid compiler filter '%s'", option.data());
+ }
+ } else if (option.starts_with("--isa=")) {
+ std::string isa_str = option.substr(strlen("--isa=")).ToString();
+ isa_ = GetInstructionSetFromString(isa_str.c_str());
+ if (isa_ == kNone) {
+ Usage("Invalid isa '%s'", option.data());
+ }
+ } else if (option.starts_with("--image=")) {
+ image_ = option.substr(strlen("--image=")).ToString();
+ } else if (option.starts_with("--android-data=")) {
+ // Overwrite android-data if needed (oat file assistant relies on a valid directory to
+ // compute dalvik-cache folder). This is mostly used in tests.
+ std::string new_android_data = option.substr(strlen("--android-data=")).ToString();
+ setenv("ANDROID_DATA", new_android_data.c_str(), 1);
+ } else {
+ Usage("Unknown argument '%s'", option.data());
+ }
+ }
+
+ if (image_.empty()) {
+ // If we don't receive the image, try to use the default one.
+ // Tests may specify a different image (e.g. core image).
+ std::string error_msg;
+ image_ = GetDefaultBootImageLocation(&error_msg);
+
+ if (image_.empty()) {
+ LOG(ERROR) << error_msg;
+ Usage("--image unspecified and ANDROID_ROOT not set or image file does not exist.");
+ }
+ }
+ }
+
+ bool CreateRuntime() {
+ RuntimeOptions options;
+ // The image could be custom, so make sure we explicitly pass it.
+ std::string img = "-Ximage:" + image_;
+ options.push_back(std::make_pair(img.c_str(), nullptr));
+ // The instruction set of the image should match the instruction set we will test.
+ const void* isa_opt = reinterpret_cast<const void*>(GetInstructionSetString(isa_));
+ options.push_back(std::make_pair("imageinstructionset", isa_opt));
+  // Disable libsigchain. We don't need it to evaluate DexOptNeeded status.
+ options.push_back(std::make_pair("-Xno-sig-chain", nullptr));
+ // Pretend we are a compiler so that we can re-use the same infrastructure to load a different
+ // ISA image and minimize the amount of things that get started.
+ NoopCompilerCallbacks callbacks;
+ options.push_back(std::make_pair("compilercallbacks", &callbacks));
+ // Make sure we don't attempt to relocate. The tool should only retrieve the DexOptNeeded
+ // status and not attempt to relocate the boot image.
+ options.push_back(std::make_pair("-Xnorelocate", nullptr));
+
+ if (!Runtime::Create(options, false)) {
+ LOG(ERROR) << "Unable to initialize runtime";
+ return false;
+ }
+ // Runtime::Create acquired the mutator_lock_ that is normally given away when we
+ // Runtime::Start. Give it away now.
+ Thread::Current()->TransitionFromRunnableToSuspended(kNative);
+
+ return true;
+ }
+
+ int GetDexOptNeeded() {
+ // If the file does not exist there's nothing to do.
+ // This is a fast path to avoid creating the runtime (b/34385298).
+ if (!OS::FileExists(dex_file_.c_str())) {
+ return kNoDexOptNeeded;
+ }
+ if (!CreateRuntime()) {
+ return kErrorCannotCreateRuntime;
+ }
+ OatFileAssistant oat_file_assistant(dex_file_.c_str(), isa_, /*load_executable*/ false);
+ // Always treat elements of the bootclasspath as up-to-date.
+ // TODO(calin): this check should be in OatFileAssistant.
+ if (oat_file_assistant.IsInBootClassPath()) {
+ return kNoDexOptNeeded;
+ }
+ int dexoptNeeded = oat_file_assistant.GetDexOptNeeded(
+ compiler_filter_, assume_profile_changed_);
+
+  // Convert OatFileAssistant codes to dexoptanalyzer codes.
+ switch (dexoptNeeded) {
+ case OatFileAssistant::kNoDexOptNeeded: return kNoDexOptNeeded;
+ case OatFileAssistant::kDex2OatFromScratch: return kDex2OatFromScratch;
+ case OatFileAssistant::kDex2OatForBootImage: return kDex2OatForBootImageOat;
+ case OatFileAssistant::kDex2OatForFilter: return kDex2OatForFilterOat;
+ case OatFileAssistant::kDex2OatForRelocation: return kDex2OatForRelocationOat;
+
+ case -OatFileAssistant::kDex2OatForBootImage: return kDex2OatForBootImageOdex;
+ case -OatFileAssistant::kDex2OatForFilter: return kDex2OatForFilterOdex;
+ case -OatFileAssistant::kDex2OatForRelocation: return kDex2OatForRelocationOdex;
+ default:
+ LOG(ERROR) << "Unknown dexoptNeeded " << dexoptNeeded;
+ return kErrorUnknownDexOptNeeded;
+ }
+ }
+
+ private:
+ std::string dex_file_;
+ InstructionSet isa_;
+ CompilerFilter::Filter compiler_filter_;
+ bool assume_profile_changed_;
+ std::string image_;
+};
+
+static int dexoptAnalyze(int argc, char** argv) {
+ DexoptAnalyzer analyzer;
+
+ // Parse arguments. Argument mistakes will lead to exit(kErrorInvalidArguments) in UsageError.
+ analyzer.ParseArgs(argc, argv);
+ return analyzer.GetDexOptNeeded();
+}
+
+} // namespace art
+
+int main(int argc, char **argv) {
+ return art::dexoptAnalyze(argc, argv);
+}
diff --git a/dexoptanalyzer/dexoptanalyzer_test.cc b/dexoptanalyzer/dexoptanalyzer_test.cc
new file mode 100644
index 0000000..57d3f1f
--- /dev/null
+++ b/dexoptanalyzer/dexoptanalyzer_test.cc
@@ -0,0 +1,311 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "arch/instruction_set.h"
+#include "compiler_filter.h"
+#include "dexopt_test.h"
+
+namespace art {
+
+class DexoptAnalyzerTest : public DexoptTest {
+ protected:
+ std::string GetDexoptAnalyzerCmd() {
+ std::string file_path = GetTestAndroidRoot();
+ file_path += "/bin/dexoptanalyzer";
+ if (kIsDebugBuild) {
+ file_path += "d";
+ }
+ EXPECT_TRUE(OS::FileExists(file_path.c_str())) << file_path << " should be a valid file path";
+ return file_path;
+ }
+
+ int Analyze(const std::string& dex_file,
+ CompilerFilter::Filter compiler_filter,
+ bool assume_profile_changed) {
+ std::string dexoptanalyzer_cmd = GetDexoptAnalyzerCmd();
+ std::vector<std::string> argv_str;
+ argv_str.push_back(dexoptanalyzer_cmd);
+ argv_str.push_back("--dex-file=" + dex_file);
+ argv_str.push_back("--isa=" + std::string(GetInstructionSetString(kRuntimeISA)));
+ argv_str.push_back("--compiler-filter=" + CompilerFilter::NameOfFilter(compiler_filter));
+ if (assume_profile_changed) {
+ argv_str.push_back("--assume-profile-changed");
+ }
+ argv_str.push_back("--image=" + GetImageLocation());
+ argv_str.push_back("--android-data=" + android_data_);
+
+ std::string error;
+ return ExecAndReturnCode(argv_str, &error);
+ }
+
+ int DexoptanalyzerToOatFileAssistant(int dexoptanalyzerResult) {
+ switch (dexoptanalyzerResult) {
+ case 0: return OatFileAssistant::kNoDexOptNeeded;
+ case 1: return OatFileAssistant::kDex2OatFromScratch;
+ case 2: return OatFileAssistant::kDex2OatForBootImage;
+ case 3: return OatFileAssistant::kDex2OatForFilter;
+ case 4: return OatFileAssistant::kDex2OatForRelocation;
+ case 5: return -OatFileAssistant::kDex2OatForBootImage;
+ case 6: return -OatFileAssistant::kDex2OatForFilter;
+ case 7: return -OatFileAssistant::kDex2OatForRelocation;
+ default: return dexoptanalyzerResult;
+ }
+ }
+
+ // Verify that the output of dexoptanalyzer for the given arguments is the same
+ // as the output of OatFileAssistant::GetDexOptNeeded.
+ void Verify(const std::string& dex_file,
+ CompilerFilter::Filter compiler_filter,
+ bool assume_profile_changed = false) {
+ int dexoptanalyzerResult = Analyze(dex_file, compiler_filter, assume_profile_changed);
+ dexoptanalyzerResult = DexoptanalyzerToOatFileAssistant(dexoptanalyzerResult);
+ OatFileAssistant oat_file_assistant(dex_file.c_str(), kRuntimeISA, /*load_executable*/ false);
+ int assistantResult = oat_file_assistant.GetDexOptNeeded(
+ compiler_filter, assume_profile_changed);
+ EXPECT_EQ(assistantResult, dexoptanalyzerResult);
+ }
+};
+
+// The tests below exercise the same test case from oat_file_assistant_test.cc.
+
+// Case: We have a DEX file, but no OAT file for it.
+TEST_F(DexoptAnalyzerTest, DexNoOat) {
+ std::string dex_location = GetScratchDir() + "/DexNoOat.jar";
+ Copy(GetDexSrc1(), dex_location);
+
+ Verify(dex_location, CompilerFilter::kSpeed);
+ Verify(dex_location, CompilerFilter::kVerifyAtRuntime);
+ Verify(dex_location, CompilerFilter::kInterpretOnly);
+ Verify(dex_location, CompilerFilter::kSpeedProfile);
+}
+
+// Case: We have a DEX file and up-to-date OAT file for it.
+TEST_F(DexoptAnalyzerTest, OatUpToDate) {
+ std::string dex_location = GetScratchDir() + "/OatUpToDate.jar";
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str(), CompilerFilter::kSpeed);
+
+ Verify(dex_location, CompilerFilter::kSpeed);
+ Verify(dex_location, CompilerFilter::kInterpretOnly);
+ Verify(dex_location, CompilerFilter::kVerifyAtRuntime);
+ Verify(dex_location, CompilerFilter::kEverything);
+}
+
+// Case: We have a DEX file and speed-profile OAT file for it.
+TEST_F(DexoptAnalyzerTest, ProfileOatUpToDate) {
+ std::string dex_location = GetScratchDir() + "/ProfileOatUpToDate.jar";
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str(), CompilerFilter::kSpeedProfile);
+
+ Verify(dex_location, CompilerFilter::kSpeedProfile, false);
+ Verify(dex_location, CompilerFilter::kInterpretOnly, false);
+ Verify(dex_location, CompilerFilter::kSpeedProfile, true);
+ Verify(dex_location, CompilerFilter::kInterpretOnly, true);
+}
+
+// Case: We have a MultiDEX file and up-to-date OAT file for it.
+TEST_F(DexoptAnalyzerTest, MultiDexOatUpToDate) {
+ std::string dex_location = GetScratchDir() + "/MultiDexOatUpToDate.jar";
+ Copy(GetMultiDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str(), CompilerFilter::kSpeed);
+
+ Verify(dex_location, CompilerFilter::kSpeed, false);
+}
+
+// Case: We have a MultiDEX file where the secondary dex file is out of date.
+TEST_F(DexoptAnalyzerTest, MultiDexSecondaryOutOfDate) {
+ std::string dex_location = GetScratchDir() + "/MultiDexSecondaryOutOfDate.jar";
+
+ // Compile code for GetMultiDexSrc1.
+ Copy(GetMultiDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str(), CompilerFilter::kSpeed);
+
+ // Now overwrite the dex file with GetMultiDexSrc2 so the secondary checksum
+ // is out of date.
+ Copy(GetMultiDexSrc2(), dex_location);
+
+ Verify(dex_location, CompilerFilter::kSpeed, false);
+}
+
+
+// Case: We have a DEX file and an OAT file out of date with respect to the
+// dex checksum.
+TEST_F(DexoptAnalyzerTest, OatDexOutOfDate) {
+ std::string dex_location = GetScratchDir() + "/OatDexOutOfDate.jar";
+
+ // We create a dex, generate an oat for it, then overwrite the dex with a
+ // different dex to make the oat out of date.
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str(), CompilerFilter::kSpeed);
+ Copy(GetDexSrc2(), dex_location);
+
+ Verify(dex_location, CompilerFilter::kVerifyAtRuntime);
+ Verify(dex_location, CompilerFilter::kSpeed);
+}
+
+// Case: We have a DEX file and an OAT file out of date with respect to the
+// boot image.
+TEST_F(DexoptAnalyzerTest, OatImageOutOfDate) {
+ std::string dex_location = GetScratchDir() + "/OatImageOutOfDate.jar";
+
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str(),
+ CompilerFilter::kSpeed,
+ /*relocate*/true,
+ /*pic*/false,
+ /*with_alternate_image*/true);
+
+ Verify(dex_location, CompilerFilter::kVerifyAtRuntime);
+ Verify(dex_location, CompilerFilter::kInterpretOnly);
+ Verify(dex_location, CompilerFilter::kSpeed);
+}
+
+// Case: We have a DEX file and a verify-at-runtime OAT file out of date with
+// respect to the boot image.
+// It shouldn't matter that the OAT file is out of date, because it is
+// verify-at-runtime.
+TEST_F(DexoptAnalyzerTest, OatVerifyAtRuntimeImageOutOfDate) {
+ std::string dex_location = GetScratchDir() + "/OatVerifyAtRuntimeImageOutOfDate.jar";
+
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str(),
+ CompilerFilter::kVerifyAtRuntime,
+ /*relocate*/true,
+ /*pic*/false,
+ /*with_alternate_image*/true);
+
+ Verify(dex_location, CompilerFilter::kVerifyAtRuntime);
+ Verify(dex_location, CompilerFilter::kInterpretOnly);
+}
+
+// Case: We have a DEX file and an ODEX file, but no OAT file.
+TEST_F(DexoptAnalyzerTest, DexOdexNoOat) {
+ std::string dex_location = GetScratchDir() + "/DexOdexNoOat.jar";
+ std::string odex_location = GetOdexDir() + "/DexOdexNoOat.odex";
+
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
+
+ Verify(dex_location, CompilerFilter::kVerifyAtRuntime);
+ Verify(dex_location, CompilerFilter::kSpeed);
+}
+
+// Case: We have a stripped DEX file and a PIC ODEX file, but no OAT file.
+TEST_F(DexoptAnalyzerTest, StrippedDexOdexNoOat) {
+ std::string dex_location = GetScratchDir() + "/StrippedDexOdexNoOat.jar";
+ std::string odex_location = GetOdexDir() + "/StrippedDexOdexNoOat.odex";
+
+ Copy(GetDexSrc1(), dex_location);
+ GeneratePicOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
+
+ // Strip the dex file
+ Copy(GetStrippedDexSrc1(), dex_location);
+
+ Verify(dex_location, CompilerFilter::kSpeed);
+}
+
+// Case: We have a stripped DEX file, a PIC ODEX file, and an out-of-date OAT file.
+TEST_F(DexoptAnalyzerTest, StrippedDexOdexOat) {
+ std::string dex_location = GetScratchDir() + "/StrippedDexOdexOat.jar";
+ std::string odex_location = GetOdexDir() + "/StrippedDexOdexOat.odex";
+
+ // Create the oat file from a different dex file so it looks out of date.
+ Copy(GetDexSrc2(), dex_location);
+ GenerateOatForTest(dex_location.c_str(), CompilerFilter::kSpeed);
+
+ // Create the odex file
+ Copy(GetDexSrc1(), dex_location);
+ GeneratePicOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
+
+ // Strip the dex file.
+ Copy(GetStrippedDexSrc1(), dex_location);
+
+ Verify(dex_location, CompilerFilter::kVerifyAtRuntime);
+ Verify(dex_location, CompilerFilter::kSpeed);
+ Verify(dex_location, CompilerFilter::kEverything);
+}
+
+// Case: We have a stripped (or resource-only) DEX file, no ODEX file and no
+// OAT file. Expect: The status is kNoDexOptNeeded.
+TEST_F(DexoptAnalyzerTest, ResourceOnlyDex) {
+ std::string dex_location = GetScratchDir() + "/ResourceOnlyDex.jar";
+
+ Copy(GetStrippedDexSrc1(), dex_location);
+
+ Verify(dex_location, CompilerFilter::kSpeed);
+ Verify(dex_location, CompilerFilter::kVerifyAtRuntime);
+ Verify(dex_location, CompilerFilter::kInterpretOnly);
+}
+
+// Case: We have a DEX file, an ODEX file and an OAT file, where the ODEX and
+// OAT files both have patch delta of 0.
+TEST_F(DexoptAnalyzerTest, OdexOatOverlap) {
+ std::string dex_location = GetScratchDir() + "/OdexOatOverlap.jar";
+ std::string odex_location = GetOdexDir() + "/OdexOatOverlap.odex";
+ std::string oat_location = GetOdexDir() + "/OdexOatOverlap.oat";
+
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
+
+ // Create the oat file by copying the odex so they are located in the same
+ // place in memory.
+ Copy(odex_location, oat_location);
+
+ Verify(dex_location, CompilerFilter::kSpeed);
+}
+
+// Case: We have a DEX file and a PIC ODEX file, but no OAT file.
+TEST_F(DexoptAnalyzerTest, DexPicOdexNoOat) {
+ std::string dex_location = GetScratchDir() + "/DexPicOdexNoOat.jar";
+ std::string odex_location = GetOdexDir() + "/DexPicOdexNoOat.odex";
+
+ Copy(GetDexSrc1(), dex_location);
+ GeneratePicOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
+
+ Verify(dex_location, CompilerFilter::kSpeed);
+ Verify(dex_location, CompilerFilter::kEverything);
+}
+
+// Case: We have a DEX file and a VerifyAtRuntime ODEX file, but no OAT file.
+TEST_F(DexoptAnalyzerTest, DexVerifyAtRuntimeOdexNoOat) {
+ std::string dex_location = GetScratchDir() + "/DexVerifyAtRuntimeOdexNoOat.jar";
+ std::string odex_location = GetOdexDir() + "/DexVerifyAtRuntimeOdexNoOat.odex";
+
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kVerifyAtRuntime);
+
+ Verify(dex_location, CompilerFilter::kVerifyAtRuntime);
+ Verify(dex_location, CompilerFilter::kSpeed);
+}
+
+// Case: Non-standard extension for dex file.
+TEST_F(DexoptAnalyzerTest, LongDexExtension) {
+ std::string dex_location = GetScratchDir() + "/LongDexExtension.jarx";
+ Copy(GetDexSrc1(), dex_location);
+
+ Verify(dex_location, CompilerFilter::kSpeed);
+}
+
+// Case: Very short, non-existent Dex location.
+TEST_F(DexoptAnalyzerTest, ShortDexLocation) {
+ std::string dex_location = "/xx";
+
+ Verify(dex_location, CompilerFilter::kSpeed);
+}
+
+} // namespace art
diff --git a/imgdiag/imgdiag_test.cc b/imgdiag/imgdiag_test.cc
index 3f2afc0..0d46b2e 100644
--- a/imgdiag/imgdiag_test.cc
+++ b/imgdiag/imgdiag_test.cc
@@ -24,6 +24,7 @@
#include "runtime/os.h"
#include "runtime/arch/instruction_set.h"
+#include "runtime/exec_utils.h"
#include "runtime/utils.h"
#include "runtime/gc/space/image_space.h"
#include "runtime/gc/heap.h"
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 69901c1..0f02da7 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -589,16 +589,17 @@
kByteKindCodeInfoInlineInfo,
kByteKindCodeInfoEncoding,
kByteKindCodeInfoOther,
+ kByteKindCodeInfoStackMasks,
+ kByteKindCodeInfoRegisterMasks,
kByteKindStackMapNativePc,
kByteKindStackMapDexPc,
kByteKindStackMapDexRegisterMap,
kByteKindStackMapInlineInfo,
- kByteKindStackMapRegisterMask,
- kByteKindStackMapMask,
- kByteKindStackMapOther,
+ kByteKindStackMapRegisterMaskIndex,
+ kByteKindStackMapStackMaskIndex,
kByteKindCount,
kByteKindStackMapFirst = kByteKindCodeInfoOther,
- kByteKindStackMapLast = kByteKindStackMapOther,
+ kByteKindStackMapLast = kByteKindStackMapStackMaskIndex,
};
int64_t bits[kByteKindCount] = {};
// Since code has deduplication, seen tracks already seen pointers to avoid double counting
@@ -626,48 +627,45 @@
const int64_t stack_map_bits = std::accumulate(bits + kByteKindStackMapFirst,
bits + kByteKindStackMapLast + 1,
0u);
- Dump(os, "Code ", bits[kByteKindCode], sum);
- Dump(os, "QuickMethodHeader ", bits[kByteKindQuickMethodHeader], sum);
- Dump(os, "CodeInfoEncoding ", bits[kByteKindCodeInfoEncoding], sum);
- Dump(os, "CodeInfoLocationCatalog ", bits[kByteKindCodeInfoLocationCatalog], sum);
- Dump(os, "CodeInfoDexRegisterMap ", bits[kByteKindCodeInfoDexRegisterMap], sum);
- Dump(os, "CodeInfoInlineInfo ", bits[kByteKindCodeInfoInlineInfo], sum);
- Dump(os, "CodeInfoStackMap ", stack_map_bits, sum);
+ Dump(os, "Code ", bits[kByteKindCode], sum);
+ Dump(os, "QuickMethodHeader ", bits[kByteKindQuickMethodHeader], sum);
+ Dump(os, "CodeInfoEncoding ", bits[kByteKindCodeInfoEncoding], sum);
+ Dump(os, "CodeInfoLocationCatalog ", bits[kByteKindCodeInfoLocationCatalog], sum);
+ Dump(os, "CodeInfoDexRegisterMap ", bits[kByteKindCodeInfoDexRegisterMap], sum);
+ Dump(os, "CodeInfoInlineInfo ", bits[kByteKindCodeInfoInlineInfo], sum);
+ Dump(os, "CodeInfoStackMasks ", bits[kByteKindCodeInfoStackMasks], sum);
+ Dump(os, "CodeInfoRegisterMasks ", bits[kByteKindCodeInfoRegisterMasks], sum);
+ Dump(os, "CodeInfoStackMap ", stack_map_bits, sum);
{
ScopedIndentation indent1(&os);
Dump(os,
- "StackMapNativePc ",
+ "StackMapNativePc ",
bits[kByteKindStackMapNativePc],
stack_map_bits,
"stack map");
Dump(os,
- "StackMapDexPcEncoding ",
+ "StackMapDexPcEncoding ",
bits[kByteKindStackMapDexPc],
stack_map_bits,
"stack map");
Dump(os,
- "StackMapDexRegisterMap ",
+ "StackMapDexRegisterMap ",
bits[kByteKindStackMapDexRegisterMap],
stack_map_bits,
"stack map");
Dump(os,
- "StackMapInlineInfo ",
+ "StackMapInlineInfo ",
bits[kByteKindStackMapInlineInfo],
stack_map_bits,
"stack map");
Dump(os,
- "StackMapRegisterMaskEncoding ",
- bits[kByteKindStackMapRegisterMask],
+ "StackMapRegisterMaskIndex ",
+ bits[kByteKindStackMapRegisterMaskIndex],
stack_map_bits,
"stack map");
Dump(os,
- "StackMapMask ",
- bits[kByteKindStackMapMask],
- stack_map_bits,
- "stack map");
- Dump(os,
- "StackMapOther ",
- bits[kByteKindStackMapOther],
+ "StackMapStackMaskIndex ",
+ bits[kByteKindStackMapStackMaskIndex],
stack_map_bits,
"stack map");
}
@@ -1573,18 +1571,18 @@
Stats::kByteKindStackMapInlineInfo,
stack_map_encoding.GetInlineInfoEncoding().BitSize() * num_stack_maps);
stats_.AddBits(
- Stats::kByteKindStackMapRegisterMask,
- stack_map_encoding.GetRegisterMaskEncoding().BitSize() * num_stack_maps);
- const size_t stack_mask_bits = encoding.stack_map_size_in_bytes * kBitsPerByte -
- stack_map_encoding.GetStackMaskBitOffset();
+ Stats::kByteKindStackMapRegisterMaskIndex,
+ stack_map_encoding.GetRegisterMaskIndexEncoding().BitSize() * num_stack_maps);
stats_.AddBits(
- Stats::kByteKindStackMapMask,
- stack_mask_bits * num_stack_maps);
- const size_t stack_map_bits =
- stack_map_encoding.GetStackMaskBitOffset() + stack_mask_bits;
+ Stats::kByteKindStackMapStackMaskIndex,
+ stack_map_encoding.GetStackMaskIndexEncoding().BitSize() * num_stack_maps);
stats_.AddBits(
- Stats::kByteKindStackMapOther,
- (encoding.stack_map_size_in_bytes * kBitsPerByte - stack_map_bits) * num_stack_maps);
+ Stats::kByteKindCodeInfoStackMasks,
+ helper.GetCodeInfo().GetNumberOfStackMaskBits(encoding) *
+ encoding.number_of_stack_masks);
+ stats_.AddBits(
+ Stats::kByteKindCodeInfoRegisterMasks,
+ encoding.register_mask_size_in_bits * encoding.number_of_stack_masks);
const size_t stack_map_bytes = helper.GetCodeInfo().GetStackMapsSize(encoding);
const size_t location_catalog_bytes =
helper.GetCodeInfo().GetDexRegisterLocationCatalogSize(encoding);
diff --git a/oatdump/oatdump_test.cc b/oatdump/oatdump_test.cc
index ba57d18..503cd4d 100644
--- a/oatdump/oatdump_test.cc
+++ b/oatdump/oatdump_test.cc
@@ -24,6 +24,7 @@
#include "base/unix_file/fd_file.h"
#include "runtime/arch/instruction_set.h"
+#include "runtime/exec_utils.h"
#include "runtime/gc/heap.h"
#include "runtime/gc/space/image_space.h"
#include "runtime/os.h"
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index 2f40fef..a6c3cf0 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -18,6 +18,7 @@
#include "base/unix_file/fd_file.h"
#include "common_runtime_test.h"
+#include "exec_utils.h"
#include "profile_assistant.h"
#include "jit/profile_compilation_info.h"
#include "utils.h"
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 81f174e..9585ba2 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -57,6 +57,7 @@
"dex_file_verifier.cc",
"dex_instruction.cc",
"elf_file.cc",
+ "exec_utils.cc",
"fault_handler.cc",
"gc/allocation_record.cc",
"gc/allocator/dlmalloc.cc",
@@ -154,6 +155,7 @@
"native/java_lang_Thread.cc",
"native/java_lang_Throwable.cc",
"native/java_lang_VMClassLoader.cc",
+ "native/java_lang_invoke_MethodHandleImpl.cc",
"native/java_lang_ref_FinalizerReference.cc",
"native/java_lang_ref_Reference.cc",
"native/java_lang_reflect_Array.cc",
@@ -185,6 +187,7 @@
"reflection.cc",
"runtime.cc",
"runtime_callbacks.cc",
+ "runtime_common.cc",
"runtime_options.cc",
"signal_catcher.cc",
"stack.cc",
@@ -205,6 +208,7 @@
"verifier/reg_type_cache.cc",
"verifier/register_line.cc",
"verifier/verifier_deps.cc",
+ "verify_object.cc",
"well_known_classes.cc",
"zip_archive.cc",
@@ -376,6 +380,10 @@
},
cflags: ["-DBUILDING_LIBART=1"],
generated_sources: ["art_operator_srcs"],
+ // asm_support_gen.h (used by asm_support.h) is generated with cpp-define-generator
+ generated_headers: ["cpp-define-generator-asm-support"],
+ // export our headers so the libart-gtest targets can use it as well.
+ export_generated_headers: ["cpp-define-generator-asm-support"],
clang: true,
include_dirs: [
"art/cmdline",
@@ -474,10 +482,14 @@
art_cc_library {
name: "libart-runtime-gtest",
defaults: ["libart-gtest-defaults"],
- srcs: ["common_runtime_test.cc"],
+ srcs: [
+ "common_runtime_test.cc",
+ "dexopt_test.cc"
+ ],
shared_libs: [
"libartd",
"libbase",
+ "libbacktrace"
],
}
@@ -570,6 +582,7 @@
"type_lookup_table_test.cc",
"utf_test.cc",
"utils_test.cc",
+ "vdex_file_test.cc",
"verifier/method_verifier_test.cc",
"verifier/reg_type_test.cc",
"zip_archive_test.cc",
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index 6c2c815..8384460 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -31,6 +31,7 @@
#if defined(__arm__)
extern "C" bool artCheckForArmSdivInstruction();
+extern "C" bool artCheckForArmv8AInstructions();
#endif
namespace art {
@@ -39,22 +40,34 @@
ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromVariant(
const std::string& variant, std::string* error_msg) {
+ static const char* arm_variants_with_armv8a[] = {
+ "cortex-a32",
+ "cortex-a35",
+ "cortex-a53",
+ "cortex-a53.a57",
+ "cortex-a53.a72",
+ "cortex-a57",
+ "cortex-a72",
+ "cortex-a73",
+ "exynos-m1",
+ "denver",
+ "kryo"
+ };
+ bool has_armv8a = FindVariantInArray(arm_variants_with_armv8a,
+ arraysize(arm_variants_with_armv8a),
+ variant);
+
// Look for variants that have divide support.
static const char* arm_variants_with_div[] = {
"cortex-a7",
"cortex-a12",
"cortex-a15",
"cortex-a17",
- "cortex-a53",
- "cortex-a53.a57",
- "cortex-a57",
- "denver",
"krait",
};
-
- bool has_div = FindVariantInArray(arm_variants_with_div,
- arraysize(arm_variants_with_div),
- variant);
+ bool has_div = has_armv8a || FindVariantInArray(arm_variants_with_div,
+ arraysize(arm_variants_with_div),
+ variant);
// Look for variants that have LPAE support.
static const char* arm_variants_with_lpae[] = {
@@ -62,17 +75,13 @@
"cortex-a12",
"cortex-a15",
"cortex-a17",
- "cortex-a53",
- "cortex-a53.a57",
- "cortex-a57",
- "denver",
"krait",
};
- bool has_lpae = FindVariantInArray(arm_variants_with_lpae,
- arraysize(arm_variants_with_lpae),
- variant);
+ bool has_atomic_ldrd_strd = has_armv8a || FindVariantInArray(arm_variants_with_lpae,
+ arraysize(arm_variants_with_lpae),
+ variant);
- if (has_div == false && has_lpae == false) {
+ if (has_armv8a == false && has_div == false && has_atomic_ldrd_strd == false) {
static const char* arm_variants_with_default_features[] = {
"cortex-a5",
"cortex-a8",
@@ -92,34 +101,48 @@
<< ") using conservative defaults";
}
}
- return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div, has_lpae));
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div,
+ has_atomic_ldrd_strd,
+ has_armv8a));
}
ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
bool has_div = (bitmap & kDivBitfield) != 0;
bool has_atomic_ldrd_strd = (bitmap & kAtomicLdrdStrdBitfield) != 0;
- return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div, has_atomic_ldrd_strd));
+ bool has_armv8a = (bitmap & kARMv8A) != 0;
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div,
+ has_atomic_ldrd_strd,
+ has_armv8a));
}
ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromCppDefines() {
-#if defined(__ARM_ARCH_EXT_IDIV__)
+// Note: This will not work for now since we still build the 32-bit as __ARCH_ARM_7A__.
+#if defined(__ARM_ARCH_8A__)
+ const bool has_armv8a = true;
+#else
+ const bool has_armv8a = false;
+#endif
+#if defined (__ARM_ARCH_8A__) || defined(__ARM_ARCH_EXT_IDIV__)
const bool has_div = true;
#else
const bool has_div = false;
#endif
-#if defined(__ARM_FEATURE_LPAE)
- const bool has_lpae = true;
+#if defined (__ARM_ARCH_8A__) || defined(__ARM_FEATURE_LPAE)
+ const bool has_atomic_ldrd_strd = true;
#else
- const bool has_lpae = false;
+ const bool has_atomic_ldrd_strd = false;
#endif
- return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div, has_lpae));
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div,
+ has_atomic_ldrd_strd,
+ has_armv8a));
}
ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromCpuInfo() {
// Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
// the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
- bool has_lpae = false;
+ bool has_atomic_ldrd_strd = false;
bool has_div = false;
+ bool has_armv8a = false;
std::ifstream in("/proc/cpuinfo");
if (!in.fail()) {
@@ -137,21 +160,33 @@
has_div = true;
}
if (line.find("lpae") != std::string::npos) {
- has_lpae = true;
+ has_atomic_ldrd_strd = true;
}
}
+ if (line.find("architecture") != std::string::npos
+ && line.find(": 8") != std::string::npos) {
+ LOG(INFO) << "found architecture ARMv8";
+ // Android is only run on A cores, so ARMv8 implies ARMv8-A.
+ has_armv8a = true;
+ // ARMv8 CPUs have LPAE and div support.
+ has_div = true;
+ has_atomic_ldrd_strd = true;
+ }
}
}
in.close();
} else {
LOG(ERROR) << "Failed to open /proc/cpuinfo";
}
- return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div, has_lpae));
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div,
+ has_atomic_ldrd_strd,
+ has_armv8a));
}
ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromHwcap() {
bool has_div = false;
- bool has_lpae = false;
+ bool has_atomic_ldrd_strd = false;
+ bool has_armv8a = false;
#if defined(ART_TARGET_ANDROID) && defined(__arm__)
uint64_t hwcaps = getauxval(AT_HWCAP);
@@ -163,18 +198,27 @@
has_div = true;
}
if ((hwcaps & HWCAP_LPAE) != 0) {
- has_lpae = true;
+ has_atomic_ldrd_strd = true;
+ }
+ // TODO: Fix this once FPMISC makes it upstream.
+ // For now we detect if we run on an ARMv8 CPU by looking for CRC32 and SHA1
+ // (only available on ARMv8 CPUs).
+ if ((hwcaps & HWCAP2_CRC32) != 0 && (hwcaps & HWCAP2_SHA1) != 0) {
+ has_armv8a = true;
}
#endif
- return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div, has_lpae));
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div,
+ has_atomic_ldrd_strd,
+ has_armv8a));
}
// A signal handler called by a fault for an illegal instruction. We record the fact in r0
// and then increment the PC in the signal context to return to the next instruction. We know the
-// instruction is an sdiv (4 bytes long).
-static void bad_divide_inst_handle(int signo ATTRIBUTE_UNUSED, siginfo_t* si ATTRIBUTE_UNUSED,
- void* data) {
+// instruction is 4 bytes long.
+static void bad_instr_handle(int signo ATTRIBUTE_UNUSED,
+ siginfo_t* si ATTRIBUTE_UNUSED,
+ void* data) {
#if defined(__arm__)
struct ucontext *uc = (struct ucontext *)data;
struct sigcontext *sc = &uc->uc_mcontext;
@@ -190,15 +234,19 @@
// instruction. If we get a SIGILL then it's not supported.
struct sigaction sa, osa;
sa.sa_flags = SA_ONSTACK | SA_RESTART | SA_SIGINFO;
- sa.sa_sigaction = bad_divide_inst_handle;
+ sa.sa_sigaction = bad_instr_handle;
sigemptyset(&sa.sa_mask);
sigaction(SIGILL, &sa, &osa);
bool has_div = false;
+ bool has_armv8a = false;
#if defined(__arm__)
if (artCheckForArmSdivInstruction()) {
has_div = true;
}
+ if (artCheckForArmv8AInstructions()) {
+ has_armv8a = true;
+ }
#endif
// Restore the signal handler.
@@ -207,11 +255,13 @@
// Use compile time features to "detect" LPAE support.
// TODO: write an assembly LPAE support test.
#if defined(__ARM_FEATURE_LPAE)
- const bool has_lpae = true;
+ const bool has_atomic_ldrd_strd = true;
#else
- const bool has_lpae = false;
+ const bool has_atomic_ldrd_strd = false;
#endif
- return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div, has_lpae));
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div,
+ has_atomic_ldrd_strd,
+ has_armv8a));
}
bool ArmInstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
@@ -219,13 +269,26 @@
return false;
}
const ArmInstructionSetFeatures* other_as_arm = other->AsArmInstructionSetFeatures();
- return has_div_ == other_as_arm->has_div_ &&
- has_atomic_ldrd_strd_ == other_as_arm->has_atomic_ldrd_strd_;
+ return has_div_ == other_as_arm->has_div_
+ && has_atomic_ldrd_strd_ == other_as_arm->has_atomic_ldrd_strd_
+ && has_armv8a_ == other_as_arm->has_armv8a_;
+}
+
+bool ArmInstructionSetFeatures::HasAtLeast(const InstructionSetFeatures* other) const {
+ if (kArm != other->GetInstructionSet()) {
+ return false;
+ }
+ const ArmInstructionSetFeatures* other_as_arm = other->AsArmInstructionSetFeatures();
+
+ return (has_div_ || (has_div_ == other_as_arm->has_div_))
+ && (has_atomic_ldrd_strd_ || (has_atomic_ldrd_strd_ == other_as_arm->has_atomic_ldrd_strd_))
+ && (has_armv8a_ || (has_armv8a_ == other_as_arm->has_armv8a_));
}
uint32_t ArmInstructionSetFeatures::AsBitmap() const {
- return (has_div_ ? kDivBitfield : 0) |
- (has_atomic_ldrd_strd_ ? kAtomicLdrdStrdBitfield : 0);
+ return (has_div_ ? kDivBitfield : 0)
+ | (has_atomic_ldrd_strd_ ? kAtomicLdrdStrdBitfield : 0)
+ | (has_armv8a_ ? kARMv8A : 0);
}
std::string ArmInstructionSetFeatures::GetFeatureString() const {
@@ -240,6 +303,11 @@
} else {
result += ",-atomic_ldrd_strd";
}
+ if (has_armv8a_) {
+ result += ",armv8a";
+ } else {
+ result += ",-armv8a";
+ }
return result;
}
@@ -248,6 +316,7 @@
const std::vector<std::string>& features, std::string* error_msg) const {
bool has_atomic_ldrd_strd = has_atomic_ldrd_strd_;
bool has_div = has_div_;
+ bool has_armv8a = has_armv8a_;
for (auto i = features.begin(); i != features.end(); i++) {
std::string feature = android::base::Trim(*i);
if (feature == "div") {
@@ -258,13 +327,17 @@
has_atomic_ldrd_strd = true;
} else if (feature == "-atomic_ldrd_strd") {
has_atomic_ldrd_strd = false;
+ } else if (feature == "armv8a") {
+ has_armv8a = true;
+ } else if (feature == "-armv8a") {
+ has_armv8a = false;
} else {
*error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
return nullptr;
}
}
return std::unique_ptr<const InstructionSetFeatures>(
- new ArmInstructionSetFeatures(has_div, has_atomic_ldrd_strd));
+ new ArmInstructionSetFeatures(has_div, has_atomic_ldrd_strd, has_armv8a));
}
} // namespace art
diff --git a/runtime/arch/arm/instruction_set_features_arm.h b/runtime/arch/arm/instruction_set_features_arm.h
index 11f8bf0..f438a76 100644
--- a/runtime/arch/arm/instruction_set_features_arm.h
+++ b/runtime/arch/arm/instruction_set_features_arm.h
@@ -49,6 +49,8 @@
bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+ bool HasAtLeast(const InstructionSetFeatures* other) const OVERRIDE;
+
InstructionSet GetInstructionSet() const OVERRIDE {
return kArm;
}
@@ -69,6 +71,11 @@
return has_atomic_ldrd_strd_;
}
+ // Are ARMv8-A instructions available?
+ bool HasARMv8AInstructions() const {
+ return has_armv8a_;
+ }
+
virtual ~ArmInstructionSetFeatures() {}
protected:
@@ -78,19 +85,24 @@
std::string* error_msg) const OVERRIDE;
private:
- ArmInstructionSetFeatures(bool has_div, bool has_atomic_ldrd_strd)
+ ArmInstructionSetFeatures(bool has_div,
+ bool has_atomic_ldrd_strd,
+ bool has_armv8a)
: InstructionSetFeatures(),
- has_div_(has_div), has_atomic_ldrd_strd_(has_atomic_ldrd_strd) {
- }
+ has_div_(has_div),
+ has_atomic_ldrd_strd_(has_atomic_ldrd_strd),
+ has_armv8a_(has_armv8a) {}
// Bitmap positions for encoding features as a bitmap.
enum {
kDivBitfield = 1 << 0,
kAtomicLdrdStrdBitfield = 1 << 1,
+ kARMv8A = 1 << 2,
};
const bool has_div_;
const bool has_atomic_ldrd_strd_;
+ const bool has_armv8a_;
DISALLOW_COPY_AND_ASSIGN(ArmInstructionSetFeatures);
};
diff --git a/runtime/arch/arm/instruction_set_features_arm_test.cc b/runtime/arch/arm/instruction_set_features_arm_test.cc
index 697ca90..6d5dd6d 100644
--- a/runtime/arch/arm/instruction_set_features_arm_test.cc
+++ b/runtime/arch/arm/instruction_set_features_arm_test.cc
@@ -31,7 +31,7 @@
EXPECT_TRUE(krait_features->Equals(krait_features.get()));
EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
- EXPECT_STREQ("div,atomic_ldrd_strd", krait_features->GetFeatureString().c_str());
+ EXPECT_STREQ("div,atomic_ldrd_strd,-armv8a", krait_features->GetFeatureString().c_str());
EXPECT_EQ(krait_features->AsBitmap(), 3U);
// Build features for a 32-bit ARM denver processor.
@@ -40,12 +40,13 @@
ASSERT_TRUE(denver_features.get() != nullptr) << error_msg;
EXPECT_TRUE(denver_features->Equals(denver_features.get()));
- EXPECT_TRUE(denver_features->Equals(krait_features.get()));
- EXPECT_TRUE(krait_features->Equals(denver_features.get()));
+ EXPECT_TRUE(denver_features->HasAtLeast(krait_features.get()));
+ EXPECT_FALSE(krait_features->Equals(denver_features.get()));
+ EXPECT_FALSE(krait_features->HasAtLeast(denver_features.get()));
EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
- EXPECT_STREQ("div,atomic_ldrd_strd", denver_features->GetFeatureString().c_str());
- EXPECT_EQ(denver_features->AsBitmap(), 3U);
+ EXPECT_STREQ("div,atomic_ldrd_strd,armv8a", denver_features->GetFeatureString().c_str());
+ EXPECT_EQ(denver_features->AsBitmap(), 7U);
// Build features for a 32-bit ARMv7 processor.
std::unique_ptr<const InstructionSetFeatures> generic_features(
@@ -57,7 +58,7 @@
EXPECT_FALSE(krait_features->Equals(generic_features.get()));
EXPECT_FALSE(generic_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
EXPECT_FALSE(generic_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
- EXPECT_STREQ("-div,-atomic_ldrd_strd", generic_features->GetFeatureString().c_str());
+ EXPECT_STREQ("-div,-atomic_ldrd_strd,-armv8a", generic_features->GetFeatureString().c_str());
EXPECT_EQ(generic_features->AsBitmap(), 0U);
// ARM6 is not a supported architecture variant.
@@ -82,21 +83,22 @@
EXPECT_TRUE(krait_features->Equals(krait_features.get()));
EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
- EXPECT_STREQ("div,atomic_ldrd_strd", krait_features->GetFeatureString().c_str());
+ EXPECT_STREQ("div,atomic_ldrd_strd,-armv8a", krait_features->GetFeatureString().c_str());
EXPECT_EQ(krait_features->AsBitmap(), 3U);
// Build features for a 32-bit ARM processor with LPAE and div flipped.
std::unique_ptr<const InstructionSetFeatures> denver_features(
- base_features->AddFeaturesFromString("div,atomic_ldrd_strd", &error_msg));
+ base_features->AddFeaturesFromString("div,atomic_ldrd_strd,armv8a", &error_msg));
ASSERT_TRUE(denver_features.get() != nullptr) << error_msg;
EXPECT_TRUE(denver_features->Equals(denver_features.get()));
- EXPECT_TRUE(denver_features->Equals(krait_features.get()));
- EXPECT_TRUE(krait_features->Equals(denver_features.get()));
+ EXPECT_FALSE(denver_features->Equals(krait_features.get()));
+ EXPECT_TRUE(denver_features->HasAtLeast(krait_features.get()));
+ EXPECT_FALSE(krait_features->Equals(denver_features.get()));
EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
- EXPECT_STREQ("div,atomic_ldrd_strd", denver_features->GetFeatureString().c_str());
- EXPECT_EQ(denver_features->AsBitmap(), 3U);
+ EXPECT_STREQ("div,atomic_ldrd_strd,armv8a", denver_features->GetFeatureString().c_str());
+ EXPECT_EQ(denver_features->AsBitmap(), 7U);
// Build features for a 32-bit default ARM processor.
std::unique_ptr<const InstructionSetFeatures> generic_features(
@@ -108,7 +110,7 @@
EXPECT_FALSE(krait_features->Equals(generic_features.get()));
EXPECT_FALSE(generic_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
EXPECT_FALSE(generic_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
- EXPECT_STREQ("-div,-atomic_ldrd_strd", generic_features->GetFeatureString().c_str());
+ EXPECT_STREQ("-div,-atomic_ldrd_strd,-armv8a", generic_features->GetFeatureString().c_str());
EXPECT_EQ(generic_features->AsBitmap(), 0U);
}
diff --git a/runtime/arch/arm/instruction_set_features_assembly_tests.S b/runtime/arch/arm/instruction_set_features_assembly_tests.S
index c1086df..5c7f202 100644
--- a/runtime/arch/arm/instruction_set_features_assembly_tests.S
+++ b/runtime/arch/arm/instruction_set_features_assembly_tests.S
@@ -17,22 +17,49 @@
#include "asm_support_arm.S"
.section .text
-// This function is used to check for the CPU's support for the sdiv
-// instruction at runtime. It will either return the value 1 or
-// will cause an invalid instruction trap (SIGILL signal). The
-// caller must arrange for the signal handler to set the r0
-// register to 0 and move the pc forward by 4 bytes (to skip
-// the invalid instruction).
+// These functions are used to check for the CPU's support for the sdiv and
+// ARMv8-A instructions at runtime. They will either return the value 1 or will
+// cause an invalid instruction trap (SIGILL signal), for which the signal handler
+// (bad_instr_handle(), in instruction_set_features_arm.cc) must arrange to set
+// the r0 register to 0 and move the pc forward by 4 bytes (to skip the invalid
+// instruction).
+// Note: For ARM T32, instructions can be either 16b or 32b, but bad_instr_handle()
+// deals only with 32b instructions for now.
+
ENTRY artCheckForArmSdivInstruction
mov r1,#1
- // depending on the architecture, the assembler will not allow an
+ // Depending on the architecture, the assembler will not allow an
// sdiv instruction, so we will have to output the bytes directly.
- // sdiv r0,r1,r1 is two words: 0xfb91 0xf1f0. We need little endian.
- .byte 0x91,0xfb,0xf1,0xf0
+ // The T32 encoding for sdiv r0,r1,r1 is two 16bit words: 0xfb91 0xf0f1, with little endianness.
+ .byte 0x91,0xfb
+ .byte 0xf1,0xf0
- // if the divide worked, r0 will have the value #1 (result of sdiv).
+ // If the divide worked, r0 will have the value #1 (result of sdiv).
// It will have 0 otherwise (set by the signal handler)
// the value is just returned from this function.
bx lr
END artCheckForArmSdivInstruction
+
+ENTRY artCheckForArmv8AInstructions
+ // Depending on the architecture, the assembler will not allow a
+ // `vrint` instruction, so we will have to output the bytes directly.
+
+ // Move `true` into the result register. The signal handler will set it to 0
+ // if execution of the instruction below fails
+ mov r0,#1
+
+ // Store S0 in the caller saved R1. If the instruction below succeeds, S0 will
+ // be clobbered but it will not be caller saved (ARM still uses soft FP).
+ vmov r1, s0
+
+ // The T32 encoding for vrinta.f32.f32 s0,s0 is two 16bit words: 0xfeb8,0x0a40, with little
+ // endianness.
+ .byte 0xb8,0xfe
+ .byte 0x40,0x0a
+
+ // Restore S0 (see above comment).
+ vmov s0, r1
+
+ bx lr
+END artCheckForArmv8AInstructions
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index db1cad6..a443a40 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -351,14 +351,13 @@
DELIVER_PENDING_EXCEPTION
.endm
-// Macros taking opportunity of code similarities for downcalls with referrer for non-wide fields.
+// Macros taking opportunity of code similarities for downcalls.
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME r1 @ save callee saves in case of GC
- ldr r1, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] @ pass referrer
- mov r2, r9 @ pass Thread::Current
- bl \entrypoint @ (uint32_t field_idx, const Method* referrer, Thread*)
+ mov r1, r9 @ pass Thread::Current
+ bl \entrypoint @ (uint32_t field_idx, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
\return
END \name
@@ -368,9 +367,8 @@
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
- ldr r2, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] @ pass referrer
- mov r3, r9 @ pass Thread::Current
- bl \entrypoint @ (field_idx, Object*, referrer, Thread*)
+ mov r2, r9 @ pass Thread::Current
+ bl \entrypoint @ (field_idx, Object*, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
\return
END \name
@@ -380,12 +378,8 @@
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME r3 @ save callee saves in case of GC
- ldr r3, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] @ pass referrer
- str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
- .cfi_adjust_cfa_offset 16
- bl \entrypoint @ (field_idx, Object*, new_val, referrer, Thread*)
- add sp, #16 @ release out args
- .cfi_adjust_cfa_offset -16
+ mov r3, r9 @ pass Thread::Current
+ bl \entrypoint @ (field_idx, Object*, new_val, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME @ TODO: we can clearly save an add here
\return
END \name
@@ -978,21 +972,20 @@
/*
* Called by managed code to resolve a static field and load a non-wide value.
*/
-ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
/*
* Called by managed code to resolve a static field and load a 64-bit primitive value.
*/
- .extern artGet64StaticFromCode
+ .extern artGet64StaticFromCompiledCode
ENTRY art_quick_get64_static
SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
- ldr r1, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] @ pass referrer
- mov r2, r9 @ pass Thread::Current
- bl artGet64StaticFromCode @ (uint32_t field_idx, const Method* referrer, Thread*)
+ mov r1, r9 @ pass Thread::Current
+ bl artGet64StaticFromCompiledCode @ (uint32_t field_idx, Thread*)
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
RESTORE_SAVE_REFS_ONLY_FRAME
cbnz r2, 1f @ success if no exception pending
@@ -1004,21 +997,20 @@
/*
* Called by managed code to resolve an instance field and load a non-wide value.
*/
-TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
/*
* Called by managed code to resolve an instance field and load a 64-bit primitive value.
*/
- .extern artGet64InstanceFromCode
+ .extern artGet64InstanceFromCompiledCode
ENTRY art_quick_get64_instance
SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
- ldr r2, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] @ pass referrer
- mov r3, r9 @ pass Thread::Current
- bl artGet64InstanceFromCode @ (field_idx, Object*, referrer, Thread*)
+ mov r2, r9 @ pass Thread::Current
+ bl artGet64InstanceFromCompiledCode @ (field_idx, Object*, Thread*)
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
RESTORE_SAVE_REFS_ONLY_FRAME
cbnz r2, 1f @ success if no exception pending
@@ -1028,51 +1020,31 @@
END art_quick_get64_instance
/*
- * Called by managed code to resolve a static field and store a non-wide value.
+ * Called by managed code to resolve a static field and store a value.
*/
-TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
-TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
-TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
-TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
- /*
- * Called by managed code to resolve a static field and store a 64-bit primitive value.
- * On entry r0 holds field index, r2:r3 hold new_val
- */
- .extern artSet64StaticFromCode
-ENTRY art_quick_set64_static
- SETUP_SAVE_REFS_ONLY_FRAME r1 @ save callee saves in case of GC
- @ r2:r3 contain the wide argument
- ldr r1, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] @ pass referrer
- str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
- .cfi_adjust_cfa_offset 16
- bl artSet64StaticFromCode @ (field_idx, referrer, new_val, Thread*)
- add sp, #16 @ release out args
- .cfi_adjust_cfa_offset -16
- RESTORE_SAVE_REFS_ONLY_FRAME @ TODO: we can clearly save an add here
- RETURN_IF_RESULT_IS_ZERO
- DELIVER_PENDING_EXCEPTION
-END art_quick_set64_static
+TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
/*
* Called by managed code to resolve an instance field and store a non-wide value.
*/
-THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
-THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
-THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
-THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCompiledCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCompiledCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+
/*
- * Called by managed code to resolve an instance field and store a 64-bit primitive value.
+ * Called by managed code to resolve an instance field and store a wide value.
*/
- .extern artSet64InstanceFromCode
+ .extern artSet64InstanceFromCompiledCode
ENTRY art_quick_set64_instance
SETUP_SAVE_REFS_ONLY_FRAME r12 @ save callee saves in case of GC
@ r2:r3 contain the wide argument
- ldr r12, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] @ pass referrer
- str r9, [sp, #-12]! @ expand the frame and pass Thread::Current
- .cfi_adjust_cfa_offset 12
- str r12, [sp, #-4]! @ expand the frame and pass the referrer
- .cfi_adjust_cfa_offset 4
- bl artSet64InstanceFromCode @ (field_idx, Object*, new_val, Method* referrer, Thread*)
+ str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
+ .cfi_adjust_cfa_offset 16
+ bl artSet64InstanceFromCompiledCode @ (field_idx, Object*, new_val, Thread*)
add sp, #16 @ release out args
.cfi_adjust_cfa_offset -16
RESTORE_SAVE_REFS_ONLY_FRAME @ TODO: we can clearly save an add here
@@ -1080,6 +1052,20 @@
DELIVER_PENDING_EXCEPTION
END art_quick_set64_instance
+ .extern artSet64StaticFromCompiledCode
+ENTRY art_quick_set64_static
+ SETUP_SAVE_REFS_ONLY_FRAME r12 @ save callee saves in case of GC
+ @ r2:r3 contain the wide argument
+ str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
+ .cfi_adjust_cfa_offset 16
+ bl artSet64StaticFromCompiledCode @ (field_idx, new_val, Thread*)
+ add sp, #16 @ release out args
+ .cfi_adjust_cfa_offset -16
+ RESTORE_SAVE_REFS_ONLY_FRAME @ TODO: we can clearly save an add here
+ RETURN_IF_RESULT_IS_ZERO
+ DELIVER_PENDING_EXCEPTION
+END art_quick_set64_static
+
/*
* Entry from managed code to resolve a string, this stub will
* check the dex cache for a matching string (the fast path), and if not found,
@@ -1100,11 +1086,37 @@
DELIVER_PENDING_EXCEPTION_FRAME_READY
END art_quick_resolve_string
+
// Generate the allocation entrypoints for each allocator.
-GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
+// Comment out allocators that have arm specific asm.
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
+
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_RESOLVED_OBJECT(_rosalloc, RosAlloc).
-ENTRY art_quick_alloc_object_resolved_rosalloc
+.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name
+ENTRY \c_name
// Fast path rosalloc allocation.
// r0: type/return value, r9: Thread::Current
// r1, r2, r3, r12: free.
@@ -1113,13 +1125,13 @@
// TODO: consider using ldrd.
ldr r12, [r9, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
cmp r3, r12
- bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ bhs .Lslow_path\c_name
ldr r3, [r0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3)
cmp r3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread
// local allocation. Also does the
// initialized and finalizable checks.
- bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ bhs .Lslow_path\c_name
// Compute the rosalloc bracket index
// from the size. Since the size is
// already aligned we can combine the
@@ -1133,7 +1145,7 @@
// Load the free list head (r3). This
// will be the return val.
ldr r3, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
- cbz r3, .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ cbz r3, .Lslow_path\c_name
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
ldr r1, [r3, #ROSALLOC_SLOT_NEXT_OFFSET] // Load the next pointer of the head
// and update the list head with the
@@ -1178,16 +1190,20 @@
mov r0, r3 // Set the return value and return.
bx lr
-.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
+.Lslow_path\c_name:
SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
mov r1, r9 @ pass Thread::Current
- bl artAllocObjectFromCodeResolvedRosAlloc @ (mirror::Class* cls, Thread*)
+ bl \cxx_name @ (mirror::Class* cls, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_resolved_rosalloc
+END \c_name
+.endm
-// The common fast path code for art_quick_alloc_object_resolved_tlab
-// and art_quick_alloc_object_resolved_region_tlab.
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc
+
+// The common fast path code for art_quick_alloc_object_resolved/initialized_tlab
+// and art_quick_alloc_object_resolved/initialized_region_tlab.
//
// r0: type r9: Thread::Current, r1, r2, r3, r12: free.
// Need to preserve r0 to the slow path.
@@ -1226,41 +1242,173 @@
bx lr
.endm
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_RESOLVED_OBJECT(_tlab, TLAB).
-ENTRY art_quick_alloc_object_resolved_tlab
+// The common code for art_quick_alloc_object_*region_tlab
+.macro GENERATE_ALLOC_OBJECT_RESOLVED_TLAB name, entrypoint
+ENTRY \name
// Fast path tlab allocation.
// r0: type, r9: Thread::Current
// r1, r2, r3, r12: free.
-#if defined(USE_READ_BARRIER)
- mvn r0, #0 // Read barrier not supported here.
- bx lr // Return -1.
-#endif
- ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_tlab_slow_path
-.Lart_quick_alloc_object_resolved_tlab_slow_path:
+ ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lslow_path\name
+.Lslow_path\name:
SETUP_SAVE_REFS_ONLY_FRAME r2 // Save callee saves in case of GC.
mov r1, r9 // Pass Thread::Current.
- bl artAllocObjectFromCodeResolvedTLAB // (mirror::Class* klass, Thread*)
+ bl \entrypoint // (mirror::Class* klass, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_resolved_tlab
+END \name
+.endm
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
-ENTRY art_quick_alloc_object_resolved_region_tlab
- // Fast path tlab allocation.
- // r0: type, r9: Thread::Current, r1, r2, r3, r12: free.
-#if !defined(USE_READ_BARRIER)
- eor r0, r0, r0 // Read barrier must be enabled here.
- sub r0, r0, #1 // Return -1.
- bx lr
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_tlab, artAllocObjectFromCodeResolvedTLAB
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, artAllocObjectFromCodeInitializedTLAB
+
+
+// The common fast path code for art_quick_alloc_array_resolved/initialized_tlab
+// and art_quick_alloc_array_resolved/initialized_region_tlab.
+//
+// r0: type r1: component_count r2: total_size r9: Thread::Current, r3, r12: free.
+// Need to preserve r0 and r1 to the slow path.
+.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE slowPathLabel
+ and r2, r2, #OBJECT_ALIGNMENT_MASK_TOGGLED // Apply alignment mask
+ // (addr + 7) & ~7.
+
+ // Load thread_local_pos (r3) and
+ // thread_local_end (r12) with ldrd.
+ // Check constraints for ldrd.
+#if !((THREAD_LOCAL_POS_OFFSET + 4 == THREAD_LOCAL_END_OFFSET) && (THREAD_LOCAL_POS_OFFSET % 8 == 0))
+#error "Thread::thread_local_pos/end must be consecutive and are 8 byte aligned for performance"
#endif
- ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_region_tlab_slow_path
-.Lart_quick_alloc_object_resolved_region_tlab_slow_path:
- SETUP_SAVE_REFS_ONLY_FRAME r2 // Save callee saves in case of GC.
- mov r1, r9 // Pass Thread::Current.
- bl artAllocObjectFromCodeResolvedRegionTLAB // (mirror::Class* klass, Thread*)
+ ldrd r3, r12, [r9, #THREAD_LOCAL_POS_OFFSET]
+ sub r12, r12, r3 // Compute the remaining buf size.
+ cmp r2, r12 // Check if the total_size fits.
+ bhi \slowPathLabel
+ // "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
+ add r2, r2, r3
+ str r2, [r9, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
+ ldr r2, [r9, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
+ add r2, r2, #1
+ str r2, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]
+ POISON_HEAP_REF r0
+ str r0, [r3, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
+ str r1, [r3, #MIRROR_ARRAY_LENGTH_OFFSET] // Store the array length.
+ // Fence. This is "ish" not "ishst" so
+ // that the code after this allocation
+ // site will see the right values in
+ // the fields of the class.
+ // Alternatively we could use "ishst"
+ // if we use load-acquire for the
+ // object size load.)
+ mov r0, r3
+ dmb ish
+ bx lr
+.endm
+
+.macro GENERATE_ALLOC_ARRAY_TLAB name, entrypoint, size_setup
+ENTRY \name
+ // Fast path array allocation for region tlab allocation.
+ // r0: mirror::Class* type
+ // r1: int32_t component_count
+ // r9: thread
+ // r2, r3, r12: free.
+ \size_setup .Lslow_path\name
+ ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE .Lslow_path\name
+.Lslow_path\name:
+ // r0: mirror::Class* klass
+ // r1: int32_t component_count
+ // r2: Thread* self
+ SETUP_SAVE_REFS_ONLY_FRAME r2 // save callee saves in case of GC
+ mov r2, r9 // pass Thread::Current
+ bl \entrypoint
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_resolved_region_tlab
+END \name
+.endm
+
+.macro COMPUTE_ARRAY_SIZE_UNKNOWN slow_path
+ bkpt // We should never enter here.
+ // Code below is for reference.
+ // Possibly a large object, go slow.
+ // Also does negative array size check.
+ movw r2, #((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_WIDE_ARRAY_DATA_OFFSET) / 8)
+ cmp r1, r2
+ bhi \slow_path
+ // Array classes are never finalizable
+ // or uninitialized, no need to check.
+ ldr r3, [r0, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET] // Load component type
+ UNPOISON_HEAP_REF r3
+ ldr r3, [r3, #MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET]
+ lsr r3, r3, #PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT // Component size shift is in high 16
+ // bits.
+ lsl r2, r1, r3 // Calculate data size
+ // Add array data offset and alignment.
+ add r2, r2, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+#if MIRROR_WIDE_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4
+#error Long array data offset must be 4 greater than int array data offset.
+#endif
+
+ add r3, r3, #1 // Add 4 to the length only if the
+ // component size shift is 3
+ // (for 64 bit alignment).
+ and r3, r3, #4
+ add r2, r2, r3
+.endm
+
+.macro COMPUTE_ARRAY_SIZE_8 slow_path
+ // Possibly a large object, go slow.
+ // Also does negative array size check.
+ movw r2, #(MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET)
+ cmp r1, r2
+ bhi \slow_path
+ // Add array data offset and alignment.
+ add r2, r1, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+.endm
+
+.macro COMPUTE_ARRAY_SIZE_16 slow_path
+ // Possibly a large object, go slow.
+ // Also does negative array size check.
+ movw r2, #((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET) / 2)
+ cmp r1, r2
+ bhi \slow_path
+ lsl r2, r1, #1
+ // Add array data offset and alignment.
+ add r2, r2, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+.endm
+
+.macro COMPUTE_ARRAY_SIZE_32 slow_path
+ // Possibly a large object, go slow.
+ // Also does negative array size check.
+ movw r2, #((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET) / 4)
+ cmp r1, r2
+ bhi \slow_path
+ lsl r2, r1, #2
+ // Add array data offset and alignment.
+ add r2, r2, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+.endm
+
+.macro COMPUTE_ARRAY_SIZE_64 slow_path
+ // Possibly a large object, go slow.
+ // Also does negative array size check.
+ movw r2, #((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_LONG_ARRAY_DATA_OFFSET) / 8)
+ cmp r1, r2
+ bhi \slow_path
+ lsl r2, r1, #3
+ // Add array data offset and alignment.
+ add r2, r2, #(MIRROR_WIDE_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+.endm
+
+# TODO(ngeoffray): art_quick_alloc_array_resolved_region_tlab is not used for arm, remove
+# the entrypoint once all backends have been updated to use the size variants.
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_8
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_16
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_32
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_64
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_8
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_16
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_32
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_64
/*
* Called by managed code when the value in rSUSPEND has been decremented to 0.
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 00518e1..219d8b4 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1519,14 +1519,13 @@
END \name
.endm
-// Macros taking opportunity of code similarities for downcalls with referrer.
+// Macros taking opportunity of code similarities for downcalls.
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
- ldr x1, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] // Load referrer
- mov x2, xSELF // pass Thread::Current
- bl \entrypoint // (uint32_t type_idx, Method* method, Thread*, SP)
+ mov x1, xSELF // pass Thread::Current
+ bl \entrypoint // (uint32_t type_idx, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
\return
END \name
@@ -1536,8 +1535,7 @@
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
- ldr x2, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] // Load referrer
- mov x3, xSELF // pass Thread::Current
+ mov x2, xSELF // pass Thread::Current
bl \entrypoint
RESTORE_SAVE_REFS_ONLY_FRAME
\return
@@ -1548,8 +1546,7 @@
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
- ldr x3, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] // Load referrer
- mov x4, xSELF // pass Thread::Current
+ mov x3, xSELF // pass Thread::Current
bl \entrypoint
RESTORE_SAVE_REFS_ONLY_FRAME
\return
@@ -1579,44 +1576,33 @@
ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
-TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
-TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
-TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+TWO_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
-THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
-THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
-THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
-THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
-THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
-
-// This is separated out as the argument order is different.
- .extern artSet64StaticFromCode
-ENTRY art_quick_set64_static
- SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
- ldr x1, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] // Load referrer
- // x2 contains the parameter
- mov x3, xSELF // pass Thread::Current
- bl artSet64StaticFromCode
- RESTORE_SAVE_REFS_ONLY_FRAME
- RETURN_IF_W0_IS_ZERO_OR_DELIVER
-END art_quick_set64_static
+THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
/*
* Entry from managed code to resolve a string, this stub will
@@ -1640,18 +1626,34 @@
END art_quick_resolve_string
// Generate the allocation entrypoints for each allocator.
-GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS
+GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
// Comment out allocators that have arm64 specific asm.
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc).
-ENTRY art_quick_alloc_object_resolved_rosalloc
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
+
+.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name
+ENTRY \c_name
// Fast path rosalloc allocation.
// x0: type, xSELF(x19): Thread::Current
// x1-x7: free.
@@ -1660,13 +1662,13 @@
// ldp won't work due to large offset.
ldr x4, [xSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
cmp x3, x4
- bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ bhs .Lslow_path\c_name
ldr w3, [x0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x3)
cmp x3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread
// local allocation. Also does the
// finalizable and initialization
// checks.
- bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ bhs .Lslow_path\c_name
// Compute the rosalloc bracket index
// from the size. Since the size is
// already aligned we can combine the
@@ -1679,7 +1681,7 @@
// Load the free list head (x3). This
// will be the return val.
ldr x3, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
- cbz x3, .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ cbz x3, .Lslow_path\c_name
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
ldr x1, [x3, #ROSALLOC_SLOT_NEXT_OFFSET] // Load the next pointer of the head
// and update the list head with the
@@ -1723,37 +1725,67 @@
mov x0, x3 // Set the return value and return.
ret
-.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
+.Lslow_path\c_name:
SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
mov x1, xSELF // pass Thread::Current
- bl artAllocObjectFromCodeResolvedRosAlloc // (mirror::Class* klass, Thread*)
+ bl \cxx_name
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_resolved_rosalloc
+END \c_name
+.endm
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc
-// The common fast path code for art_quick_alloc_array_region_tlab.
-.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED slowPathLabel, xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
- // Array classes are never finalizable or uninitialized, no need to check.
- ldr \wTemp0, [\xClass, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET] // Load component type
- UNPOISON_HEAP_REF \wTemp0
- ldr \wTemp0, [\xTemp0, #MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET]
- lsr \xTemp0, \xTemp0, #PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT // Component size shift is in high 16
- // bits.
- // xCount is holding a 32 bit value,
- // it can not overflow.
- lsl \xTemp1, \xCount, \xTemp0 // Calculate data size
- // Add array data offset and alignment.
- add \xTemp1, \xTemp1, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
-#if MIRROR_LONG_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4
-#error Long array data offset must be 4 greater than int array data offset.
-#endif
+.macro ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED slowPathLabel
+ ldr x4, [xSELF, #THREAD_LOCAL_POS_OFFSET]
+ ldr x5, [xSELF, #THREAD_LOCAL_END_OFFSET]
+ ldr w7, [x0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x7).
+ add x6, x4, x7 // Add object size to tlab pos.
+ cmp x6, x5 // Check if it fits, overflow works
+ // since the tlab pos and end are 32
+ // bit values.
+ bhi \slowPathLabel
+ str x6, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
+ ldr x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
+ add x5, x5, #1
+ str x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
+ POISON_HEAP_REF w0
+ str w0, [x4, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
+ // Fence. This is "ish" not "ishst" so
+ // that the code after this allocation
+ // site will see the right values in
+ // the fields of the class.
+ // Alternatively we could use "ishst"
+ // if we use load-acquire for the
+ // object size load.)
+ mov x0, x4
+ dmb ish
+ ret
+.endm
- add \xTemp0, \xTemp0, #1 // Add 4 to the length only if the
- // component size shift is 3
- // (for 64 bit alignment).
- and \xTemp0, \xTemp0, #4
- add \xTemp1, \xTemp1, \xTemp0
+// The common code for art_quick_alloc_object_*region_tlab
+.macro GENERATE_ALLOC_OBJECT_RESOLVED_TLAB name, entrypoint
+ENTRY \name
+ // Fast path region tlab allocation.
+ // x0: type, xSELF(x19): Thread::Current
+ // x1-x7: free.
+ ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED .Lslow_path\name
+.Lslow_path\name:
+ SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC.
+ mov x1, xSELF // Pass Thread::Current.
+ bl \entrypoint // (mirror::Class*, Thread*)
+ RESTORE_SAVE_REFS_ONLY_FRAME
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END \name
+.endm
+
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_tlab, artAllocObjectFromCodeResolvedTLAB
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, artAllocObjectFromCodeInitializedTLAB
+
+.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE slowPathLabel, xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
 and \xTemp1, \xTemp1, #OBJECT_ALIGNMENT_MASK_TOGGLED64 // Apply alignment mask
// (addr + 7) & ~7. The mask must
// be 64 bits to keep high bits in
@@ -1791,96 +1823,15 @@
ret
.endm
-// TODO: delete ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED since it is the same as
-// ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED.
-.macro ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED slowPathLabel
- ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED \slowPathLabel
-.endm
-
-.macro ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED slowPathLabel
- ldr x4, [xSELF, #THREAD_LOCAL_POS_OFFSET]
- ldr x5, [xSELF, #THREAD_LOCAL_END_OFFSET]
- ldr w7, [x0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x7).
- add x6, x4, x7 // Add object size to tlab pos.
- cmp x6, x5 // Check if it fits, overflow works
- // since the tlab pos and end are 32
- // bit values.
- bhi \slowPathLabel
- str x6, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
- ldr x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
- add x5, x5, #1
- str x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
- POISON_HEAP_REF w0
- str w0, [x4, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
- // Fence. This is "ish" not "ishst" so
- // that the code after this allocation
- // site will see the right values in
- // the fields of the class.
- // Alternatively we could use "ishst"
- // if we use load-acquire for the
- // object size load.)
- mov x0, x4
- dmb ish
- ret
-.endm
-
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB).
-ENTRY art_quick_alloc_object_resolved_tlab
- // Fast path tlab allocation.
- // x0: type, xSELF(x19): Thread::Current
- // x1-x7: free.
-#if defined(USE_READ_BARRIER)
- mvn x0, xzr // Read barrier not supported here.
- ret // Return -1.
-#endif
- ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED .Lart_quick_alloc_object_resolved_tlab_slow_path
-.Lart_quick_alloc_object_resolved_tlab_slow_path:
- SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC.
- mov x1, xSELF // Pass Thread::Current.
- bl artAllocObjectFromCodeResolvedTLAB // (mirror::Class*, Thread*)
- RESTORE_SAVE_REFS_ONLY_FRAME
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_resolved_tlab
-
-// The common code for art_quick_alloc_object_*region_tlab
-.macro GENERATE_ALLOC_OBJECT_RESOLVED_REGION_TLAB name, entrypoint, fast_path
-ENTRY \name
- // Fast path region tlab allocation.
- // x0: type, xSELF(x19): Thread::Current
- // x1-x7: free.
-#if !defined(USE_READ_BARRIER)
- mvn x0, xzr // Read barrier must be enabled here.
- ret // Return -1.
-#endif
-.Ldo_allocation\name:
- \fast_path .Lslow_path\name
-.Lslow_path\name:
- SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC.
- mov x1, xSELF // Pass Thread::Current.
- bl \entrypoint // (mirror::Class*, Thread*)
- RESTORE_SAVE_REFS_ONLY_FRAME
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END \name
-.endm
-
-GENERATE_ALLOC_OBJECT_RESOLVED_REGION_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED
-GENERATE_ALLOC_OBJECT_RESOLVED_REGION_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED
-
-// TODO: We could use this macro for the normal tlab allocator too.
-
-// The common code for art_quick_alloc_array_*region_tlab
-.macro GENERATE_ALLOC_ARRAY_REGION_TLAB name, entrypoint, fast_path
+.macro GENERATE_ALLOC_ARRAY_TLAB name, entrypoint, size_setup
ENTRY \name
// Fast path array allocation for region tlab allocation.
// x0: mirror::Class* type
// x1: int32_t component_count
// x2-x7: free.
-#if !defined(USE_READ_BARRIER)
- mvn x0, xzr // Read barrier must be enabled here.
- ret // Return -1.
-#endif
mov x3, x0
- \fast_path .Lslow_path\name, x3, w3, x1, w1, x4, w4, x5, w5, x6, w6
+ \size_setup x3, w3, x1, w1, x4, w4, x5, w5, x6, w6
+ ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE .Lslow_path\name, x3, w3, x1, w1, x4, w4, x5, w5, x6, w6
.Lslow_path\name:
// x0: mirror::Class* klass
// x1: int32_t component_count
@@ -1893,7 +1844,64 @@
END \name
.endm
-GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED
+.macro COMPUTE_ARRAY_SIZE_UNKNOWN xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
+ // Array classes are never finalizable or uninitialized, no need to check.
+ ldr \wTemp0, [\xClass, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET] // Load component type
+ UNPOISON_HEAP_REF \wTemp0
+ ldr \wTemp0, [\xTemp0, #MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET]
+ lsr \xTemp0, \xTemp0, #PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT // Component size shift is in high 16
+ // bits.
+ // xCount is holding a 32 bit value,
+ // it can not overflow.
+ lsl \xTemp1, \xCount, \xTemp0 // Calculate data size
+ // Add array data offset and alignment.
+ add \xTemp1, \xTemp1, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+#if MIRROR_LONG_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4
+#error Long array data offset must be 4 greater than int array data offset.
+#endif
+
+ add \xTemp0, \xTemp0, #1 // Add 4 to the length only if the
+ // component size shift is 3
+ // (for 64 bit alignment).
+ and \xTemp0, \xTemp0, #4
+ add \xTemp1, \xTemp1, \xTemp0
+.endm
+
+.macro COMPUTE_ARRAY_SIZE_8 xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
+ // Add array data offset and alignment.
+ add \xTemp1, \xCount, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+.endm
+
+.macro COMPUTE_ARRAY_SIZE_16 xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
+ lsl \xTemp1, \xCount, #1
+ // Add array data offset and alignment.
+ add \xTemp1, \xTemp1, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+.endm
+
+.macro COMPUTE_ARRAY_SIZE_32 xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
+ lsl \xTemp1, \xCount, #2
+ // Add array data offset and alignment.
+ add \xTemp1, \xTemp1, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+.endm
+
+.macro COMPUTE_ARRAY_SIZE_64 xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
+ lsl \xTemp1, \xCount, #3
+ // Add array data offset and alignment.
+ add \xTemp1, \xTemp1, #(MIRROR_WIDE_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+.endm
+
+# TODO(ngeoffray): art_quick_alloc_array_resolved_region_tlab is not used for arm64, remove
+# the entrypoint once all backends have been updated to use the size variants.
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_8
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_16
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_32
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_64
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_8
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_16
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_32
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_64
/*
* Called by managed code when the thread has been asked to suspend.
diff --git a/runtime/arch/instruction_set.h b/runtime/arch/instruction_set.h
index 99aea62..7ef9a7a 100644
--- a/runtime/arch/instruction_set.h
+++ b/runtime/arch/instruction_set.h
@@ -68,8 +68,8 @@
// ARM64 instruction alignment. This is the recommended alignment for maximum performance.
static constexpr size_t kArm64Alignment = 16;
-// MIPS instruction alignment. MIPS processors require code to be 4-byte aligned.
-// TODO: Can this be 4?
+// MIPS instruction alignment. MIPS processors require code to be 4-byte aligned,
+// but 64-bit literals must be 8-byte aligned.
static constexpr size_t kMipsAlignment = 8;
// X86 instruction alignment. This is the recommended alignment for maximum performance.
@@ -80,8 +80,8 @@
static constexpr size_t kArm64InstructionAlignment = 4;
static constexpr size_t kX86InstructionAlignment = 1;
static constexpr size_t kX86_64InstructionAlignment = 1;
-static constexpr size_t kMipsInstructionAlignment = 2;
-static constexpr size_t kMips64InstructionAlignment = 2;
+static constexpr size_t kMipsInstructionAlignment = 4;
+static constexpr size_t kMips64InstructionAlignment = 4;
const char* GetInstructionSetString(InstructionSet isa);
diff --git a/runtime/arch/instruction_set_features.h b/runtime/arch/instruction_set_features.h
index b6c5c71..5f1a507 100644
--- a/runtime/arch/instruction_set_features.h
+++ b/runtime/arch/instruction_set_features.h
@@ -67,6 +67,24 @@
// Are these features the same as the other given features?
virtual bool Equals(const InstructionSetFeatures* other) const = 0;
+ // For testing purposes we want to make sure that the system we run on has at
+ // least the options we claim it has. In this cases Equals() does not
+ // suffice and will cause the test to fail, since the runtime cpu feature
+ // detection claims more capabilities then statically specified from the
+ // build system.
+ //
+ // A good example of this is the armv8 ART test target that declares
+ // "CPU_VARIANT=generic". If the generic target is specified and the code
+ // is run on a platform with enhanced capabilities, the
+ // instruction_set_features test will fail if we resort to using Equals()
+ // between statically defined cpu features and runtime cpu features.
+ //
+ // For now we default this to Equals() in case the architecture does not
+ // provide it.
+ virtual bool HasAtLeast(const InstructionSetFeatures* other) const {
+ return Equals(other);
+ }
+
// Return the ISA these features relate to.
virtual InstructionSet GetInstructionSet() const = 0;
diff --git a/runtime/arch/instruction_set_features_test.cc b/runtime/arch/instruction_set_features_test.cc
index d489392..67e2f35 100644
--- a/runtime/arch/instruction_set_features_test.cc
+++ b/runtime/arch/instruction_set_features_test.cc
@@ -52,7 +52,7 @@
InstructionSetFeatures::FromVariant(kRuntimeISA, dex2oat_isa_variant, &error_msg));
ASSERT_TRUE(property_features.get() != nullptr) << error_msg;
- EXPECT_TRUE(property_features->Equals(instruction_set_features.get()))
+ EXPECT_TRUE(property_features->HasAtLeast(instruction_set_features.get()))
<< "System property features: " << *property_features.get()
<< "\nFeatures from build: " << *instruction_set_features.get();
}
@@ -89,7 +89,7 @@
base_features->AddFeaturesFromString(dex2oat_isa_features, &error_msg));
ASSERT_TRUE(property_features.get() != nullptr) << error_msg;
- EXPECT_TRUE(property_features->Equals(instruction_set_features.get()))
+ EXPECT_TRUE(property_features->HasAtLeast(instruction_set_features.get()))
<< "System property features: " << *property_features.get()
<< "\nFeatures from build: " << *instruction_set_features.get();
}
@@ -109,7 +109,7 @@
// Check we get the same instruction set features using /proc/cpuinfo.
std::unique_ptr<const InstructionSetFeatures> cpuinfo_features(
InstructionSetFeatures::FromCpuInfo());
- EXPECT_TRUE(cpuinfo_features->Equals(instruction_set_features.get()))
+ EXPECT_TRUE(cpuinfo_features->HasAtLeast(instruction_set_features.get()))
<< "CPU Info features: " << *cpuinfo_features.get()
<< "\nFeatures from build: " << *instruction_set_features.get();
}
@@ -124,7 +124,7 @@
std::unique_ptr<const InstructionSetFeatures> cpp_features(
InstructionSetFeatures::FromCppDefines());
- EXPECT_TRUE(default_features->Equals(cpp_features.get()))
+ EXPECT_TRUE(cpp_features->HasAtLeast(default_features.get()))
<< "Default variant features: " << *default_features.get()
<< "\nFeatures from build: " << *cpp_features.get();
}
@@ -143,7 +143,7 @@
// Check we get the same instruction set features using AT_HWCAP.
std::unique_ptr<const InstructionSetFeatures> hwcap_features(
InstructionSetFeatures::FromHwcap());
- EXPECT_TRUE(hwcap_features->Equals(instruction_set_features.get()))
+ EXPECT_TRUE(hwcap_features->HasAtLeast(instruction_set_features.get()))
<< "Hwcap features: " << *hwcap_features.get()
<< "\nFeatures from build: " << *instruction_set_features.get();
}
@@ -156,7 +156,7 @@
// Check we get the same instruction set features using assembly tests.
std::unique_ptr<const InstructionSetFeatures> assembly_features(
InstructionSetFeatures::FromAssembly());
- EXPECT_TRUE(assembly_features->Equals(instruction_set_features.get()))
+ EXPECT_TRUE(assembly_features->HasAtLeast(instruction_set_features.get()))
<< "Assembly features: " << *assembly_features.get()
<< "\nFeatures from build: " << *instruction_set_features.get();
}
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 76218fb..663cb6c 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1450,316 +1450,83 @@
move $a2, rSELF # pass Thread::Current
END art_quick_aput_obj
- /*
- * Called by managed code to resolve a static field and load a boolean primitive value.
- */
- .extern artGetBooleanStaticFromCode
-ENTRY art_quick_get_boolean_static
- lw $a1, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetBooleanStaticFromCode
- jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_boolean_static
- /*
- * Called by managed code to resolve a static field and load a byte primitive value.
- */
- .extern artGetByteStaticFromCode
-ENTRY art_quick_get_byte_static
- lw $a1, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetByteStaticFromCode
- jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_byte_static
+// Macros taking opportunity of code similarities for downcalls.
+.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ la $t9, \entrypoint
+ jalr $t9 # (field_idx, Thread*)
+ move $a1, rSELF # pass Thread::Current
+ \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
+END \name
+.endm
+
+.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ la $t9, \entrypoint
+ jalr $t9 # (field_idx, Object*, Thread*) or
+ # (field_idx, new_val, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
+END \name
+.endm
+
+.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ la $t9, \entrypoint
+ jalr $t9 # (field_idx, Object*, new_val, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
+END \name
+.endm
+
+.macro FOUR_ARG_REF_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ la $t9, \entrypoint
+ jalr $t9 # (field_idx, Object*, 64-bit new_val, Thread*) or
+ # (field_idx, 64-bit new_val, Thread*)
+ # Note that a 64-bit new_val needs to be aligned with
+ # an even-numbered register, hence A1 may be skipped
+ # for new_val to reside in A2-A3.
+ sw rSELF, 16($sp) # pass Thread::Current
+ \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
+END \name
+.endm
/*
- * Called by managed code to resolve a static field and load a char primitive value.
+ * Called by managed code to resolve a static/instance field and load/store a value.
*/
- .extern artGetCharStaticFromCode
-ENTRY art_quick_get_char_static
- lw $a1, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetCharStaticFromCode
- jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_char_static
- /*
- * Called by managed code to resolve a static field and load a short primitive value.
- */
- .extern artGetShortStaticFromCode
-ENTRY art_quick_get_short_static
- lw $a1, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetShortStaticFromCode
- jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_short_static
-
- /*
- * Called by managed code to resolve a static field and load a 32-bit primitive value.
- */
- .extern artGet32StaticFromCode
-ENTRY art_quick_get32_static
- lw $a1, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGet32StaticFromCode
- jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get32_static
-
- /*
- * Called by managed code to resolve a static field and load a 64-bit primitive value.
- */
- .extern artGet64StaticFromCode
-ENTRY art_quick_get64_static
- lw $a1, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGet64StaticFromCode
- jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get64_static
-
- /*
- * Called by managed code to resolve a static field and load an object reference.
- */
- .extern artGetObjStaticFromCode
-ENTRY art_quick_get_obj_static
- lw $a1, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetObjStaticFromCode
- jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_obj_static
-
- /*
- * Called by managed code to resolve an instance field and load a boolean primitive value.
- */
- .extern artGetBooleanInstanceFromCode
-ENTRY art_quick_get_boolean_instance
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetBooleanInstanceFromCode
- jalr $t9 # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_boolean_instance
- /*
- * Called by managed code to resolve an instance field and load a byte primitive value.
- */
- .extern artGetByteInstanceFromCode
-ENTRY art_quick_get_byte_instance
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetByteInstanceFromCode
- jalr $t9 # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_byte_instance
-
- /*
- * Called by managed code to resolve an instance field and load a char primitive value.
- */
- .extern artGetCharInstanceFromCode
-ENTRY art_quick_get_char_instance
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetCharInstanceFromCode
- jalr $t9 # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_char_instance
- /*
- * Called by managed code to resolve an instance field and load a short primitive value.
- */
- .extern artGetShortInstanceFromCode
-ENTRY art_quick_get_short_instance
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetShortInstanceFromCode
- jalr $t9 # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_short_instance
-
- /*
- * Called by managed code to resolve an instance field and load a 32-bit primitive value.
- */
- .extern artGet32InstanceFromCode
-ENTRY art_quick_get32_instance
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGet32InstanceFromCode
- jalr $t9 # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get32_instance
-
- /*
- * Called by managed code to resolve an instance field and load a 64-bit primitive value.
- */
- .extern artGet64InstanceFromCode
-ENTRY art_quick_get64_instance
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGet64InstanceFromCode
- jalr $t9 # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get64_instance
-
- /*
- * Called by managed code to resolve an instance field and load an object reference.
- */
- .extern artGetObjInstanceFromCode
-ENTRY art_quick_get_obj_instance
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetObjInstanceFromCode
- jalr $t9 # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_obj_instance
-
- /*
- * Called by managed code to resolve a static field and store a 8-bit primitive value.
- */
- .extern artSet8StaticFromCode
-ENTRY art_quick_set8_static
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSet8StaticFromCode
- jalr $t9 # (field_idx, new_val, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set8_static
-
- /*
- * Called by managed code to resolve a static field and store a 16-bit primitive value.
- */
- .extern artSet16StaticFromCode
-ENTRY art_quick_set16_static
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSet16StaticFromCode
- jalr $t9 # (field_idx, new_val, referrer, Thread*, $sp)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set16_static
-
- /*
- * Called by managed code to resolve a static field and store a 32-bit primitive value.
- */
- .extern artSet32StaticFromCode
-ENTRY art_quick_set32_static
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSet32StaticFromCode
- jalr $t9 # (field_idx, new_val, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set32_static
-
- /*
- * Called by managed code to resolve a static field and store a 64-bit primitive value.
- */
- .extern artSet64StaticFromCode
-ENTRY art_quick_set64_static
- lw $a1, 0($sp) # pass referrer's Method*
- # 64 bit new_val is in a2:a3 pair
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSet64StaticFromCode
- jalr $t9 # (field_idx, referrer, new_val, Thread*)
- sw rSELF, 16($sp) # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set64_static
-
- /*
- * Called by managed code to resolve a static field and store an object reference.
- */
- .extern artSetObjStaticFromCode
-ENTRY art_quick_set_obj_static
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSetObjStaticFromCode
- jalr $t9 # (field_idx, new_val, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set_obj_static
-
- /*
- * Called by managed code to resolve an instance field and store a 8-bit primitive value.
- */
- .extern artSet8InstanceFromCode
-ENTRY art_quick_set8_instance
- lw $a3, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSet8InstanceFromCode
- jalr $t9 # (field_idx, Object*, new_val, referrer, Thread*)
- sw rSELF, 16($sp) # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set8_instance
-
- /*
- * Called by managed code to resolve an instance field and store a 16-bit primitive value.
- */
- .extern artSet16InstanceFromCode
-ENTRY art_quick_set16_instance
- lw $a3, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSet16InstanceFromCode
- jalr $t9 # (field_idx, Object*, new_val, referrer, Thread*)
- sw rSELF, 16($sp) # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set16_instance
-
- /*
- * Called by managed code to resolve an instance field and store a 32-bit primitive value.
- */
- .extern artSet32InstanceFromCode
-ENTRY art_quick_set32_instance
- lw $a3, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSet32InstanceFromCode
- jalr $t9 # (field_idx, Object*, new_val, referrer, Thread*)
- sw rSELF, 16($sp) # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set32_instance
-
- /*
- * Called by managed code to resolve an instance field and store a 64-bit primitive value.
- */
- .extern artSet64InstanceFromCode
-ENTRY art_quick_set64_instance
- lw $t1, 0($sp) # load referrer's Method*
- # 64 bit new_val is in a2:a3 pair
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- sw rSELF, 20($sp) # pass Thread::Current
- la $t9, artSet64InstanceFromCode
- jalr $t9 # (field_idx, Object*, new_val, referrer, Thread*)
- sw $t1, 16($sp) # pass referrer's Method*
- RETURN_IF_ZERO
-END art_quick_set64_instance
-
- /*
- * Called by managed code to resolve an instance field and store an object reference.
- */
- .extern artSetObjInstanceFromCode
-ENTRY art_quick_set_obj_instance
- lw $a3, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSetObjInstanceFromCode
- jalr $t9 # (field_idx, Object*, new_val, referrer, Thread*)
- sw rSELF, 16($sp) # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set_obj_instance
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_ZERO
+FOUR_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_ZERO
+FOUR_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCompiledCode, RETURN_IF_ZERO
// Macro to facilitate adding new allocation entrypoints.
.macro ONE_ARG_DOWNCALL name, entrypoint, return
@@ -1811,6 +1578,7 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index b53fd10..5fee575 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1416,296 +1416,77 @@
move $a2, rSELF # pass Thread::Current
END art_quick_aput_obj
- /*
- * Called by managed code to resolve a static field and load a boolean primitive value.
- */
- .extern artGetBooleanStaticFromCode
-ENTRY art_quick_get_boolean_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetBooleanStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_boolean_static
+// Macros taking opportunity of code similarities for downcalls.
+.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return, extend=0
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ dla $t9, \entrypoint
+ jalr $t9 # (field_idx, Thread*)
+ move $a1, rSELF # pass Thread::Current
+ .if \extend
+ sll $v0, $v0, 0 # sign-extend 32-bit result
+ .endif
+ \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
+END \name
+.endm
+
+.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return, extend=0
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ dla $t9, \entrypoint
+ jalr $t9 # (field_idx, Object*, Thread*) or
+ # (field_idx, new_val, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ .if \extend
+ sll $v0, $v0, 0 # sign-extend 32-bit result
+ .endif
+ \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
+END \name
+.endm
+
+.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return, extend=0
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ dla $t9, \entrypoint
+ jalr $t9 # (field_idx, Object*, new_val, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ .if \extend
+ sll $v0, $v0, 0 # sign-extend 32-bit result
+ .endif
+ \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
+END \name
+.endm
/*
- * Called by managed code to resolve a static field and load a byte primitive value.
+ * Called by managed code to resolve a static/instance field and load/store a value.
*/
- .extern artGetByteStaticFromCode
-ENTRY art_quick_get_byte_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetByteStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_byte_static
-
- /*
- * Called by managed code to resolve a static field and load a char primitive value.
- */
- .extern artGetCharStaticFromCode
-ENTRY art_quick_get_char_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetCharStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_char_static
-
- /*
- * Called by managed code to resolve a static field and load a short primitive value.
- */
- .extern artGetShortStaticFromCode
-ENTRY art_quick_get_short_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetShortStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_short_static
-
- /*
- * Called by managed code to resolve a static field and load a 32-bit primitive value.
- */
- .extern artGet32StaticFromCode
-ENTRY art_quick_get32_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGet32StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- sll $v0, $v0, 0 # sign-extend result
- RETURN_IF_NO_EXCEPTION
-END art_quick_get32_static
-
- /*
- * Called by managed code to resolve a static field and load a 64-bit primitive value.
- */
- .extern artGet64StaticFromCode
-ENTRY art_quick_get64_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGet64StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get64_static
-
- /*
- * Called by managed code to resolve a static field and load an object reference.
- */
- .extern artGetObjStaticFromCode
-ENTRY art_quick_get_obj_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetObjStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_obj_static
-
- /*
- * Called by managed code to resolve an instance field and load a boolean primitive value.
- */
- .extern artGetBooleanInstanceFromCode
-ENTRY art_quick_get_boolean_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetBooleanInstanceFromCode # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_boolean_instance
-
- /*
- * Called by managed code to resolve an instance field and load a byte primitive value.
- */
- .extern artGetByteInstanceFromCode
-ENTRY art_quick_get_byte_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetByteInstanceFromCode # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_byte_instance
-
- /*
- * Called by managed code to resolve an instance field and load a char primitive value.
- */
- .extern artGetCharInstanceFromCode
-ENTRY art_quick_get_char_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetCharInstanceFromCode # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_char_instance
-
- /*
- * Called by managed code to resolve an instance field and load a short primitive value.
- */
- .extern artGetShortInstanceFromCode
-ENTRY art_quick_get_short_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetShortInstanceFromCode # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_short_instance
-
- /*
- * Called by managed code to resolve an instance field and load a 32-bit primitive value.
- */
- .extern artGet32InstanceFromCode
-ENTRY art_quick_get32_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGet32InstanceFromCode # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- sll $v0, $v0, 0 # sign-extend result
- RETURN_IF_NO_EXCEPTION
-END art_quick_get32_instance
-
- /*
- * Called by managed code to resolve an instance field and load a 64-bit primitive value.
- */
- .extern artGet64InstanceFromCode
-ENTRY art_quick_get64_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGet64InstanceFromCode # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get64_instance
-
- /*
- * Called by managed code to resolve an instance field and load an object reference.
- */
- .extern artGetObjInstanceFromCode
-ENTRY art_quick_get_obj_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetObjInstanceFromCode # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_obj_instance
-
- /*
- * Called by managed code to resolve a static field and store a 8-bit primitive value.
- */
- .extern artSet8StaticFromCode
-ENTRY art_quick_set8_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet8StaticFromCode # (field_idx, new_val, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set8_static
-
- /*
- * Called by managed code to resolve a static field and store a 16-bit primitive value.
- */
- .extern artSet16StaticFromCode
-ENTRY art_quick_set16_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet16StaticFromCode # (field_idx, new_val, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set16_static
-
- /*
- * Called by managed code to resolve a static field and store a 32-bit primitive value.
- */
- .extern artSet32StaticFromCode
-ENTRY art_quick_set32_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet32StaticFromCode # (field_idx, new_val, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set32_static
-
- /*
- * Called by managed code to resolve a static field and store a 64-bit primitive value.
- */
- .extern artSet64StaticFromCode
-ENTRY art_quick_set64_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- # a2 contains the new val
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet64StaticFromCode # (field_idx, referrer, new_val, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set64_static
-
- /*
- * Called by managed code to resolve a static field and store an object reference.
- */
- .extern artSetObjStaticFromCode
-ENTRY art_quick_set_obj_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSetObjStaticFromCode # (field_idx, new_val, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set_obj_static
-
- /*
- * Called by managed code to resolve an instance field and store a 8-bit primitive value.
- */
- .extern artSet8InstanceFromCode
-ENTRY art_quick_set8_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a3, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet8InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
- move $a4, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set8_instance
-
- /*
- * Called by managed code to resolve an instance field and store a 16-bit primitive value.
- */
- .extern artSet16InstanceFromCode
-ENTRY art_quick_set16_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a3, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet16InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
- move $a4, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set16_instance
-
- /*
- * Called by managed code to resolve an instance field and store a 32-bit primitive value.
- */
- .extern artSet32InstanceFromCode
-ENTRY art_quick_set32_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a3, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet32InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
- move $a4, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set32_instance
-
- /*
- * Called by managed code to resolve an instance field and store a 64-bit primitive value.
- */
- .extern artSet64InstanceFromCode
-ENTRY art_quick_set64_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a3, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet64InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
- move $a4, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set64_instance
-
- /*
- * Called by managed code to resolve an instance field and store an object reference.
- */
- .extern artSetObjInstanceFromCode
-ENTRY art_quick_set_obj_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a3, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSetObjInstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
- move $a4, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set_obj_instance
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION, 1
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION, 1
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCompiledCode, RETURN_IF_ZERO
// Macro to facilitate adding new allocation entrypoints.
.macro ONE_ARG_DOWNCALL name, entrypoint, return
@@ -1753,6 +1534,7 @@
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
diff --git a/runtime/arch/quick_alloc_entrypoints.S b/runtime/arch/quick_alloc_entrypoints.S
index e79dc60..2b3525b 100644
--- a/runtime/arch/quick_alloc_entrypoints.S
+++ b/runtime/arch/quick_alloc_entrypoints.S
@@ -30,6 +30,11 @@
THREE_ARG_DOWNCALL art_quick_alloc_string_from_chars\c_suffix, artAllocStringFromCharsFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate a string from string
ONE_ARG_DOWNCALL art_quick_alloc_string_from_string\c_suffix, artAllocStringFromStringFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+
+TWO_ARG_DOWNCALL art_quick_alloc_array_resolved8\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+TWO_ARG_DOWNCALL art_quick_alloc_array_resolved16\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+TWO_ARG_DOWNCALL art_quick_alloc_array_resolved32\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+TWO_ARG_DOWNCALL art_quick_alloc_array_resolved64\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
.endm
.macro GENERATE_ALL_ALLOC_ENTRYPOINTS
@@ -56,14 +61,22 @@
ONE_ARG_DOWNCALL art_quick_alloc_object_initialized ## c_suffix, artAllocObjectFromCodeInitialized ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
ONE_ARG_DOWNCALL art_quick_alloc_object_with_checks ## c_suffix, artAllocObjectFromCodeWithChecks ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_array_resolved ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(c_suffix, cxx_suffix) \
FOUR_ARG_DOWNCALL art_quick_alloc_string_from_bytes ## c_suffix, artAllocStringFromBytesFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(c_suffix, cxx_suffix) \
THREE_ARG_DOWNCALL art_quick_alloc_string_from_chars ## c_suffix, artAllocStringFromCharsFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(c_suffix, cxx_suffix) \
ONE_ARG_DOWNCALL art_quick_alloc_string_from_string ## c_suffix, artAllocStringFromStringFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(c_suffix, cxx_suffix) \
+ TWO_ARG_DOWNCALL art_quick_alloc_array_resolved ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(c_suffix, cxx_suffix) \
+ TWO_ARG_DOWNCALL art_quick_alloc_array_resolved8 ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(c_suffix, cxx_suffix) \
+ TWO_ARG_DOWNCALL art_quick_alloc_array_resolved16 ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(c_suffix, cxx_suffix) \
+ TWO_ARG_DOWNCALL art_quick_alloc_array_resolved32 ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(c_suffix, cxx_suffix) \
+ TWO_ARG_DOWNCALL art_quick_alloc_array_resolved64 ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS
@@ -76,6 +89,10 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
@@ -87,6 +104,10 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
@@ -102,6 +123,10 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc, DlMalloc)
@@ -110,15 +135,23 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc_instrumented, DlMallocInstrumented)
// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc, RosAlloc)
@@ -127,6 +160,10 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc_instrumented, RosAllocInstrumented)
@@ -135,6 +172,10 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer, BumpPointer)
@@ -143,6 +184,10 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer_instrumented, BumpPointerInstrumented)
@@ -151,6 +196,10 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab_instrumented, TLABInstrumented)
@@ -159,6 +208,10 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region, Region)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_region, Region)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_region, Region)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_region, Region)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region, Region)
@@ -167,6 +220,10 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_instrumented, RegionInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_region_instrumented, RegionInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_region_instrumented, RegionInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_region_instrumented, RegionInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_instrumented, RegionInstrumented)
@@ -175,6 +232,10 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab_instrumented, RegionTLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_region_tlab_instrumented, RegionTLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_region_tlab_instrumented, RegionTLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_region_tlab_instrumented, RegionTLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab_instrumented, RegionTLABInstrumented)
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 547b57e..0bf08a6 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1051,7 +1051,7 @@
// resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 10U,
reinterpret_cast<size_t>(nullptr),
- StubTest::GetEntrypoint(self, kQuickAllocArrayResolved),
+ StubTest::GetEntrypoint(self, kQuickAllocArrayResolved32),
self);
EXPECT_FALSE(self->IsExceptionPending()) << mirror::Object::PrettyTypeOf(self->GetException());
EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
@@ -1071,7 +1071,7 @@
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()),
GB, // that should fail...
reinterpret_cast<size_t>(nullptr),
- StubTest::GetEntrypoint(self, kQuickAllocArrayResolved),
+ StubTest::GetEntrypoint(self, kQuickAllocArrayResolved32),
self);
EXPECT_TRUE(self->IsExceptionPending());
@@ -1610,8 +1610,8 @@
for (size_t i = 0; i < arraysize(values); ++i) {
// 64 bit FieldSet stores the set value in the second register.
test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
- 0U,
values[i],
+ 0U,
StubTest::GetEntrypoint(self, kQuickSet64Static),
self,
referrer);
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index c420259..76615e8 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -875,13 +875,12 @@
DEFINE_FUNCTION VAR(c_name)
SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- mov FRAME_SIZE_SAVE_REFS_ONLY(%esp), %ecx // get referrer
- PUSH eax // push padding
+ subl MACRO_LITERAL(8), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH ecx // pass referrer
PUSH eax // pass arg1
- call CALLVAR(cxx_name) // cxx_name(arg1, referrer, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg1, Thread*)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
@@ -893,10 +892,9 @@
DEFINE_FUNCTION VAR(c_name)
SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- mov FRAME_SIZE_SAVE_REFS_ONLY(%esp), %edx // get referrer
+ PUSH eax // alignment padding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH edx // pass referrer
PUSH ecx // pass arg2
PUSH eax // pass arg1
call CALLVAR(cxx_name) // cxx_name(arg1, arg2, referrer, Thread*)
@@ -911,18 +909,13 @@
DEFINE_FUNCTION VAR(c_name)
SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- mov FRAME_SIZE_SAVE_REFS_ONLY(%esp), %ebx // get referrer
- subl MACRO_LITERAL(12), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH ebx // pass referrer
PUSH edx // pass arg3
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call CALLVAR(cxx_name) // cxx_name(arg1, arg2, arg3, referrer,
- // Thread*)
- addl LITERAL(32), %esp // pop arguments
+ call CALLVAR(cxx_name) // cxx_name(arg1, arg2, arg3, Thread*)
+ addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
@@ -954,10 +947,37 @@
END_MACRO
// Generate the allocation entrypoints for each allocator.
-GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
+
+// Comment out allocators that have x86 specific asm.
+// Region TLAB:
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
+// Normal TLAB:
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc).
-DEFINE_FUNCTION art_quick_alloc_object_resolved_rosalloc
+MACRO2(ART_QUICK_ALLOC_OBJECT_ROSALLOC, c_name, cxx_name)
+ DEFINE_FUNCTION VAR(c_name)
// Fast path rosalloc allocation.
// eax: type/return value
// ecx, ebx, edx: free
@@ -966,14 +986,14 @@
// stack has room
movl THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx), %ecx
cmpl THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%ebx), %ecx
- jae .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ jae .Lslow_path\c_name
movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%eax), %ecx // Load the object size (ecx)
// Check if the size is for a thread
// local allocation. Also does the
// finalizable and initialization check.
cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %ecx
- ja .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ ja .Lslow_path\c_name
shrl LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %ecx // Calculate the rosalloc bracket index
// from object size.
// Load thread local rosalloc run (ebx)
@@ -984,7 +1004,7 @@
// Load free_list head (edi),
// this will be the return value.
movl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx), %ecx
- jecxz .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ jecxz .Lslow_path\c_name
// Point of no slow path. Won't go to
// the slow path from here on.
// Load the next pointer of the head
@@ -1015,7 +1035,7 @@
// No fence needed for x86.
movl %ecx, %eax // Move object to return register
ret
-.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
+.Lslow_path\c_name:
SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
subl LITERAL(8), %esp // alignment padding
@@ -1027,10 +1047,14 @@
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
-END_FUNCTION art_quick_alloc_object_resolved_rosalloc
+ END_FUNCTION VAR(c_name)
+END_MACRO
-// The common fast path code for art_quick_alloc_object_resolved_tlab
-// and art_quick_alloc_object_resolved_region_tlab.
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc
+
+// The common fast path code for art_quick_alloc_object_resolved/initialized_tlab
+// and art_quick_alloc_object_resolved/initialized_region_tlab.
//
// EAX: type/return_value
MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH, slowPathLabel)
@@ -1054,8 +1078,8 @@
ret // Fast path succeeded.
END_MACRO
-// The common slow path code for art_quick_alloc_object_resolved_tlab
-// and art_quick_alloc_object_resolved_region_tlab.
+// The common slow path code for art_quick_alloc_object_resolved/initialized_tlab
+// and art_quick_alloc_object_resolved/initialized_region_tlab.
MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH, cxx_name)
POP edi
SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
@@ -1072,33 +1096,154 @@
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_MACRO
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB). May be called
-// for CC if the GC is not marking.
-DEFINE_FUNCTION art_quick_alloc_object_resolved_tlab
+MACRO2(ART_QUICK_ALLOC_OBJECT_TLAB, c_name, cxx_name)
+ DEFINE_FUNCTION VAR(c_name)
// Fast path tlab allocation.
// EAX: type
// EBX, ECX, EDX: free.
PUSH edi
- ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_tlab_slow_path
-.Lart_quick_alloc_object_resolved_tlab_slow_path:
- ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedTLAB
-END_FUNCTION art_quick_alloc_object_resolved_tlab
+ ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lslow_path\c_name
+.Lslow_path\c_name:
+ ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH RAW_VAR(cxx_name)
+ END_FUNCTION VAR(c_name)
+END_MACRO
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB).
-DEFINE_FUNCTION art_quick_alloc_object_resolved_region_tlab
- // Fast path region tlab allocation.
- // EAX: type/return value
- // EBX, ECX, EDX: free.
-#if !defined(USE_READ_BARRIER)
+ART_QUICK_ALLOC_OBJECT_TLAB art_quick_alloc_object_resolved_tlab, artAllocObjectFromCodeResolvedTLAB
+ART_QUICK_ALLOC_OBJECT_TLAB art_quick_alloc_object_initialized_tlab, artAllocObjectFromCodeInitializedTLAB
+ART_QUICK_ALLOC_OBJECT_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB
+ART_QUICK_ALLOC_OBJECT_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB
+
+// The fast path code for art_quick_alloc_array_region_tlab.
+// Inputs: EAX: the class, ECX: int32_t component_count, EDX: total_size
+// Free temp: EBX
+// Output: EAX: return value.
+MACRO1(ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE, slowPathLabel)
+ mov %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread
+ // Mask out the unaligned part to make sure we are 8 byte aligned.
+ andl LITERAL(OBJECT_ALIGNMENT_MASK_TOGGLED), %edx
+ movl THREAD_LOCAL_END_OFFSET(%ebx), %edi
+ subl THREAD_LOCAL_POS_OFFSET(%ebx), %edi
+ cmpl %edi, %edx // Check if it fits.
+ ja RAW_VAR(slowPathLabel)
+ movl THREAD_LOCAL_POS_OFFSET(%ebx), %edi
+ addl %edi, %edx // Add the object size.
+ movl %edx, THREAD_LOCAL_POS_OFFSET(%ebx) // Update thread_local_pos_
+ addl LITERAL(1), THREAD_LOCAL_OBJECTS_OFFSET(%ebx) // Increase thread_local_objects.
+ // Store the class pointer in the
+ // header.
+ // No fence needed for x86.
+ POISON_HEAP_REF eax
+ movl %eax, MIRROR_OBJECT_CLASS_OFFSET(%edi)
+ movl %ecx, MIRROR_ARRAY_LENGTH_OFFSET(%edi)
+ movl %edi, %eax
+ POP edi
+ ret // Fast path succeeded.
+END_MACRO
+
+MACRO1(COMPUTE_ARRAY_SIZE_UNKNOWN, slow_path)
+ // We should never enter here. Code is provided for reference.
int3
- int3
+ // Possibly a large object, go slow.
+ // Also does negative array size check.
+ cmpl LITERAL((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_WIDE_ARRAY_DATA_OFFSET) / 8), %ecx
+ ja RAW_VAR(slow_path)
+ PUSH ecx
+ movl %ecx, %edx
+ movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%eax), %ecx // Load component type.
+ UNPOISON_HEAP_REF ecx
+ movl MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET(%ecx), %ecx // Load primitive type.
+ shr MACRO_LITERAL(PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT), %ecx // Get component size shift.
+ sall %cl, %edx // Calculate array count shifted.
+ // Add array header + alignment rounding.
+ add MACRO_LITERAL(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK), %edx
+ // Add 4 extra bytes if we are doing a long array.
+ add MACRO_LITERAL(1), %ecx
+ and MACRO_LITERAL(4), %ecx
+#if MIRROR_WIDE_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4
+#error Long array data offset must be 4 greater than int array data offset.
#endif
- PUSH edi
- ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_region_tlab_slow_path
-.Lart_quick_alloc_object_resolved_region_tlab_slow_path:
- ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedRegionTLAB
-END_FUNCTION art_quick_alloc_object_resolved_region_tlab
+ addl %ecx, %edx
+ POP ecx
+END_MACRO
+MACRO1(COMPUTE_ARRAY_SIZE_8, slow_path)
+ // EAX: mirror::Class* klass, ECX: int32_t component_count
+ // Possibly a large object, go slow.
+ // Also does negative array size check.
+ cmpl LITERAL(MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET), %ecx
+ ja RAW_VAR(slow_path)
+ // Add array header + alignment rounding.
+ leal (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)(%ecx), %edx
+END_MACRO
+
+MACRO1(COMPUTE_ARRAY_SIZE_16, slow_path)
+ // EAX: mirror::Class* klass, ECX: int32_t component_count
+ // Possibly a large object, go slow.
+ // Also does negative array size check.
+ cmpl LITERAL((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET) / 2), %ecx
+ ja RAW_VAR(slow_path)
+ // Add array header + alignment rounding.
+ leal ((MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK) / 2)(%ecx), %edx
+ sall MACRO_LITERAL(1), %edx
+END_MACRO
+
+MACRO1(COMPUTE_ARRAY_SIZE_32, slow_path)
+ // EAX: mirror::Class* klass, ECX: int32_t component_count
+ // Possibly a large object, go slow.
+ // Also does negative array size check.
+ cmpl LITERAL((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET) / 4), %ecx
+ ja RAW_VAR(slow_path)
+ // Add array header + alignment rounding.
+ leal ((MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK) / 4)(%ecx), %edx
+ sall MACRO_LITERAL(2), %edx
+END_MACRO
+
+MACRO1(COMPUTE_ARRAY_SIZE_64, slow_path)
+ // EAX: mirror::Class* klass, ECX: int32_t component_count
+ // Possibly a large object, go slow.
+ // Also does negative array size check.
+ cmpl LITERAL((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_WIDE_ARRAY_DATA_OFFSET) / 8), %ecx
+ ja RAW_VAR(slow_path)
+ // Add array header + alignment rounding.
+ leal ((MIRROR_WIDE_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK) / 8)(%ecx), %edx
+ sall MACRO_LITERAL(3), %edx
+END_MACRO
+
+MACRO3(GENERATE_ALLOC_ARRAY_TLAB, c_entrypoint, cxx_name, size_setup)
+ DEFINE_FUNCTION VAR(c_entrypoint)
+ // EAX: mirror::Class* klass, ECX: int32_t component_count
+ PUSH edi
+ CALL_MACRO(size_setup) .Lslow_path\c_entrypoint
+ ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE .Lslow_path\c_entrypoint
+.Lslow_path\c_entrypoint:
+ POP edi
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
+ // Outgoing argument set up
+ PUSH eax // alignment padding
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ CFI_ADJUST_CFA_OFFSET(4)
+ PUSH ecx
+ PUSH eax
+ call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
+ addl LITERAL(16), %esp // pop arguments
+ CFI_ADJUST_CFA_OFFSET(-16)
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ END_FUNCTION VAR(c_entrypoint)
+END_MACRO
+
+
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_8
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_16
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_32
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_64
+
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_8
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_16
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_32
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_64
DEFINE_FUNCTION art_quick_resolve_string
SETUP_SAVE_EVERYTHING_FRAME ebx, ebx
@@ -1556,77 +1701,53 @@
ret
END_FUNCTION art_quick_lushr
-ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_EAX_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_EAX_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_EAX_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_EAX_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_EAX_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_EAX_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_EAX_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCompiledCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCompiledCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_EAX_ZERO
-// Call artSet64InstanceFromCode with 4 word size arguments and the referrer.
+// Call artSet64InstanceFromCompiledCode with 4 word size arguments.
DEFINE_FUNCTION art_quick_set64_instance
movd %ebx, %xmm0
SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
movd %xmm0, %ebx
// Outgoing argument set up
- subl LITERAL(8), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(8)
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- CFI_ADJUST_CFA_OFFSET(4)
- pushl (FRAME_SIZE_SAVE_REFS_ONLY+12)(%esp) // pass referrer
- CFI_ADJUST_CFA_OFFSET(4)
- PUSH ebx // pass high half of new_val
- PUSH edx // pass low half of new_val
- PUSH ecx // pass object
- PUSH eax // pass field_idx
- call SYMBOL(artSet64InstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*)
- addl LITERAL(32), %esp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-32)
- RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_EAX_ZERO // return or deliver exception
-END_FUNCTION art_quick_set64_instance
-
-// Call artSet64StaticFromCode with 3 word size arguments plus with the referrer in the 2nd position
-// so that new_val is aligned on even registers were we passing arguments in registers.
-DEFINE_FUNCTION art_quick_set64_static
- // TODO: Implement SETUP_GOT_NOSAVE for got_reg = ecx to avoid moving around the registers.
- movd %ebx, %xmm0
- SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
- movd %xmm0, %ebx
- mov FRAME_SIZE_SAVE_REFS_ONLY(%esp), %ecx // get referrer
- subl LITERAL(12), %esp // alignment padding
+ subl LITERAL(12), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH ebx // pass high half of new_val
PUSH edx // pass low half of new_val
- PUSH ecx // pass referrer
+ PUSH ecx // pass object
PUSH eax // pass field_idx
- call SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*)
+ call SYMBOL(artSet64InstanceFromCompiledCode) // (field_idx, Object*, new_val, Thread*)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO // return or deliver exception
-END_FUNCTION art_quick_set64_static
+END_FUNCTION art_quick_set64_instance
DEFINE_FUNCTION art_quick_proxy_invoke_handler
SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_EAX
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 46bee39..a1ae858 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -919,11 +919,10 @@
MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
- movq 8(%rsp), %rsi // pass referrer
SETUP_SAVE_REFS_ONLY_FRAME
// arg0 is in rdi
- movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call CALLVAR(cxx_name) // cxx_name(arg0, referrer, Thread*)
+ movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
+ call CALLVAR(cxx_name) // cxx_name(arg0, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro)
END_FUNCTION VAR(c_name)
@@ -931,11 +930,10 @@
MACRO3(TWO_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
- movq 8(%rsp), %rdx // pass referrer
SETUP_SAVE_REFS_ONLY_FRAME
// arg0 and arg1 are in rdi/rsi
- movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
- call CALLVAR(cxx_name) // (arg0, arg1, referrer, Thread*)
+ movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
+ call CALLVAR(cxx_name) // (arg0, arg1, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro)
END_FUNCTION VAR(c_name)
@@ -943,11 +941,10 @@
MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
- movq 8(%rsp), %rcx // pass referrer
SETUP_SAVE_REFS_ONLY_FRAME
// arg0, arg1, and arg2 are in rdi/rsi/rdx
- movq %gs:THREAD_SELF_OFFSET, %r8 // pass Thread::Current()
- call CALLVAR(cxx_name) // cxx_name(arg0, arg1, arg2, referrer, Thread*)
+ movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
+ call CALLVAR(cxx_name) // cxx_name(arg0, arg1, arg2, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
@@ -987,6 +984,10 @@
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
@@ -995,13 +996,18 @@
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc).
-DEFINE_FUNCTION art_quick_alloc_object_resolved_rosalloc
+MACRO2(ART_QUICK_ALLOC_OBJECT_ROSALLOC, c_name, cxx_name)
+ DEFINE_FUNCTION VAR(c_name)
// Fast path rosalloc allocation.
// RDI: mirror::Class*, RAX: return value
// RSI, RDX, RCX, R8, R9: free.
@@ -1010,14 +1016,14 @@
movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread
movq THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%r8), %rcx // rcx = alloc stack top.
cmpq THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%r8), %rcx
- jae .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ jae .Lslow_path\c_name
// Load the object size
movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdi), %eax
// Check if the size is for a thread
// local allocation. Also does the
// initialized and finalizable checks.
cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %eax
- ja .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ ja .Lslow_path\c_name
// Compute the rosalloc bracket index
// from the size.
shrq LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %rax
@@ -1031,7 +1037,7 @@
// will be the return val.
movq (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%r9), %rax
testq %rax, %rax
- jz .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ jz .Lslow_path\c_name
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber rdi and rsi.
// Push the new object onto the thread
// local allocation stack and
@@ -1058,25 +1064,19 @@
decl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)(%r9)
// No fence necessary for x86.
ret
-.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
+.Lslow_path\c_name:
SETUP_SAVE_REFS_ONLY_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
- call SYMBOL(artAllocObjectFromCodeResolvedRosAlloc) // cxx_name(arg0, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg0, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
-END_FUNCTION art_quick_alloc_object_rosalloc
-
-// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
-//
-// RDI: type_idx, RSI: ArtMethod*, RDX/EDX: the class, RAX: return value.
-// RCX: scratch, r8: Thread::Current().
-MACRO1(ALLOC_OBJECT_TLAB_FAST_PATH, slowPathLabel)
- testl %edx, %edx // Check null class
- jz RAW_VAR(slowPathLabel)
- ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH(RAW_VAR(slowPathLabel))
+ END_FUNCTION VAR(c_name)
END_MACRO
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc
+
// The common fast path code for art_quick_alloc_object_resolved_region_tlab.
// TODO: delete ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH since it is the same as
// ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH.
@@ -1112,26 +1112,11 @@
END_MACRO
// The fast path code for art_quick_alloc_array_region_tlab.
-// Inputs: RDI: the class, RSI: int32_t component_count
-// Free temps: RCX, RDX, R8, R9
+// Inputs: RDI: the class, RSI: int32_t component_count, R9: total_size
+// Free temps: RCX, RDX, R8
// Output: RAX: return value.
-MACRO1(ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED, slowPathLabel)
- movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%rdi), %ecx // Load component type.
- UNPOISON_HEAP_REF ecx
- movl MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET(%rcx), %ecx // Load primitive type.
- shrq LITERAL(PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT), %rcx // Get component size shift.
- movq %rsi, %r9
- salq %cl, %r9 // Calculate array count shifted.
- // Add array header + alignment rounding.
- addq LITERAL(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK), %r9
- // Add 4 extra bytes if we are doing a long array.
- addq LITERAL(1), %rcx
- andq LITERAL(4), %rcx
- addq %rcx, %r9
+MACRO1(ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE, slowPathLabel)
movq %gs:THREAD_SELF_OFFSET, %rcx // rcx = thread
-#if MIRROR_LONG_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4
-#error Long array data offset must be 4 greater than int array data offset.
-#endif
// Mask out the unaligned part to make sure we are 8 byte aligned.
andq LITERAL(OBJECT_ALIGNMENT_MASK_TOGGLED64), %r9
movq THREAD_LOCAL_POS_OFFSET(%rcx), %rax
@@ -1149,7 +1134,6 @@
ret // Fast path succeeded.
END_MACRO
-
// The common slow path code for art_quick_alloc_object_{resolved, initialized}_tlab
// and art_quick_alloc_object_{resolved, initialized}_region_tlab.
MACRO1(ALLOC_OBJECT_TLAB_SLOW_PATH, cxx_name)
@@ -1161,16 +1145,6 @@
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_MACRO
-// The slow path code for art_quick_alloc_array_region_tlab.
-MACRO1(ALLOC_ARRAY_TLAB_SLOW_PATH, cxx_name)
- SETUP_SAVE_REFS_ONLY_FRAME // save ref containing registers for GC
- // Outgoing argument set up
- movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
- RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
-END_MACRO
-
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB). May be
// called with CC if the GC is not active.
DEFINE_FUNCTION art_quick_alloc_object_resolved_tlab
@@ -1191,25 +1165,87 @@
ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeInitializedTLAB
END_FUNCTION art_quick_alloc_object_initialized_tlab
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB).
-DEFINE_FUNCTION art_quick_alloc_array_resolved_tlab
+MACRO0(COMPUTE_ARRAY_SIZE_UNKNOWN)
+ movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%rdi), %ecx // Load component type.
+ UNPOISON_HEAP_REF ecx
+ movl MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET(%rcx), %ecx // Load primitive type.
+ shrq MACRO_LITERAL(PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT), %rcx // Get component size shift.
+ movq %rsi, %r9
+ salq %cl, %r9 // Calculate array count shifted.
+ // Add array header + alignment rounding.
+ addq MACRO_LITERAL(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK), %r9
+ // Add 4 extra bytes if we are doing a long array.
+ addq MACRO_LITERAL(1), %rcx
+ andq MACRO_LITERAL(4), %rcx
+#if MIRROR_LONG_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4
+#error Long array data offset must be 4 greater than int array data offset.
+#endif
+ addq %rcx, %r9
+END_MACRO
+
+MACRO0(COMPUTE_ARRAY_SIZE_8)
// RDI: mirror::Class* klass, RSI: int32_t component_count
// RDX, RCX, R8, R9: free. RAX: return val.
- ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED .Lart_quick_alloc_array_resolved_tlab_slow_path
-.Lart_quick_alloc_array_resolved_tlab_slow_path:
- ALLOC_ARRAY_TLAB_SLOW_PATH artAllocArrayFromCodeResolvedTLAB
-END_FUNCTION art_quick_alloc_array_resolved_tlab
+ movq %rsi, %r9
+ // Add array header + alignment rounding.
+ addq MACRO_LITERAL(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK), %r9
+END_MACRO
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB).
-DEFINE_FUNCTION art_quick_alloc_array_resolved_region_tlab
- // Fast path region tlab allocation.
+MACRO0(COMPUTE_ARRAY_SIZE_16)
// RDI: mirror::Class* klass, RSI: int32_t component_count
- // RCX, RDX, R8, R9: free. RAX: return val.
- ASSERT_USE_READ_BARRIER
- ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED .Lart_quick_alloc_array_resolved_region_tlab_slow_path
-.Lart_quick_alloc_array_resolved_region_tlab_slow_path:
- ALLOC_ARRAY_TLAB_SLOW_PATH artAllocArrayFromCodeResolvedRegionTLAB
-END_FUNCTION art_quick_alloc_array_resolved_region_tlab
+ // RDX, RCX, R8, R9: free. RAX: return val.
+ movq %rsi, %r9
+ salq MACRO_LITERAL(1), %r9
+ // Add array header + alignment rounding.
+ addq MACRO_LITERAL(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK), %r9
+END_MACRO
+
+MACRO0(COMPUTE_ARRAY_SIZE_32)
+ // RDI: mirror::Class* klass, RSI: int32_t component_count
+ // RDX, RCX, R8, R9: free. RAX: return val.
+ movq %rsi, %r9
+ salq MACRO_LITERAL(2), %r9
+ // Add array header + alignment rounding.
+ addq MACRO_LITERAL(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK), %r9
+END_MACRO
+
+MACRO0(COMPUTE_ARRAY_SIZE_64)
+ // RDI: mirror::Class* klass, RSI: int32_t component_count
+ // RDX, RCX, R8, R9: free. RAX: return val.
+ movq %rsi, %r9
+ salq MACRO_LITERAL(3), %r9
+ // Add array header + alignment rounding.
+ addq MACRO_LITERAL(MIRROR_WIDE_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK), %r9
+END_MACRO
+
+MACRO3(GENERATE_ALLOC_ARRAY_TLAB, c_entrypoint, cxx_name, size_setup)
+ DEFINE_FUNCTION VAR(c_entrypoint)
+ // RDI: mirror::Class* klass, RSI: int32_t component_count
+ // RDX, RCX, R8, R9: free. RAX: return val.
+ CALL_MACRO(size_setup)
+ ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE .Lslow_path\c_entrypoint
+.Lslow_path\c_entrypoint:
+ SETUP_SAVE_REFS_ONLY_FRAME // save ref containing registers for GC
+ // Outgoing argument set up
+ movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
+ call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ END_FUNCTION VAR(c_entrypoint)
+END_MACRO
+
+
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_8
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_16
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_32
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_64
+
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_8
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_16
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_32
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_64
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB).
DEFINE_FUNCTION art_quick_alloc_object_resolved_region_tlab
@@ -1239,7 +1275,7 @@
// Outgoing argument set up
movl %eax, %edi // pass string index
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
- call SYMBOL(artResolveStringFromCode) // artResolveStringFromCode(arg0, referrer, Thread*)
+ call SYMBOL(artResolveStringFromCode) // artResolveStringFromCode(arg0, Thread*)
testl %eax, %eax // If result is null, deliver the OOME.
jz 1f
@@ -1551,45 +1587,33 @@
UNIMPLEMENTED art_quick_lshr
UNIMPLEMENTED art_quick_lushr
-THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_EAX_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_EAX_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_EAX_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_EAX_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCompiledCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCompiledCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCompiledCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_EAX_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_EAX_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_EAX_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_EAX_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_EAX_ZERO
-ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-
-// This is singled out as the argument order is different.
-DEFINE_FUNCTION art_quick_set64_static
- // new_val is already in %rdx
- movq 8(%rsp), %rsi // pass referrer
- SETUP_SAVE_REFS_ONLY_FRAME
- // field_idx is in rdi
- movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
- call SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*)
- RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_EAX_ZERO // return or deliver exception
-END_FUNCTION art_quick_set64_static
-
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
DEFINE_FUNCTION art_quick_proxy_invoke_handler
SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_RDI
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index b9f688d..80af8e7 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -52,7 +52,7 @@
}
inline MemberOffset ArtField::GetOffset() {
- DCHECK(GetDeclaringClass()->IsResolved() || GetDeclaringClass()->IsErroneous());
+ DCHECK(GetDeclaringClass()->IsResolved());
return MemberOffset(offset_);
}
@@ -132,7 +132,6 @@
return (object)->GetField ## type(GetOffset());
#define FIELD_SET(object, type, value) \
- DCHECK_EQ(Primitive::kPrim ## type, GetTypeAsPrimitiveType()) << PrettyField(); \
DCHECK((object) != nullptr) << PrettyField(); \
DCHECK(!IsStatic() || ((object) == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); \
if (UNLIKELY(IsVolatile())) { \
@@ -147,6 +146,12 @@
template<bool kTransactionActive>
inline void ArtField::SetBoolean(ObjPtr<mirror::Object> object, uint8_t z) {
+ if (kIsDebugBuild) {
+ // For simplicity, this method is being called by the compiler entrypoint for
+ // both boolean and byte fields.
+ Primitive::Type type = GetTypeAsPrimitiveType();
+ DCHECK(type == Primitive::kPrimBoolean || type == Primitive::kPrimByte) << PrettyField();
+ }
FIELD_SET(object, Boolean, z);
}
@@ -156,6 +161,7 @@
template<bool kTransactionActive>
inline void ArtField::SetByte(ObjPtr<mirror::Object> object, int8_t b) {
+ DCHECK_EQ(Primitive::kPrimByte, GetTypeAsPrimitiveType()) << PrettyField();
FIELD_SET(object, Byte, b);
}
@@ -165,6 +171,12 @@
template<bool kTransactionActive>
inline void ArtField::SetChar(ObjPtr<mirror::Object> object, uint16_t c) {
+ if (kIsDebugBuild) {
+ // For simplicity, this method is being called by the compiler entrypoint for
+ // both char and short fields.
+ Primitive::Type type = GetTypeAsPrimitiveType();
+ DCHECK(type == Primitive::kPrimChar || type == Primitive::kPrimShort) << PrettyField();
+ }
FIELD_SET(object, Char, c);
}
@@ -174,6 +186,7 @@
template<bool kTransactionActive>
inline void ArtField::SetShort(ObjPtr<mirror::Object> object, int16_t s) {
+ DCHECK_EQ(Primitive::kPrimShort, GetTypeAsPrimitiveType()) << PrettyField();
FIELD_SET(object, Short, s);
}
@@ -182,6 +195,8 @@
inline int32_t ArtField::GetInt(ObjPtr<mirror::Object> object) {
if (kIsDebugBuild) {
+ // For simplicity, this method is being called by the compiler entrypoint for
+ // both int and float fields.
Primitive::Type type = GetTypeAsPrimitiveType();
CHECK(type == Primitive::kPrimInt || type == Primitive::kPrimFloat) << PrettyField();
}
@@ -191,6 +206,8 @@
template<bool kTransactionActive>
inline void ArtField::SetInt(ObjPtr<mirror::Object> object, int32_t i) {
if (kIsDebugBuild) {
+ // For simplicity, this method is being called by the compiler entrypoint for
+ // both int and float fields.
Primitive::Type type = GetTypeAsPrimitiveType();
CHECK(type == Primitive::kPrimInt || type == Primitive::kPrimFloat) << PrettyField();
}
@@ -199,6 +216,8 @@
inline int64_t ArtField::GetLong(ObjPtr<mirror::Object> object) {
if (kIsDebugBuild) {
+ // For simplicity, this method is being called by the compiler entrypoint for
+ // both long and double fields.
Primitive::Type type = GetTypeAsPrimitiveType();
CHECK(type == Primitive::kPrimLong || type == Primitive::kPrimDouble) << PrettyField();
}
@@ -208,6 +227,8 @@
template<bool kTransactionActive>
inline void ArtField::SetLong(ObjPtr<mirror::Object> object, int64_t j) {
if (kIsDebugBuild) {
+ // For simplicity, this method is being called by the compiler entrypoint for
+ // both long and double fields.
Primitive::Type type = GetTypeAsPrimitiveType();
CHECK(type == Primitive::kPrimLong || type == Primitive::kPrimDouble) << PrettyField();
}
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 15938c5..950f1aa 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -109,8 +109,7 @@
}
inline uint16_t ArtMethod::GetMethodIndex() {
- DCHECK(IsRuntimeMethod() || GetDeclaringClass()->IsResolved() ||
- GetDeclaringClass()->IsErroneous());
+ DCHECK(IsRuntimeMethod() || GetDeclaringClass()->IsResolved());
return method_index_;
}
@@ -245,7 +244,9 @@
}
inline const DexFile* ArtMethod::GetDexFile() {
- return GetDexCache()->GetDexFile();
+ // It is safe to avoid the read barrier here since the dex file is constant, so if we read the
+ // from-space dex file pointer it will be equal to the to-space copy.
+ return GetDexCache<kWithoutReadBarrier>()->GetDexFile();
}
inline const char* ArtMethod::GetDeclaringClassDescriptor() {
@@ -362,32 +363,34 @@
return GetDeclaringClass()->GetClassLoader();
}
+template <ReadBarrierOption kReadBarrierOption>
inline mirror::DexCache* ArtMethod::GetDexCache() {
if (LIKELY(!IsObsolete())) {
- return GetDeclaringClass()->GetDexCache();
+ mirror::Class* klass = GetDeclaringClass<kReadBarrierOption>();
+ return klass->GetDexCache<kDefaultVerifyFlags, kReadBarrierOption>();
} else {
DCHECK(!IsProxyMethod());
return GetObsoleteDexCache();
}
}
-template<ReadBarrierOption kReadBarrierOption>
inline bool ArtMethod::IsProxyMethod() {
- return GetDeclaringClass<kReadBarrierOption>()->IsProxyClass();
+ // Avoid read barrier since the from-space version of the class will have the correct proxy class
+ // flags since they are constant for the lifetime of the class.
+ return GetDeclaringClass<kWithoutReadBarrier>()->IsProxyClass();
}
inline ArtMethod* ArtMethod::GetInterfaceMethodIfProxy(PointerSize pointer_size) {
if (LIKELY(!IsProxyMethod())) {
return this;
}
- mirror::Class* klass = GetDeclaringClass();
ArtMethod* interface_method = mirror::DexCache::GetElementPtrSize(
GetDexCacheResolvedMethods(pointer_size),
GetDexMethodIndex(),
pointer_size);
DCHECK(interface_method != nullptr);
DCHECK_EQ(interface_method,
- Runtime::Current()->GetClassLinker()->FindMethodForProxy(klass, this));
+ Runtime::Current()->GetClassLinker()->FindMethodForProxy(GetDeclaringClass(), this));
return interface_method;
}
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index d7d39af..6cb8544 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -55,15 +55,24 @@
extern "C" void art_quick_invoke_static_stub(ArtMethod*, uint32_t*, uint32_t, Thread*, JValue*,
const char*);
-ArtMethod* ArtMethod::GetSingleImplementation() {
+ArtMethod* ArtMethod::GetNonObsoleteMethod() {
+ DCHECK_EQ(kRuntimePointerSize, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
+ if (LIKELY(!IsObsolete())) {
+ return this;
+ } else if (IsDirect()) {
+ return &GetDeclaringClass()->GetDirectMethodsSlice(kRuntimePointerSize)[GetMethodIndex()];
+ } else {
+ return GetDeclaringClass()->GetVTableEntry(GetMethodIndex(), kRuntimePointerSize);
+ }
+}
+
+ArtMethod* ArtMethod::GetSingleImplementation(PointerSize pointer_size) {
DCHECK(!IsNative());
if (!IsAbstract()) {
// A non-abstract's single implementation is itself.
return this;
}
- // TODO: add single-implementation logic for abstract method by storing it
- // in ptr_sized_fields_.
- return nullptr;
+ return reinterpret_cast<ArtMethod*>(GetDataPtrSize(pointer_size));
}
ArtMethod* ArtMethod::FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
@@ -437,6 +446,8 @@
PointerSize pointer_size,
bool* found)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ // We shouldn't be calling this with obsolete methods.
+ DCHECK(!method->IsObsolete());
// Although we overwrite the trampoline of non-static methods, we may get here via the resolution
// method for direct methods (or virtual methods made direct).
mirror::Class* declaring_class = method->GetDeclaringClass();
@@ -719,21 +730,7 @@
}
std::string ArtMethod::JniShortName() {
- std::string class_name(GetDeclaringClassDescriptor());
- // Remove the leading 'L' and trailing ';'...
- CHECK_EQ(class_name[0], 'L') << class_name;
- CHECK_EQ(class_name[class_name.size() - 1], ';') << class_name;
- class_name.erase(0, 1);
- class_name.erase(class_name.size() - 1, 1);
-
- std::string method_name(GetName());
-
- std::string short_name;
- short_name += "Java_";
- short_name += MangleForJni(class_name);
- short_name += "_";
- short_name += MangleForJni(method_name);
- return short_name;
+ return GetJniShortName(GetDeclaringClassDescriptor(), GetName());
}
std::string ArtMethod::JniLongName() {
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 17f343d..3836303 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -201,6 +201,10 @@
return (GetAccessFlags() & kAccCompileDontBother) == 0;
}
+ void SetDontCompile() {
+ AddAccessFlags(kAccCompileDontBother);
+ }
+
// A default conflict method is a special sentinel method that stands for a conflict between
// multiple default methods. It cannot be invoked, throwing an IncompatibleClassChangeError if one
// attempts to do so.
@@ -226,7 +230,7 @@
void SetIsObsolete() {
// TODO We should really support redefining intrinsic if possible.
DCHECK(!IsIntrinsic());
- SetAccessFlags(GetAccessFlags() | kAccObsoleteMethod);
+ AddAccessFlags(kAccObsoleteMethod);
}
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
@@ -251,7 +255,6 @@
return (GetAccessFlags() & kAccVarargs) != 0;
}
- template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsProxyMethod() REQUIRES_SHARED(Locks::mutator_lock_);
bool SkipAccessChecks() {
@@ -456,7 +459,7 @@
}
}
- ArtMethod* GetSingleImplementation()
+ ArtMethod* GetSingleImplementation(PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE void SetSingleImplementation(ArtMethod* method, PointerSize pointer_size) {
@@ -563,12 +566,15 @@
mirror::ClassLoader* GetClassLoader() REQUIRES_SHARED(Locks::mutator_lock_);
+ template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
mirror::DexCache* GetDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
mirror::DexCache* GetObsoleteDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy(PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+ ArtMethod* GetNonObsoleteMethod() REQUIRES_SHARED(Locks::mutator_lock_);
+
// May cause thread suspension due to class resolution.
bool EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> params)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -684,7 +690,8 @@
ArtMethod** dex_cache_resolved_methods_;
// Pointer to JNI function registered to this method, or a function to resolve the JNI function,
- // or the profiling data for non-native methods, or an ImtConflictTable.
+ // or the profiling data for non-native methods, or an ImtConflictTable, or the
+ // single-implementation of an abstract method.
void* data_;
// Method dispatch from quick compiled code invokes this pointer which may cause bridging into
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index ed83f1c..c7a94a9 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -72,7 +72,7 @@
// Import platform-independent constant defines from our autogenerated list.
// Export new defines (for assembly use) by editing cpp-define-generator def files.
#define DEFINE_CHECK_EQ ADD_TEST_EQ
-#include "generated/asm_support_gen.h"
+#include "asm_support_gen.h"
// Offset of field Thread::tlsPtr_.exception.
#define THREAD_EXCEPTION_OFFSET (THREAD_CARD_TABLE_OFFSET + __SIZEOF_POINTER__)
@@ -104,7 +104,7 @@
// Offset of field Thread::tlsPtr_.mterp_current_ibase.
#define THREAD_CURRENT_IBASE_OFFSET \
- (THREAD_LOCAL_OBJECTS_OFFSET + __SIZEOF_SIZE_T__ + (1 + 157) * __SIZEOF_POINTER__)
+ (THREAD_LOCAL_OBJECTS_OFFSET + __SIZEOF_SIZE_T__ + (1 + 161) * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_CURRENT_IBASE_OFFSET,
art::Thread::MterpCurrentIBaseOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.mterp_default_ibase.
diff --git a/runtime/atomic.h b/runtime/atomic.h
index e2a7259..45c3165 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -235,6 +235,11 @@
this->store(desired, std::memory_order_seq_cst);
}
+ // Atomically replace the value with desired value.
+ T ExchangeRelaxed(T desired_value) {
+ return this->exchange(desired_value, std::memory_order_relaxed);
+ }
+
// Atomically replace the value with desired value if it matches the expected value.
// Participates in total ordering of atomic operations.
bool CompareExchangeStrongSequentiallyConsistent(T expected_value, T desired_value) {
@@ -283,6 +288,10 @@
return this->fetch_sub(value, std::memory_order_seq_cst); // Return old value.
}
+ T FetchAndSubRelaxed(const T value) {
+ return this->fetch_sub(value, std::memory_order_relaxed); // Return old value.
+ }
+
T FetchAndOrSequentiallyConsistent(const T value) {
return this->fetch_or(value, std::memory_order_seq_cst); // Return old_value.
}
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 61e0aab..9fdb0cc 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -84,6 +84,7 @@
"Verifier ",
"CallingConv ",
"CHA ",
+ "Scheduler ",
};
template <bool kCount>
@@ -144,8 +145,11 @@
}
}
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Winstantiation-after-specialization"
// Explicitly instantiate the used implementation.
template class ArenaAllocatorStatsImpl<kArenaAllocatorCountAllocations>;
+#pragma GCC diagnostic pop
void ArenaAllocatorMemoryTool::DoMakeDefined(void* ptr, size_t size) {
MEMORY_TOOL_MAKE_DEFINED(ptr, size);
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 6c764cb..245ab3b 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -96,6 +96,7 @@
kArenaAllocVerifier,
kArenaAllocCallingConvention,
kArenaAllocCHA,
+ kArenaAllocScheduler,
kNumArenaAllocKinds
};
diff --git a/runtime/base/iteration_range.h b/runtime/base/iteration_range.h
index 9d45707..3f6f5d6 100644
--- a/runtime/base/iteration_range.h
+++ b/runtime/base/iteration_range.h
@@ -55,7 +55,7 @@
}
template <typename Container>
-inline auto ReverseRange(Container& c) {
+inline auto ReverseRange(Container&& c) {
typedef typename std::reverse_iterator<decltype(c.begin())> riter;
return MakeIterationRange(riter(c.end()), riter(c.begin()));
}
diff --git a/runtime/bit_memory_region.h b/runtime/bit_memory_region.h
new file mode 100644
index 0000000..c3b5be4
--- /dev/null
+++ b/runtime/bit_memory_region.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BIT_MEMORY_REGION_H_
+#define ART_RUNTIME_BIT_MEMORY_REGION_H_
+
+#include "memory_region.h"
+
+namespace art {
+
+// Bit memory region is a bit offset subregion of a normal memory region. This is useful for
+// abstracting away the bit start offset to avoid needing to pass it as an argument everywhere.
+class BitMemoryRegion FINAL : public ValueObject {
+ public:
+ BitMemoryRegion() = default;
+ ALWAYS_INLINE BitMemoryRegion(MemoryRegion region, size_t bit_offset, size_t bit_size) {
+ bit_start_ = bit_offset % kBitsPerByte;
+ const size_t start = bit_offset / kBitsPerByte;
+ const size_t end = (bit_offset + bit_size + kBitsPerByte - 1) / kBitsPerByte;
+ region_ = region.Subregion(start, end - start);
+ }
+
+ void* pointer() const { return region_.pointer(); }
+ size_t size() const { return region_.size(); }
+ size_t BitOffset() const { return bit_start_; }
+ size_t size_in_bits() const {
+ return region_.size_in_bits();
+ }
+
+ // Load a single bit in the region. The bit at offset 0 is the least
+ // significant bit in the first byte.
+ ALWAYS_INLINE bool LoadBit(uintptr_t bit_offset) const {
+ return region_.LoadBit(bit_offset + bit_start_);
+ }
+
+ ALWAYS_INLINE void StoreBit(uintptr_t bit_offset, bool value) const {
+ region_.StoreBit(bit_offset + bit_start_, value);
+ }
+
+ ALWAYS_INLINE uint32_t LoadBits(uintptr_t bit_offset, size_t length) const {
+ return region_.LoadBits(bit_offset + bit_start_, length);
+ }
+
+ // Store at a bit offset from inside the bit memory region.
+ ALWAYS_INLINE void StoreBits(uintptr_t bit_offset, uint32_t value, size_t length) {
+ region_.StoreBits(bit_offset + bit_start_, value, length);
+ }
+
+ private:
+ MemoryRegion region_;
+ size_t bit_start_ = 0;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_BIT_MEMORY_REGION_H_
diff --git a/runtime/cha.cc b/runtime/cha.cc
index d94b091..d11b12f 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -16,6 +16,7 @@
#include "cha.h"
+#include "art_method-inl.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "runtime.h"
@@ -185,7 +186,8 @@
};
void ClassHierarchyAnalysis::VerifyNonSingleImplementation(mirror::Class* verify_class,
- uint16_t verify_index) {
+ uint16_t verify_index,
+ ArtMethod* excluded_method) {
// Grab cha_lock_ to make sure all single-implementation updates are seen.
PointerSize image_pointer_size =
Runtime::Current()->GetClassLinker()->GetImagePointerSize();
@@ -195,9 +197,14 @@
return;
}
ArtMethod* verify_method = verify_class->GetVTableEntry(verify_index, image_pointer_size);
- DCHECK(!verify_method->HasSingleImplementation())
- << "class: " << verify_class->PrettyClass()
- << " verify_method: " << verify_method->PrettyMethod(true);
+ if (verify_method != excluded_method) {
+ DCHECK(!verify_method->HasSingleImplementation())
+ << "class: " << verify_class->PrettyClass()
+ << " verify_method: " << verify_method->PrettyMethod(true);
+ if (verify_method->IsAbstract()) {
+ DCHECK(verify_method->GetSingleImplementation(image_pointer_size) == nullptr);
+ }
+ }
verify_class = verify_class->GetSuperClass();
}
}
@@ -206,41 +213,160 @@
Handle<mirror::Class> klass,
ArtMethod* virtual_method,
ArtMethod* method_in_super,
- std::unordered_set<ArtMethod*>& invalidated_single_impl_methods) {
+ std::unordered_set<ArtMethod*>& invalidated_single_impl_methods,
+ PointerSize pointer_size) {
// TODO: if klass is not instantiable, virtual_method isn't invocable yet so
// even if it overrides, it doesn't invalidate single-implementation
// assumption.
- DCHECK_NE(virtual_method, method_in_super);
+ DCHECK((virtual_method != method_in_super) || virtual_method->IsAbstract());
DCHECK(method_in_super->GetDeclaringClass()->IsResolved()) << "class isn't resolved";
// If virtual_method doesn't come from a default interface method, it should
// be supplied by klass.
- DCHECK(virtual_method->IsCopied() ||
+ DCHECK(virtual_method == method_in_super ||
+ virtual_method->IsCopied() ||
virtual_method->GetDeclaringClass() == klass.Get());
- // A new virtual_method should set method_in_super to
- // non-single-implementation (if not set already).
- // We don't grab cha_lock_. Single-implementation flag won't be set to true
- // again once it's set to false.
+ // To make updating single-implementation flags simple, we always maintain the following
+ // invariant:
+ // Say all virtual methods in the same vtable slot, starting from the bottom child class
+ // to super classes, is a sequence of unique methods m3, m2, m1, ... (after removing duplicate
+ // methods for inherited methods).
+ // For example for the following class hierarchy,
+ // class A { void m() { ... } }
+ // class B extends A { void m() { ... } }
+ // class C extends B {}
+ // class D extends C { void m() { ... } }
+ // the sequence is D.m(), B.m(), A.m().
+ // The single-implementation status for that sequence of methods begin with one or two true's,
+ // then become all falses. The only case where two true's are possible is for one abstract
+ // method m and one non-abstract method mImpl that overrides method m.
+ // With the invariant, when linking in a new class, we need to update at most one or
+ // two methods in the sequence for their single-implementation status, in order to maintain
+ // the invariant.
+
if (!method_in_super->HasSingleImplementation()) {
// method_in_super already has multiple implementations. All methods in the
// same vtable slots in its super classes should have
// non-single-implementation already.
if (kIsDebugBuild) {
VerifyNonSingleImplementation(klass->GetSuperClass()->GetSuperClass(),
- method_in_super->GetMethodIndex());
+ method_in_super->GetMethodIndex(),
+ nullptr /* excluded_method */);
}
return;
}
// Native methods don't have single-implementation flag set.
DCHECK(!method_in_super->IsNative());
- // Invalidate method_in_super's single-implementation status.
- invalidated_single_impl_methods.insert(method_in_super);
+
+ uint16_t method_index = method_in_super->GetMethodIndex();
+ if (method_in_super->IsAbstract()) {
+ if (kIsDebugBuild) {
+ // An abstract method should have made all methods in the same vtable
+ // slot above it in the class hierarchy having non-single-implementation.
+ mirror::Class* super_super = klass->GetSuperClass()->GetSuperClass();
+ VerifyNonSingleImplementation(super_super,
+ method_index,
+ method_in_super);
+ }
+
+ if (virtual_method->IsAbstract()) {
+ // SUPER: abstract, VIRTUAL: abstract.
+ if (method_in_super == virtual_method) {
+ DCHECK(klass->IsInstantiable());
+ // An instantiable subclass hasn't provided a concrete implementation of
+ // the abstract method. Invoking method_in_super may throw AbstractMethodError.
+ // This is an uncommon case, so we simply treat method_in_super as not
+ // having single-implementation.
+ invalidated_single_impl_methods.insert(method_in_super);
+ return;
+ } else {
+ // One abstract method overrides another abstract method. This is an uncommon
+ // case. We simply treat method_in_super as not having single-implementation.
+ invalidated_single_impl_methods.insert(method_in_super);
+ return;
+ }
+ } else {
+ // SUPER: abstract, VIRTUAL: non-abstract.
+ // A non-abstract method overrides an abstract method.
+ if (method_in_super->GetSingleImplementation(pointer_size) == nullptr) {
+ // Abstract method_in_super has no implementation yet.
+ // We need to grab cha_lock_ for further checking/updating due to possible
+ // races.
+ MutexLock cha_mu(Thread::Current(), *Locks::cha_lock_);
+ if (!method_in_super->HasSingleImplementation()) {
+ return;
+ }
+ if (method_in_super->GetSingleImplementation(pointer_size) == nullptr) {
+ // virtual_method becomes the first implementation for method_in_super.
+ method_in_super->SetSingleImplementation(virtual_method, pointer_size);
+ // Keep method_in_super's single-implementation status.
+ return;
+ }
+ // Fall through to invalidate method_in_super's single-implementation status.
+ }
+ // Abstract method_in_super already got one implementation.
+ // Invalidate method_in_super's single-implementation status.
+ invalidated_single_impl_methods.insert(method_in_super);
+ return;
+ }
+ } else {
+ if (virtual_method->IsAbstract()) {
+ // SUPER: non-abstract, VIRTUAL: abstract.
+ // An abstract method overrides a non-abstract method. This is an uncommon
+ // case, we simply treat both methods as not having single-implementation.
+ invalidated_single_impl_methods.insert(virtual_method);
+ // Fall-through to handle invalidating method_in_super of its
+ // single-implementation status.
+ }
+
+ // SUPER: non-abstract, VIRTUAL: non-abstract/abstract(fall-through from previous if).
+ // Invalidate method_in_super's single-implementation status.
+ invalidated_single_impl_methods.insert(method_in_super);
+
+ // method_in_super might be the single-implementation of another abstract method,
+ // which should be also invalidated of its single-implementation status.
+ mirror::Class* super_super = klass->GetSuperClass()->GetSuperClass();
+ while (super_super != nullptr &&
+ method_index < super_super->GetVTableLength()) {
+ ArtMethod* method_in_super_super = super_super->GetVTableEntry(method_index, pointer_size);
+ if (method_in_super_super != method_in_super) {
+ if (method_in_super_super->IsAbstract()) {
+ if (method_in_super_super->HasSingleImplementation()) {
+ // Invalidate method_in_super's single-implementation status.
+ invalidated_single_impl_methods.insert(method_in_super_super);
+ // No need to further traverse up the class hierarchy since if there
+ // are cases that one abstract method overrides another method, we
+ // should have made that method having non-single-implementation already.
+ } else {
+ // method_in_super_super is already non-single-implementation.
+ // No need to further traverse up the class hierarchy.
+ }
+ } else {
+ DCHECK(!method_in_super_super->HasSingleImplementation());
+ // No need to further traverse up the class hierarchy since two non-abstract
+ // methods (method_in_super and method_in_super_super) should have set all
+ // other methods (abstract or not) in the vtable slot to be non-single-implementation.
+ }
+
+ if (kIsDebugBuild) {
+ VerifyNonSingleImplementation(super_super->GetSuperClass(),
+ method_index,
+ method_in_super_super);
+ }
+ // No need to go any further.
+ return;
+ } else {
+ super_super = super_super->GetSuperClass();
+ }
+ }
+ }
}
void ClassHierarchyAnalysis::InitSingleImplementationFlag(Handle<mirror::Class> klass,
- ArtMethod* method) {
+ ArtMethod* method,
+ PointerSize pointer_size) {
DCHECK(method->IsCopied() || method->GetDeclaringClass() == klass.Get());
if (klass->IsFinal() || method->IsFinal()) {
// Final classes or methods do not need CHA for devirtualization.
@@ -253,16 +379,21 @@
// cannot be inlined. It's not worthwhile to devirtualize the
// call which can add a deoptimization point.
DCHECK(!method->HasSingleImplementation());
+ } else if (method->IsAbstract()) {
+ if (method->GetDeclaringClass()->IsInstantiable()) {
+ // Rare case, but we do accept it (such as 800-smali/smali/b_26143249.smali).
+ // Do not attempt to devirtualize it.
+ method->SetHasSingleImplementation(false);
+ } else {
+ // Abstract method starts with single-implementation flag set and null
+ // implementation method.
+ method->SetHasSingleImplementation(true);
+ DCHECK(method->GetSingleImplementation(pointer_size) == nullptr);
+ }
} else {
method->SetHasSingleImplementation(true);
- if (method->IsAbstract()) {
- // There is no real implementation yet.
- // TODO: implement single-implementation logic for abstract methods.
- DCHECK(method->GetSingleImplementation() == nullptr);
- } else {
- // Single implementation of non-abstract method is itself.
- DCHECK_EQ(method->GetSingleImplementation(), method);
- }
+ // Single implementation of non-abstract method is itself.
+ DCHECK_EQ(method->GetSingleImplementation(pointer_size), method);
}
}
@@ -286,19 +417,29 @@
ArtMethod* method_in_super = super_class->GetVTableEntry(i, image_pointer_size);
if (method == method_in_super) {
// vtable slot entry is inherited from super class.
+ if (method->IsAbstract() && klass->IsInstantiable()) {
+ // An instantiable class that inherits an abstract method is treated as
+ // supplying an implementation that throws AbstractMethodError.
+ CheckSingleImplementationInfo(klass,
+ method,
+ method_in_super,
+ invalidated_single_impl_methods,
+ image_pointer_size);
+ }
continue;
}
- InitSingleImplementationFlag(klass, method);
+ InitSingleImplementationFlag(klass, method, image_pointer_size);
CheckSingleImplementationInfo(klass,
method,
method_in_super,
- invalidated_single_impl_methods);
+ invalidated_single_impl_methods,
+ image_pointer_size);
}
// For new virtual methods that don't override.
for (int32_t i = super_class->GetVTableLength(); i < klass->GetVTableLength(); ++i) {
ArtMethod* method = klass->GetVTableEntry(i, image_pointer_size);
- InitSingleImplementationFlag(klass, method);
+ InitSingleImplementationFlag(klass, method, image_pointer_size);
}
Runtime* const runtime = Runtime::Current();
@@ -321,6 +462,10 @@
continue;
}
invalidated->SetHasSingleImplementation(false);
+ if (invalidated->IsAbstract()) {
+ // Clear the single implementation method.
+ invalidated->SetSingleImplementation(nullptr, image_pointer_size);
+ }
if (runtime->IsAotCompiler()) {
// No need to invalidate any compiled code as the AotCompiler doesn't
diff --git a/runtime/cha.h b/runtime/cha.h
index ada5c89..a56a752 100644
--- a/runtime/cha.h
+++ b/runtime/cha.h
@@ -112,7 +112,9 @@
void UpdateAfterLoadingOf(Handle<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void InitSingleImplementationFlag(Handle<mirror::Class> klass, ArtMethod* method)
+ void InitSingleImplementationFlag(Handle<mirror::Class> klass,
+ ArtMethod* method,
+ PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
// `virtual_method` in `klass` overrides `method_in_super`.
@@ -123,12 +125,16 @@
Handle<mirror::Class> klass,
ArtMethod* virtual_method,
ArtMethod* method_in_super,
- std::unordered_set<ArtMethod*>& invalidated_single_impl_methods)
+ std::unordered_set<ArtMethod*>& invalidated_single_impl_methods,
+ PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Verify all methods in the same vtable slot from verify_class and its supers
- // don't have single-implementation.
- void VerifyNonSingleImplementation(mirror::Class* verify_class, uint16_t verify_index)
+ // For all methods in vtable slot at `verify_index` of `verify_class` and its
+ // superclasses, single-implementation status should be false, except if the
+ // method is `excluded_method`.
+ void VerifyNonSingleImplementation(mirror::Class* verify_class,
+ uint16_t verify_index,
+ ArtMethod* excluded_method)
REQUIRES_SHARED(Locks::mutator_lock_);
// A map that maps a method to a set of compiled code that assumes that method has a
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index 93fdaa6..a955cb5 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -67,7 +67,8 @@
uint16_t number_of_dex_registers = m->GetCodeItem()->registers_size_;
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
- uint32_t register_mask = stack_map.GetRegisterMask(encoding.stack_map_encoding);
+ uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, stack_map);
+ BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map);
for (int i = 0; i < number_of_references; ++i) {
int reg = registers[i];
CHECK(reg < m->GetCodeItem()->registers_size_);
@@ -80,8 +81,7 @@
break;
case DexRegisterLocation::Kind::kInStack:
DCHECK_EQ(location.GetValue() % kFrameSlotSize, 0);
- CHECK(stack_map.GetStackMaskBit(encoding.stack_map_encoding,
- location.GetValue() / kFrameSlotSize));
+ CHECK(stack_mask.LoadBit(location.GetValue() / kFrameSlotSize));
break;
case DexRegisterLocation::Kind::kInRegister:
case DexRegisterLocation::Kind::kInRegisterHigh:
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 2e17dd8..3438810 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -68,16 +68,10 @@
inline mirror::String* ClassLinker::ResolveString(dex::StringIndex string_idx,
ArtMethod* referrer) {
Thread::PoisonObjectPointersIfDebug();
- ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
- // MethodVerifier refuses methods with string_idx out of bounds.
- DCHECK_LT(string_idx.index_, declaring_class->GetDexFile().NumStringIds());
- ObjPtr<mirror::String> string =
- mirror::StringDexCachePair::Lookup(declaring_class->GetDexCache()->GetStrings(),
- string_idx.index_,
- mirror::DexCache::kDexCacheStringCacheSize).Read();
+ ObjPtr<mirror::String> string = referrer->GetDexCache()->GetResolvedString(string_idx);
if (UNLIKELY(string == nullptr)) {
StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
const DexFile& dex_file = *dex_cache->GetDexFile();
string = ResolveString(dex_file, string_idx, dex_cache);
}
@@ -93,7 +87,7 @@
if (UNLIKELY(resolved_type == nullptr)) {
StackHandleScope<2> hs(Thread::Current());
ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
@@ -159,7 +153,7 @@
if (UNLIKELY(resolved_method == nullptr)) {
ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
StackHandleScope<2> hs(self);
- Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
+ Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(referrer->GetDexCache()));
Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile* dex_file = h_dex_cache->GetDexFile();
resolved_method = ResolveMethod<kResolveMode>(*dex_file,
@@ -239,7 +233,7 @@
ArtMethod* ClassLinker::FindMethodForProxy(ObjPtr<mirror::Class> proxy_class,
ArtMethod* proxy_method) {
DCHECK(proxy_class->IsProxyClass());
- DCHECK(proxy_method->IsProxyMethod<kReadBarrierOption>());
+ DCHECK(proxy_method->IsProxyMethod());
{
Thread* const self = Thread::Current();
ReaderMutexLock mu(self, *Locks::dex_lock_);
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index b8ed530..edd6e3b 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -19,6 +19,7 @@
#include <algorithm>
#include <deque>
#include <iostream>
+#include <map>
#include <memory>
#include <queue>
#include <string>
@@ -1340,7 +1341,7 @@
// The image space is not yet added to the heap, avoid read barriers.
ObjPtr<mirror::Class> klass = types[j].Read();
if (space->HasAddress(klass.Ptr())) {
- DCHECK_NE(klass->GetStatus(), mirror::Class::kStatusError);
+ DCHECK(!klass->IsErroneous()) << klass->GetStatus();
auto it = new_class_set->Find(ClassTable::TableSlot(klass));
DCHECK(it != new_class_set->end());
DCHECK_EQ(it->Read(), klass);
@@ -1703,7 +1704,7 @@
for (int32_t j = 0, num_types = h_dex_cache->NumResolvedTypes(); j < num_types; j++) {
ObjPtr<mirror::Class> klass = types[j].Read();
if (klass != nullptr) {
- DCHECK_NE(klass->GetStatus(), mirror::Class::kStatusError);
+ DCHECK(!klass->IsErroneous()) << klass->GetStatus();
}
}
} else {
@@ -2232,7 +2233,7 @@
// For temporary classes we must wait for them to be retired.
if (init_done_ && klass->IsTemp()) {
CHECK(!klass->IsResolved());
- if (klass->IsErroneous()) {
+ if (klass->IsErroneousUnresolved()) {
ThrowEarlierClassFailure(klass);
return nullptr;
}
@@ -2240,10 +2241,10 @@
Handle<mirror::Class> h_class(hs.NewHandle(klass));
ObjectLock<mirror::Class> lock(self, h_class);
// Loop and wait for the resolving thread to retire this class.
- while (!h_class->IsRetired() && !h_class->IsErroneous()) {
+ while (!h_class->IsRetired() && !h_class->IsErroneousUnresolved()) {
lock.WaitIgnoringInterrupts();
}
- if (h_class->IsErroneous()) {
+ if (h_class->IsErroneousUnresolved()) {
ThrowEarlierClassFailure(h_class.Get());
return nullptr;
}
@@ -2258,7 +2259,7 @@
static const size_t kNumYieldIterations = 1000;
// How long each sleep is in us.
static const size_t kSleepDurationUS = 1000; // 1 ms.
- while (!klass->IsResolved() && !klass->IsErroneous()) {
+ while (!klass->IsResolved() && !klass->IsErroneousUnresolved()) {
StackHandleScope<1> hs(self);
HandleWrapperObjPtr<mirror::Class> h_class(hs.NewHandleWrapper(&klass));
{
@@ -2269,7 +2270,7 @@
// Check for circular dependencies between classes, the lock is required for SetStatus.
if (!h_class->IsResolved() && h_class->GetClinitThreadId() == self->GetTid()) {
ThrowClassCircularityError(h_class.Get());
- mirror::Class::SetStatus(h_class, mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(h_class, mirror::Class::kStatusErrorUnresolved, self);
return nullptr;
}
}
@@ -2286,7 +2287,7 @@
++index;
}
- if (klass->IsErroneous()) {
+ if (klass->IsErroneousUnresolved()) {
ThrowEarlierClassFailure(klass);
return nullptr;
}
@@ -2649,6 +2650,10 @@
dex_class_def,
&new_dex_file,
&new_class_def);
+ // Check to see if an exception happened during runtime callbacks. Return if so.
+ if (self->IsExceptionPending()) {
+ return nullptr;
+ }
ObjPtr<mirror::DexCache> dex_cache = RegisterDexFile(*new_dex_file, class_loader.Get());
if (dex_cache == nullptr) {
self->AssertPendingOOMException();
@@ -2687,7 +2692,7 @@
// An exception occured during load, set status to erroneous while holding klass' lock in case
// notification is necessary.
if (!klass->IsErroneous()) {
- mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusErrorUnresolved, self);
}
return nullptr;
}
@@ -2697,7 +2702,7 @@
if (!LoadSuperAndInterfaces(klass, *new_dex_file)) {
// Loading failed.
if (!klass->IsErroneous()) {
- mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusErrorUnresolved, self);
}
return nullptr;
}
@@ -2716,13 +2721,13 @@
if (!LinkClass(self, descriptor, klass, interfaces, &h_new_class)) {
// Linking failed.
if (!klass->IsErroneous()) {
- mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusErrorUnresolved, self);
}
return nullptr;
}
self->AssertNoPendingException();
CHECK(h_new_class.Get() != nullptr) << descriptor;
- CHECK(h_new_class->IsResolved()) << descriptor;
+ CHECK(h_new_class->IsResolved() && !h_new_class->IsErroneousResolved()) << descriptor;
// Instrumentation may have updated entrypoints for all methods of all
// classes. However it could not update methods of this class while we
@@ -2857,9 +2862,12 @@
return true;
}
- if (runtime->IsFullyDeoptable()) {
- // We need to be able to deoptimize at any time so we should always just ignore precompiled
- // code and go to the interpreter assuming we don't already have jitted code.
+ if (runtime->IsJavaDebuggable()) {
+ // For simplicity, we ignore precompiled code and go to the interpreter
+ // assuming we don't already have jitted code.
+ // We could look at the oat file where `quick_code` is being defined,
+ // and check whether it's been compiled debuggable, but we decided to
+ // only rely on the JIT for debuggable apps.
jit::Jit* jit = Runtime::Current()->GetJit();
return (jit == nullptr) || !jit->GetCodeCache()->ContainsPc(quick_code);
}
@@ -2867,18 +2875,13 @@
if (runtime->IsNativeDebuggable()) {
DCHECK(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse());
// If we are doing native debugging, ignore application's AOT code,
- // since we want to JIT it with extra stackmaps for native debugging.
- // On the other hand, keep all AOT code from the boot image, since the
- // blocking JIT would results in non-negligible performance impact.
+ // since we want to JIT it (at first use) with extra stackmaps for native
+ // debugging. We keep however all AOT code from the boot image,
+ // since the JIT-at-first-use is blocking and would result in non-negligible
+ // startup performance impact.
return !runtime->GetHeap()->IsInBootImageOatFile(quick_code);
}
- if (Dbg::IsDebuggerActive()) {
- // Boot image classes may be AOT-compiled as non-debuggable.
- // This is not suitable for the Java debugger, so ignore the AOT code.
- return runtime->GetHeap()->IsInBootImageOatFile(quick_code);
- }
-
return false;
}
@@ -3817,7 +3820,7 @@
}
// Need to grab the lock to change status.
ObjectLock<mirror::Class> super_lock(self, klass);
- mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusErrorResolved, self);
return false;
}
@@ -3939,8 +3942,8 @@
bool preverified = VerifyClassUsingOatFile(dex_file, klass.Get(), oat_file_class_status);
// If the oat file says the class had an error, re-run the verifier. That way we will get a
// precise error message. To ensure a rerun, test:
- // oat_file_class_status == mirror::Class::kStatusError => !preverified
- DCHECK(!(oat_file_class_status == mirror::Class::kStatusError) || !preverified);
+ // mirror::Class::IsErroneous(oat_file_class_status) => !preverified
+ DCHECK(!mirror::Class::IsErroneous(oat_file_class_status) || !preverified);
std::string error_msg;
verifier::MethodVerifier::FailureKind verifier_failure = verifier::MethodVerifier::kNoFailure;
@@ -3998,7 +4001,7 @@
<< " because: " << error_msg;
self->AssertNoPendingException();
ThrowVerifyError(klass.Get(), "%s", error_msg.c_str());
- mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusErrorResolved, self);
}
if (preverified || verifier_failure == verifier::MethodVerifier::kNoFailure) {
// Class is verified so we don't need to do any access check on its methods.
@@ -4089,7 +4092,7 @@
// at compile time).
return false;
}
- if (oat_file_class_status == mirror::Class::kStatusError) {
+ if (mirror::Class::IsErroneous(oat_file_class_status)) {
// Compile time verification failed with a hard error. This is caused by invalid instructions
// in the class. These errors are unrecoverable.
return false;
@@ -4248,7 +4251,7 @@
Handle<mirror::ObjectArray<mirror::Class>> h_interfaces(
hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>>(interfaces)));
if (!LinkClass(self, descriptor.c_str(), klass, h_interfaces, &new_class)) {
- mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusErrorUnresolved, self);
return nullptr;
}
}
@@ -4463,7 +4466,8 @@
return false;
}
- CHECK(klass->IsResolved()) << klass->PrettyClass() << ": state=" << klass->GetStatus();
+ CHECK(klass->IsResolved() && !klass->IsErroneousResolved())
+ << klass->PrettyClass() << ": state=" << klass->GetStatus();
if (!klass->IsVerified()) {
VerifyClass(self, klass);
@@ -4498,7 +4502,7 @@
// A separate thread could have moved us all the way to initialized. A "simple" example
// involves a subclass of the current class being initialized at the same time (which
// will implicitly initialize the superclass, if scheduled that way). b/28254258
- DCHECK_NE(mirror::Class::kStatusError, klass->GetStatus());
+ DCHECK(!klass->IsErroneous()) << klass->GetStatus();
if (klass->IsInitialized()) {
return true;
}
@@ -4525,7 +4529,7 @@
}
if (!ValidateSuperClassDescriptors(klass)) {
- mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusErrorResolved, self);
return false;
}
self->AllowThreadSuspension();
@@ -4561,7 +4565,7 @@
<< (self->GetException() != nullptr ? self->GetException()->Dump() : "");
ObjectLock<mirror::Class> lock(self, klass);
// Initialization failed because the super-class is erroneous.
- mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusErrorResolved, self);
return false;
}
}
@@ -4592,7 +4596,7 @@
if (!iface_initialized) {
ObjectLock<mirror::Class> lock(self, klass);
// Initialization failed because one of our interfaces with default methods is erroneous.
- mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusErrorResolved, self);
return false;
}
}
@@ -4665,7 +4669,7 @@
if (self->IsExceptionPending()) {
WrapExceptionInInitializer(klass);
- mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusErrorResolved, self);
success = false;
} else if (Runtime::Current()->IsTransactionAborted()) {
// The exception thrown when the transaction aborted has been caught and cleared
@@ -4674,7 +4678,7 @@
<< mirror::Class::PrettyDescriptor(klass.Get())
<< " without exception while transaction was aborted: re-throw it now.";
Runtime::Current()->ThrowTransactionAbortError(self);
- mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusErrorResolved, self);
success = false;
} else {
RuntimeStats* global_stats = Runtime::Current()->GetStats();
@@ -4758,7 +4762,7 @@
// we were not using WaitIgnoringInterrupts), bail out.
if (self->IsExceptionPending()) {
WrapExceptionInInitializer(klass);
- mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusErrorResolved, self);
return false;
}
// Spurious wakeup? Go back to waiting.
@@ -5169,7 +5173,7 @@
klass->SetIFieldsPtrUnchecked(nullptr);
if (UNLIKELY(h_new_class.Get() == nullptr)) {
self->AssertPendingOOMException();
- mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusErrorUnresolved, self);
return false;
}
@@ -7781,7 +7785,7 @@
}
}
}
- DCHECK((resolved == nullptr) || resolved->IsResolved() || resolved->IsErroneous())
+ DCHECK((resolved == nullptr) || resolved->IsResolved())
<< resolved->PrettyDescriptor() << " " << resolved->GetStatus();
return resolved.Ptr();
}
@@ -8478,6 +8482,81 @@
}
}
+class GetResolvedClassesVisitor : public ClassVisitor {
+ public:
+ GetResolvedClassesVisitor(std::set<DexCacheResolvedClasses>* result, bool ignore_boot_classes)
+ : result_(result),
+ ignore_boot_classes_(ignore_boot_classes),
+ last_resolved_classes_(result->end()),
+ last_dex_file_(nullptr),
+ vlog_is_on_(VLOG_IS_ON(class_linker)),
+ extra_stats_(),
+ last_extra_stats_(extra_stats_.end()) { }
+
+ bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!klass->IsProxyClass() &&
+ !klass->IsArrayClass() &&
+ klass->IsResolved() &&
+ !klass->IsErroneousResolved() &&
+ (!ignore_boot_classes_ || klass->GetClassLoader() != nullptr)) {
+ const DexFile& dex_file = klass->GetDexFile();
+ if (&dex_file != last_dex_file_) {
+ last_dex_file_ = &dex_file;
+ DexCacheResolvedClasses resolved_classes(dex_file.GetLocation(),
+ dex_file.GetBaseLocation(),
+ dex_file.GetLocationChecksum());
+ last_resolved_classes_ = result_->find(resolved_classes);
+ if (last_resolved_classes_ == result_->end()) {
+ last_resolved_classes_ = result_->insert(resolved_classes).first;
+ }
+ }
+ bool added = last_resolved_classes_->AddClass(klass->GetDexTypeIndex());
+ if (UNLIKELY(vlog_is_on_) && added) {
+ const DexCacheResolvedClasses* resolved_classes = std::addressof(*last_resolved_classes_);
+ if (last_extra_stats_ == extra_stats_.end() ||
+ last_extra_stats_->first != resolved_classes) {
+ last_extra_stats_ = extra_stats_.find(resolved_classes);
+ if (last_extra_stats_ == extra_stats_.end()) {
+ last_extra_stats_ =
+ extra_stats_.emplace(resolved_classes, ExtraStats(dex_file.NumClassDefs())).first;
+ }
+ }
+ }
+ }
+ return true;
+ }
+
+ void PrintStatistics() const {
+ if (vlog_is_on_) {
+ for (const DexCacheResolvedClasses& resolved_classes : *result_) {
+ auto it = extra_stats_.find(std::addressof(resolved_classes));
+ DCHECK(it != extra_stats_.end());
+ const ExtraStats& extra_stats = it->second;
+ LOG(INFO) << "Dex location " << resolved_classes.GetDexLocation()
+ << " has " << resolved_classes.GetClasses().size() << " / "
+ << extra_stats.number_of_class_defs_ << " resolved classes";
+ }
+ }
+ }
+
+ private:
+ struct ExtraStats {
+ explicit ExtraStats(uint32_t number_of_class_defs)
+ : number_of_class_defs_(number_of_class_defs) {}
+ uint32_t number_of_class_defs_;
+ };
+
+ std::set<DexCacheResolvedClasses>* result_;
+ bool ignore_boot_classes_;
+ std::set<DexCacheResolvedClasses>::iterator last_resolved_classes_;
+ const DexFile* last_dex_file_;
+
+ // Statistics.
+ bool vlog_is_on_;
+ std::map<const DexCacheResolvedClasses*, ExtraStats> extra_stats_;
+ std::map<const DexCacheResolvedClasses*, ExtraStats>::iterator last_extra_stats_;
+};
+
std::set<DexCacheResolvedClasses> ClassLinker::GetResolvedClasses(bool ignore_boot_classes) {
ScopedTrace trace(__PRETTY_FUNCTION__);
ScopedObjectAccess soa(Thread::Current());
@@ -8485,64 +8564,12 @@
std::set<DexCacheResolvedClasses> ret;
VLOG(class_linker) << "Collecting resolved classes";
const uint64_t start_time = NanoTime();
- ReaderMutexLock mu(soa.Self(), *Locks::dex_lock_);
- // Loop through all the dex caches and inspect resolved classes.
- for (const ClassLinker::DexCacheData& data : GetDexCachesData()) {
- if (soa.Self()->IsJWeakCleared(data.weak_root)) {
- continue;
- }
- ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(data.weak_root);
- if (dex_cache == nullptr) {
- continue;
- }
- const DexFile* dex_file = dex_cache->GetDexFile();
- const std::string& location = dex_file->GetLocation();
- const size_t num_class_defs = dex_file->NumClassDefs();
- // Use the resolved types, this will miss array classes.
- const size_t num_types = dex_file->NumTypeIds();
- VLOG(class_linker) << "Collecting class profile for dex file " << location
- << " types=" << num_types << " class_defs=" << num_class_defs;
- DexCacheResolvedClasses resolved_classes(dex_file->GetLocation(),
- dex_file->GetBaseLocation(),
- dex_file->GetLocationChecksum());
- size_t num_resolved = 0;
- std::unordered_set<dex::TypeIndex> class_set;
- CHECK_EQ(num_types, dex_cache->NumResolvedTypes());
- for (size_t i = 0; i < num_types; ++i) {
- ObjPtr<mirror::Class> klass = dex_cache->GetResolvedType(dex::TypeIndex(i));
- // Filter out null class loader since that is the boot class loader.
- if (klass == nullptr || (ignore_boot_classes && klass->GetClassLoader() == nullptr)) {
- continue;
- }
- ++num_resolved;
- DCHECK(!klass->IsProxyClass());
- if (!klass->IsResolved()) {
- DCHECK(klass->IsErroneous());
- continue;
- }
- ObjPtr<mirror::DexCache> klass_dex_cache = klass->GetDexCache();
- if (klass_dex_cache == dex_cache) {
- DCHECK(klass->IsResolved());
- CHECK_LT(klass->GetDexClassDefIndex(), num_class_defs);
- class_set.insert(klass->GetDexTypeIndex());
- }
- }
-
- if (!class_set.empty()) {
- auto it = ret.find(resolved_classes);
- if (it != ret.end()) {
- // Already have the key, union the class type indexes.
- it->AddClasses(class_set.begin(), class_set.end());
- } else {
- resolved_classes.AddClasses(class_set.begin(), class_set.end());
- ret.insert(resolved_classes);
- }
- }
-
- VLOG(class_linker) << "Dex location " << location << " has " << num_resolved << " / "
- << num_class_defs << " resolved classes";
+ GetResolvedClassesVisitor visitor(&ret, ignore_boot_classes);
+ VisitClasses(&visitor);
+ if (VLOG_IS_ON(class_linker)) {
+ visitor.PrintStatistics();
+ LOG(INFO) << "Collecting class profile took " << PrettyDuration(NanoTime() - start_time);
}
- VLOG(class_linker) << "Collecting class profile took " << PrettyDuration(NanoTime() - start_time);
return ret;
}
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index d3bb58d..5042fb7 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -1213,7 +1213,7 @@
const DexFile& initial_dex_file ATTRIBUTE_UNUSED,
const DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
/*out*/DexFile const** final_dex_file ATTRIBUTE_UNUSED,
- /*out*/DexFile::ClassDef const** final_dex_cache ATTRIBUTE_UNUSED)
+ /*out*/DexFile::ClassDef const** final_class_def ATTRIBUTE_UNUSED)
REQUIRES_SHARED(Locks::mutator_lock_) {}
// A class has been loaded.
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 7b6c0dc..17510bb 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -87,6 +87,7 @@
EXPECT_FALSE(primitive->IsErroneous());
EXPECT_TRUE(primitive->IsLoaded());
EXPECT_TRUE(primitive->IsResolved());
+ EXPECT_FALSE(primitive->IsErroneousResolved());
EXPECT_TRUE(primitive->IsVerified());
EXPECT_TRUE(primitive->IsInitialized());
EXPECT_FALSE(primitive->IsArrayInstance());
@@ -125,6 +126,7 @@
EXPECT_FALSE(JavaLangObject->IsErroneous());
EXPECT_TRUE(JavaLangObject->IsLoaded());
EXPECT_TRUE(JavaLangObject->IsResolved());
+ EXPECT_FALSE(JavaLangObject->IsErroneousResolved());
EXPECT_TRUE(JavaLangObject->IsVerified());
EXPECT_TRUE(JavaLangObject->IsInitialized());
EXPECT_FALSE(JavaLangObject->IsArrayInstance());
@@ -199,6 +201,7 @@
EXPECT_FALSE(array->IsErroneous());
EXPECT_TRUE(array->IsLoaded());
EXPECT_TRUE(array->IsResolved());
+ EXPECT_FALSE(array->IsErroneousResolved());
EXPECT_TRUE(array->IsVerified());
EXPECT_TRUE(array->IsInitialized());
EXPECT_FALSE(array->IsArrayInstance());
@@ -270,6 +273,7 @@
EXPECT_TRUE(klass->GetDexCache() != nullptr);
EXPECT_TRUE(klass->IsLoaded());
EXPECT_TRUE(klass->IsResolved());
+ EXPECT_FALSE(klass->IsErroneousResolved());
EXPECT_FALSE(klass->IsErroneous());
EXPECT_FALSE(klass->IsArrayClass());
EXPECT_TRUE(klass->GetComponentType() == nullptr);
@@ -739,13 +743,22 @@
}
};
+struct MethodHandleOffsets : public CheckOffsets<mirror::MethodHandle> {
+ MethodHandleOffsets() : CheckOffsets<mirror::MethodHandle>(
+ false, "Ljava/lang/invoke/MethodHandle;") {
+ addOffset(OFFSETOF_MEMBER(mirror::MethodHandle, art_field_or_method_), "artFieldOrMethod");
+ addOffset(OFFSETOF_MEMBER(mirror::MethodHandle, cached_spread_invoker_),
+ "cachedSpreadInvoker");
+ addOffset(OFFSETOF_MEMBER(mirror::MethodHandle, handle_kind_), "handleKind");
+ addOffset(OFFSETOF_MEMBER(mirror::MethodHandle, nominal_type_), "nominalType");
+ addOffset(OFFSETOF_MEMBER(mirror::MethodHandle, method_type_), "type");
+ }
+};
+
struct MethodHandleImplOffsets : public CheckOffsets<mirror::MethodHandleImpl> {
MethodHandleImplOffsets() : CheckOffsets<mirror::MethodHandleImpl>(
- false, "Ljava/lang/invoke/MethodHandle;") {
- addOffset(OFFSETOF_MEMBER(mirror::MethodHandleImpl, art_field_or_method_), "artFieldOrMethod");
- addOffset(OFFSETOF_MEMBER(mirror::MethodHandleImpl, handle_kind_), "handleKind");
- addOffset(OFFSETOF_MEMBER(mirror::MethodHandleImpl, nominal_type_), "nominalType");
- addOffset(OFFSETOF_MEMBER(mirror::MethodHandleImpl, method_type_), "type");
+ false, "Ljava/lang/invoke/MethodHandleImpl;") {
+ addOffset(OFFSETOF_MEMBER(mirror::MethodHandleImpl, info_), "info");
}
};
@@ -779,6 +792,7 @@
EXPECT_TRUE(FieldOffsets().Check());
EXPECT_TRUE(ExecutableOffsets().Check());
EXPECT_TRUE(MethodTypeOffsets().Check());
+ EXPECT_TRUE(MethodHandleOffsets().Check());
EXPECT_TRUE(MethodHandleImplOffsets().Check());
EXPECT_TRUE(EmulatedStackFrameOffsets().Check());
}
@@ -857,6 +871,7 @@
EXPECT_FALSE(MyClass->IsErroneous());
EXPECT_TRUE(MyClass->IsLoaded());
EXPECT_TRUE(MyClass->IsResolved());
+ EXPECT_FALSE(MyClass->IsErroneousResolved());
EXPECT_FALSE(MyClass->IsVerified());
EXPECT_FALSE(MyClass->IsInitialized());
EXPECT_FALSE(MyClass->IsArrayInstance());
@@ -941,6 +956,47 @@
array_klass);
}
+TEST_F(ClassLinkerTest, LookupResolvedTypeErroneousInit) {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader>(LoadDex("ErroneousInit"))));
+ AssertNonExistentClass("LErroneousInit;");
+ Handle<mirror::Class> klass =
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), "LErroneousInit;", class_loader));
+ ASSERT_OBJ_PTR_NE(klass.Get(), ObjPtr<mirror::Class>(nullptr));
+ dex::TypeIndex type_idx = klass->GetClassDef()->class_idx_;
+ Handle<mirror::DexCache> dex_cache = hs.NewHandle(klass->GetDexCache());
+ const DexFile& dex_file = klass->GetDexFile();
+ EXPECT_OBJ_PTR_EQ(
+ class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get()),
+ klass.Get());
+ // Zero out the resolved type and make sure LookupResolvedType still finds it.
+ dex_cache->SetResolvedType(type_idx, nullptr);
+ EXPECT_TRUE(dex_cache->GetResolvedType(type_idx) == nullptr);
+ EXPECT_OBJ_PTR_EQ(
+ class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get()),
+ klass.Get());
+ // Force initialization to turn the class erroneous.
+ bool initialized = class_linker_->EnsureInitialized(soa.Self(),
+ klass,
+ /* can_init_fields */ true,
+ /* can_init_parents */ true);
+ EXPECT_FALSE(initialized);
+ EXPECT_TRUE(soa.Self()->IsExceptionPending());
+ soa.Self()->ClearException();
+ // Check that the LookupResolvedType() can still find the resolved type.
+ EXPECT_OBJ_PTR_EQ(
+ class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get()),
+ klass.Get());
+ // Zero out the resolved type and make sure LookupResolvedType() still finds it.
+ dex_cache->SetResolvedType(type_idx, nullptr);
+ EXPECT_TRUE(dex_cache->GetResolvedType(type_idx) == nullptr);
+ EXPECT_OBJ_PTR_EQ(
+ class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get()),
+ klass.Get());
+}
+
TEST_F(ClassLinkerTest, LibCore) {
ScopedObjectAccess soa(Thread::Current());
ASSERT_TRUE(java_lang_dex_file_ != nullptr);
diff --git a/runtime/compiler_filter.cc b/runtime/compiler_filter.cc
index dc89d32..cb8c11d 100644
--- a/runtime/compiler_filter.cc
+++ b/runtime/compiler_filter.cc
@@ -33,7 +33,6 @@
case CompilerFilter::kTime:
case CompilerFilter::kSpeedProfile:
case CompilerFilter::kSpeed:
- case CompilerFilter::kLayoutProfile:
case CompilerFilter::kEverythingProfile:
case CompilerFilter::kEverything: return true;
}
@@ -53,7 +52,6 @@
case CompilerFilter::kTime:
case CompilerFilter::kSpeedProfile:
case CompilerFilter::kSpeed:
- case CompilerFilter::kLayoutProfile:
case CompilerFilter::kEverythingProfile:
case CompilerFilter::kEverything: return true;
}
@@ -73,7 +71,6 @@
case CompilerFilter::kTime:
case CompilerFilter::kSpeedProfile:
case CompilerFilter::kSpeed:
- case CompilerFilter::kLayoutProfile:
case CompilerFilter::kEverythingProfile:
case CompilerFilter::kEverything: return true;
}
@@ -93,7 +90,6 @@
case CompilerFilter::kTime:
case CompilerFilter::kSpeedProfile:
case CompilerFilter::kSpeed:
- case CompilerFilter::kLayoutProfile:
case CompilerFilter::kEverythingProfile:
case CompilerFilter::kEverything: return true;
}
@@ -120,7 +116,6 @@
case CompilerFilter::kVerifyProfile:
case CompilerFilter::kSpaceProfile:
case CompilerFilter::kSpeedProfile:
- case CompilerFilter::kLayoutProfile:
case CompilerFilter::kEverythingProfile: return true;
}
UNREACHABLE();
@@ -145,7 +140,6 @@
return CompilerFilter::kSpace;
case CompilerFilter::kSpeedProfile:
- case CompilerFilter::kLayoutProfile:
return CompilerFilter::kSpeed;
case CompilerFilter::kEverythingProfile:
@@ -171,7 +165,6 @@
case CompilerFilter::kTime: return "time";
case CompilerFilter::kSpeedProfile: return "speed-profile";
case CompilerFilter::kSpeed: return "speed";
- case CompilerFilter::kLayoutProfile: return "layout-profile";
case CompilerFilter::kEverythingProfile: return "everything-profile";
case CompilerFilter::kEverything: return "everything";
}
@@ -199,8 +192,6 @@
*filter = kSpeed;
} else if (strcmp(option, "speed-profile") == 0) {
*filter = kSpeedProfile;
- } else if (strcmp(option, "layout-profile") == 0) {
- *filter = kLayoutProfile;
} else if (strcmp(option, "everything") == 0) {
*filter = kEverything;
} else if (strcmp(option, "everything-profile") == 0) {
diff --git a/runtime/compiler_filter.h b/runtime/compiler_filter.h
index 7eb5f9a..796f4aa 100644
--- a/runtime/compiler_filter.h
+++ b/runtime/compiler_filter.h
@@ -39,7 +39,6 @@
kSpace, // Maximize space savings.
kBalanced, // Good performance return on compilation investment.
kSpeedProfile, // Maximize runtime performance based on profile.
- kLayoutProfile, // Temporary filter for dexlayout. Will be merged with kSpeedProfile.
kSpeed, // Maximize runtime performance.
kEverythingProfile, // Compile everything capable of being compiled based on profile.
kEverything, // Compile everything capable of being compiled.
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 22a3163..1a0cec0 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -588,29 +588,6 @@
return !Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly();
}
-// Used to patch boot image method entry point to interpreter bridge.
-class UpdateEntryPointsClassVisitor : public ClassVisitor {
- public:
- explicit UpdateEntryPointsClassVisitor(instrumentation::Instrumentation* instrumentation)
- : instrumentation_(instrumentation) {}
-
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
- auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- for (auto& m : klass->GetMethods(pointer_size)) {
- const void* code = m.GetEntryPointFromQuickCompiledCode();
- if (Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
- !m.IsNative() &&
- !m.IsProxyMethod()) {
- instrumentation_->UpdateMethodsCodeFromDebugger(&m, GetQuickToInterpreterBridge());
- }
- }
- return true;
- }
-
- private:
- instrumentation::Instrumentation* const instrumentation_;
-};
-
void Dbg::GoActive() {
// Enable all debugging features, including scans for breakpoints.
// This is a no-op if we're already active.
@@ -639,14 +616,16 @@
}
Runtime* runtime = Runtime::Current();
- // Since boot image code may be AOT compiled as not debuggable, we need to patch
- // entry points of methods in boot image to interpreter bridge.
- // However, the performance cost of this is non-negligible during native-debugging due to the
+ // Best effort deoptimization if the runtime is non-Java debuggable. This happens when
+ // ro.debuggable is set, but the application is not debuggable, or when a standalone
+ // dalvikvm invocation is not passed the debuggable option (-Xcompiler-option --debuggable).
+ //
+ // The performance cost of this is non-negligible during native-debugging due to the
// forced JIT, so we keep the AOT code in that case in exchange for limited native debugging.
- if (!runtime->GetInstrumentation()->IsForcedInterpretOnly() && !runtime->IsNativeDebuggable()) {
- ScopedObjectAccess soa(self);
- UpdateEntryPointsClassVisitor visitor(runtime->GetInstrumentation());
- runtime->GetClassLinker()->VisitClasses(&visitor);
+ if (!runtime->IsJavaDebuggable() &&
+ !runtime->GetInstrumentation()->IsForcedInterpretOnly() &&
+ !runtime->IsNativeDebuggable()) {
+ runtime->DeoptimizeBootImage();
}
ScopedSuspendAll ssa(__FUNCTION__);
diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h
index 7ae9f03..8b0c51c 100644
--- a/runtime/dex2oat_environment_test.h
+++ b/runtime/dex2oat_environment_test.h
@@ -25,6 +25,7 @@
#include "common_runtime_test.h"
#include "compiler_callbacks.h"
+#include "exec_utils.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "oat_file_assistant.h"
diff --git a/runtime/dex_cache_resolved_classes.h b/runtime/dex_cache_resolved_classes.h
index f53ca4a..bebdf0d 100644
--- a/runtime/dex_cache_resolved_classes.h
+++ b/runtime/dex_cache_resolved_classes.h
@@ -44,6 +44,10 @@
return dex_location_.compare(other.dex_location_);
}
+ bool AddClass(dex::TypeIndex index) const {
+ return classes_.insert(index).second;
+ }
+
template <class InputIt>
void AddClasses(InputIt begin, InputIt end) const {
classes_.insert(begin, end);
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 0fec856..9dca4c0 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -338,13 +338,16 @@
ScopedObjectAccess soa(Thread::Current());
std::unique_ptr<const DexFile> raw(OpenTestDexFile("Nested"));
ASSERT_TRUE(raw.get() != nullptr);
- EXPECT_EQ(2U, raw->NumClassDefs());
+ EXPECT_EQ(3U, raw->NumClassDefs());
const DexFile::ClassDef& c0 = raw->GetClassDef(0);
- EXPECT_STREQ("LNested$Inner;", raw->GetClassDescriptor(c0));
+ EXPECT_STREQ("LNested$1;", raw->GetClassDescriptor(c0));
const DexFile::ClassDef& c1 = raw->GetClassDef(1);
- EXPECT_STREQ("LNested;", raw->GetClassDescriptor(c1));
+ EXPECT_STREQ("LNested$Inner;", raw->GetClassDescriptor(c1));
+
+ const DexFile::ClassDef& c2 = raw->GetClassDef(2);
+ EXPECT_STREQ("LNested;", raw->GetClassDescriptor(c2));
}
TEST_F(DexFileTest, GetMethodSignature) {
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
new file mode 100644
index 0000000..69c6151
--- /dev/null
+++ b/runtime/dexopt_test.cc
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string>
+#include <vector>
+
+#include <backtrace/BacktraceMap.h>
+#include <gtest/gtest.h>
+
+#include "common_runtime_test.h"
+#include "compiler_callbacks.h"
+#include "dex2oat_environment_test.h"
+#include "dexopt_test.h"
+#include "gc/space/image_space.h"
+#include "mem_map.h"
+
+namespace art {
+void DexoptTest::SetUp() {
+ ReserveImageSpace();
+ Dex2oatEnvironmentTest::SetUp();
+}
+
+void DexoptTest::PreRuntimeCreate() {
+ std::string error_msg;
+ ASSERT_TRUE(PreRelocateImage(GetImageLocation(), &error_msg)) << error_msg;
+ ASSERT_TRUE(PreRelocateImage(GetImageLocation2(), &error_msg)) << error_msg;
+ UnreserveImageSpace();
+}
+
+void DexoptTest::PostRuntimeCreate() {
+ ReserveImageSpace();
+}
+
+void DexoptTest::GenerateOatForTest(const std::string& dex_location,
+ const std::string& oat_location,
+ CompilerFilter::Filter filter,
+ bool relocate,
+ bool pic,
+ bool with_alternate_image) {
+ std::string dalvik_cache = GetDalvikCache(GetInstructionSetString(kRuntimeISA));
+ std::string dalvik_cache_tmp = dalvik_cache + ".redirected";
+
+ if (!relocate) {
+ // Temporarily redirect the dalvik cache so dex2oat doesn't find the
+ // relocated image file.
+ ASSERT_EQ(0, rename(dalvik_cache.c_str(), dalvik_cache_tmp.c_str())) << strerror(errno);
+ }
+
+ std::vector<std::string> args;
+ args.push_back("--dex-file=" + dex_location);
+ args.push_back("--oat-file=" + oat_location);
+ args.push_back("--compiler-filter=" + CompilerFilter::NameOfFilter(filter));
+ args.push_back("--runtime-arg");
+
+ // Use -Xnorelocate regardless of the relocate argument.
+ // We control relocation by redirecting the dalvik cache when needed
+ // rather than use this flag.
+ args.push_back("-Xnorelocate");
+
+ if (pic) {
+ args.push_back("--compile-pic");
+ }
+
+ std::string image_location = GetImageLocation();
+ if (with_alternate_image) {
+ args.push_back("--boot-image=" + GetImageLocation2());
+ }
+
+ std::string error_msg;
+ ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg;
+
+ if (!relocate) {
+ // Restore the dalvik cache if needed.
+ ASSERT_EQ(0, rename(dalvik_cache_tmp.c_str(), dalvik_cache.c_str())) << strerror(errno);
+ }
+
+ // Verify the odex file was generated as expected.
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(oat_location.c_str(),
+ oat_location.c_str(),
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/false,
+ dex_location.c_str(),
+ &error_msg));
+ ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
+ EXPECT_EQ(pic, odex_file->IsPic());
+ EXPECT_EQ(filter, odex_file->GetCompilerFilter());
+
+ std::unique_ptr<ImageHeader> image_header(
+ gc::space::ImageSpace::ReadImageHeader(image_location.c_str(),
+ kRuntimeISA,
+ &error_msg));
+ ASSERT_TRUE(image_header != nullptr) << error_msg;
+ const OatHeader& oat_header = odex_file->GetOatHeader();
+ uint32_t combined_checksum = OatFileAssistant::CalculateCombinedImageChecksum();
+
+ if (CompilerFilter::DependsOnImageChecksum(filter)) {
+ if (with_alternate_image) {
+ EXPECT_NE(combined_checksum, oat_header.GetImageFileLocationOatChecksum());
+ } else {
+ EXPECT_EQ(combined_checksum, oat_header.GetImageFileLocationOatChecksum());
+ }
+ }
+
+ if (!with_alternate_image) {
+ if (CompilerFilter::IsBytecodeCompilationEnabled(filter)) {
+ if (relocate) {
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin()),
+ oat_header.GetImageFileLocationOatDataBegin());
+ EXPECT_EQ(image_header->GetPatchDelta(), oat_header.GetImagePatchDelta());
+ } else {
+ EXPECT_NE(reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin()),
+ oat_header.GetImageFileLocationOatDataBegin());
+ EXPECT_NE(image_header->GetPatchDelta(), oat_header.GetImagePatchDelta());
+ }
+ }
+ }
+}
+
+void DexoptTest::GenerateOdexForTest(const std::string& dex_location,
+ const std::string& odex_location,
+ CompilerFilter::Filter filter) {
+ GenerateOatForTest(dex_location,
+ odex_location,
+ filter,
+ /*relocate*/false,
+ /*pic*/false,
+ /*with_alternate_image*/false);
+}
+
+void DexoptTest::GeneratePicOdexForTest(const std::string& dex_location,
+ const std::string& odex_location,
+ CompilerFilter::Filter filter) {
+ GenerateOatForTest(dex_location,
+ odex_location,
+ filter,
+ /*relocate*/false,
+ /*pic*/true,
+ /*with_alternate_image*/false);
+}
+
+void DexoptTest::GenerateOatForTest(const char* dex_location,
+ CompilerFilter::Filter filter,
+ bool relocate,
+ bool pic,
+ bool with_alternate_image) {
+ std::string oat_location;
+ std::string error_msg;
+ ASSERT_TRUE(OatFileAssistant::DexLocationToOatFilename(
+ dex_location, kRuntimeISA, &oat_location, &error_msg)) << error_msg;
+ GenerateOatForTest(dex_location,
+ oat_location,
+ filter,
+ relocate,
+ pic,
+ with_alternate_image);
+}
+
+void DexoptTest::GenerateOatForTest(const char* dex_location, CompilerFilter::Filter filter) {
+ GenerateOatForTest(dex_location,
+ filter,
+ /*relocate*/true,
+ /*pic*/false,
+ /*with_alternate_image*/false);
+}
+
+bool DexoptTest::PreRelocateImage(const std::string& image_location, std::string* error_msg) {
+ std::string image;
+ if (!GetCachedImageFile(image_location, &image, error_msg)) {
+ return false;
+ }
+
+ std::string patchoat = GetAndroidRoot();
+ patchoat += kIsDebugBuild ? "/bin/patchoatd" : "/bin/patchoat";
+
+ std::vector<std::string> argv;
+ argv.push_back(patchoat);
+ argv.push_back("--input-image-location=" + image_location);
+ argv.push_back("--output-image-file=" + image);
+ argv.push_back("--instruction-set=" + std::string(GetInstructionSetString(kRuntimeISA)));
+ argv.push_back("--base-offset-delta=0x00008000");
+ return Exec(argv, error_msg);
+}
+
+void DexoptTest::ReserveImageSpace() {
+ MemMap::Init();
+
+ // Ensure a chunk of memory is reserved for the image space.
+ // The reservation_end includes room for the main space that has to come
+ // right after the image in case of the GSS collector.
+ uintptr_t reservation_start = ART_BASE_ADDRESS;
+ uintptr_t reservation_end = ART_BASE_ADDRESS + 384 * MB;
+
+ std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
+ ASSERT_TRUE(map.get() != nullptr) << "Failed to build process map";
+ for (BacktraceMap::const_iterator it = map->begin();
+ reservation_start < reservation_end && it != map->end(); ++it) {
+ ReserveImageSpaceChunk(reservation_start, std::min(it->start, reservation_end));
+ reservation_start = std::max(reservation_start, it->end);
+ }
+ ReserveImageSpaceChunk(reservation_start, reservation_end);
+}
+
+void DexoptTest::ReserveImageSpaceChunk(uintptr_t start, uintptr_t end) {
+ if (start < end) {
+ std::string error_msg;
+ image_reservation_.push_back(std::unique_ptr<MemMap>(
+ MemMap::MapAnonymous("image reservation",
+ reinterpret_cast<uint8_t*>(start), end - start,
+ PROT_NONE, false, false, &error_msg)));
+ ASSERT_TRUE(image_reservation_.back().get() != nullptr) << error_msg;
+ LOG(INFO) << "Reserved space for image " <<
+ reinterpret_cast<void*>(image_reservation_.back()->Begin()) << "-" <<
+ reinterpret_cast<void*>(image_reservation_.back()->End());
+ }
+}
+
+void DexoptTest::UnreserveImageSpace() {
+ image_reservation_.clear();
+}
+
+} // namespace art
diff --git a/runtime/dexopt_test.h b/runtime/dexopt_test.h
new file mode 100644
index 0000000..5f0eafd
--- /dev/null
+++ b/runtime/dexopt_test.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_DEXOPT_TEST_H_
+#define ART_RUNTIME_DEXOPT_TEST_H_
+
+#include <string>
+#include <vector>
+
+#include "dex2oat_environment_test.h"
+
+namespace art {
+
+class DexoptTest : public Dex2oatEnvironmentTest {
+ public:
+ virtual void SetUp() OVERRIDE;
+
+ virtual void PreRuntimeCreate();
+
+ virtual void PostRuntimeCreate() OVERRIDE;
+
+ // Generate an oat file for the purposes of test.
+ // The oat file will be generated for dex_location in the given oat_location
+ // with the following configuration:
+ // filter - controls the compilation filter
+ // pic - whether or not the code will be PIC
+ // relocate - if true, the oat file will be relocated with respect to the
+ // boot image. Otherwise the oat file will not be relocated.
+ // with_alternate_image - if true, the oat file will be generated with an
+ // image checksum different than the current image checksum.
+ void GenerateOatForTest(const std::string& dex_location,
+ const std::string& oat_location,
+ CompilerFilter::Filter filter,
+ bool relocate,
+ bool pic,
+ bool with_alternate_image);
+
+ // Generate a non-PIC odex file for the purposes of test.
+ // The generated odex file will be un-relocated.
+ void GenerateOdexForTest(const std::string& dex_location,
+ const std::string& odex_location,
+ CompilerFilter::Filter filter);
+
+ void GeneratePicOdexForTest(const std::string& dex_location,
+ const std::string& odex_location,
+ CompilerFilter::Filter filter);
+
+ // Generate an oat file for the given dex location in its oat location (under
+ // the dalvik cache).
+ void GenerateOatForTest(const char* dex_location,
+ CompilerFilter::Filter filter,
+ bool relocate,
+ bool pic,
+ bool with_alternate_image);
+
+ // Generate a standard oat file in the oat location.
+ void GenerateOatForTest(const char* dex_location, CompilerFilter::Filter filter);
+
+ private:
+ // Pre-Relocate the image to a known non-zero offset so we don't have to
+ // deal with the runtime randomly relocating the image by 0 and messing up
+ // the expected results of the tests.
+ bool PreRelocateImage(const std::string& image_location, std::string* error_msg);
+
+ // Reserve memory around where the image will be loaded so other memory
+ // won't conflict when it comes time to load the image.
+ // This can be called with an already loaded image to reserve the space
+ // around it.
+ void ReserveImageSpace();
+
+ // Reserve a chunk of memory for the image space in the given range.
+ // Only has effect for chunks with a positive number of bytes.
+ void ReserveImageSpaceChunk(uintptr_t start, uintptr_t end);
+
+ // Unreserve any memory reserved by ReserveImageSpace. This should be called
+ // before the image is loaded.
+ void UnreserveImageSpace();
+
+ std::vector<std::unique_ptr<MemMap>> image_reservation_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_DEXOPT_TEST_H_
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index ac0ce36..28aca6c 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -76,6 +76,10 @@
// Lookup the declaring class of the inlined method.
const DexFile* dex_file = caller->GetDexFile();
const DexFile::MethodId& method_id = dex_file->GetMethodId(method_index);
+ ArtMethod* inlined_method = caller->GetDexCacheResolvedMethod(method_index, kRuntimePointerSize);
+ if (inlined_method != nullptr && !inlined_method->IsRuntimeMethod()) {
+ return inlined_method;
+ }
const char* descriptor = dex_file->StringByTypeIdx(method_id.class_idx_);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
Thread* self = Thread::Current();
@@ -92,8 +96,7 @@
const char* method_name = dex_file->GetMethodName(method_id);
const Signature signature = dex_file->GetMethodSignature(method_id);
- ArtMethod* inlined_method =
- klass->FindDeclaredDirectMethod(method_name, signature, kRuntimePointerSize);
+ inlined_method = klass->FindDeclaredDirectMethod(method_name, signature, kRuntimePointerSize);
if (inlined_method == nullptr) {
inlined_method = klass->FindDeclaredVirtualMethod(method_name, signature, kRuntimePointerSize);
if (inlined_method == nullptr) {
@@ -103,6 +106,7 @@
<< "This must be due to duplicate classes or playing wrongly with class loaders";
}
}
+ caller->SetDexCacheResolvedMethod(method_index, inlined_method, kRuntimePointerSize);
return inlined_method;
}
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 25fd727..06c11f5 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -254,5 +254,10 @@
return result;
}
+ArtMethod* GetCalleeSaveOuterMethod(Thread* self, Runtime::CalleeSaveType type) {
+ ScopedAssertNoThreadSuspension ants(__FUNCTION__);
+ ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
+ return DoGetCalleeSaveMethodOuterCallerAndPc(sp, type).first;
+}
} // namespace art
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 6a04f20..69ee3eb 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -191,6 +191,9 @@
Runtime::CalleeSaveType type)
REQUIRES_SHARED(Locks::mutator_lock_);
+ArtMethod* GetCalleeSaveOuterMethod(Thread* self, Runtime::CalleeSaveType type)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
} // namespace art
#endif // ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index e9f09b2..582f0cf 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -129,29 +129,34 @@
#define GENERATE_ENTRYPOINTS(suffix) \
extern "C" void* art_quick_alloc_array_resolved##suffix(mirror::Class* klass, int32_t); \
+extern "C" void* art_quick_alloc_array_resolved8##suffix(mirror::Class* klass, int32_t); \
+extern "C" void* art_quick_alloc_array_resolved16##suffix(mirror::Class* klass, int32_t); \
+extern "C" void* art_quick_alloc_array_resolved32##suffix(mirror::Class* klass, int32_t); \
+extern "C" void* art_quick_alloc_array_resolved64##suffix(mirror::Class* klass, int32_t); \
extern "C" void* art_quick_alloc_object_resolved##suffix(mirror::Class* klass); \
extern "C" void* art_quick_alloc_object_initialized##suffix(mirror::Class* klass); \
extern "C" void* art_quick_alloc_object_with_checks##suffix(mirror::Class* klass); \
-extern "C" void* art_quick_check_and_alloc_array##suffix(uint32_t, int32_t, ArtMethod* ref); \
-extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_string_from_bytes##suffix(void*, int32_t, int32_t, int32_t); \
extern "C" void* art_quick_alloc_string_from_chars##suffix(int32_t, int32_t, void*); \
extern "C" void* art_quick_alloc_string_from_string##suffix(void*); \
-extern "C" void* art_quick_alloc_array##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_array_resolved##suffix##_instrumented(mirror::Class* klass, int32_t); \
-extern "C" void* art_quick_alloc_array_with_access_check##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object##suffix##_instrumented(uint32_t type_idx, ArtMethod* ref); \
+extern "C" void* art_quick_alloc_array_resolved8##suffix##_instrumented(mirror::Class* klass, int32_t); \
+extern "C" void* art_quick_alloc_array_resolved16##suffix##_instrumented(mirror::Class* klass, int32_t); \
+extern "C" void* art_quick_alloc_array_resolved32##suffix##_instrumented(mirror::Class* klass, int32_t); \
+extern "C" void* art_quick_alloc_array_resolved64##suffix##_instrumented(mirror::Class* klass, int32_t); \
extern "C" void* art_quick_alloc_object_resolved##suffix##_instrumented(mirror::Class* klass); \
extern "C" void* art_quick_alloc_object_initialized##suffix##_instrumented(mirror::Class* klass); \
extern "C" void* art_quick_alloc_object_with_checks##suffix##_instrumented(mirror::Class* klass); \
-extern "C" void* art_quick_check_and_alloc_array##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \
-extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_string_from_bytes##suffix##_instrumented(void*, int32_t, int32_t, int32_t); \
extern "C" void* art_quick_alloc_string_from_chars##suffix##_instrumented(int32_t, int32_t, void*); \
extern "C" void* art_quick_alloc_string_from_string##suffix##_instrumented(void*); \
void SetQuickAllocEntryPoints##suffix(QuickEntryPoints* qpoints, bool instrumented) { \
if (instrumented) { \
qpoints->pAllocArrayResolved = art_quick_alloc_array_resolved##suffix##_instrumented; \
+ qpoints->pAllocArrayResolved8 = art_quick_alloc_array_resolved8##suffix##_instrumented; \
+ qpoints->pAllocArrayResolved16 = art_quick_alloc_array_resolved16##suffix##_instrumented; \
+ qpoints->pAllocArrayResolved32 = art_quick_alloc_array_resolved32##suffix##_instrumented; \
+ qpoints->pAllocArrayResolved64 = art_quick_alloc_array_resolved64##suffix##_instrumented; \
qpoints->pAllocObjectResolved = art_quick_alloc_object_resolved##suffix##_instrumented; \
qpoints->pAllocObjectInitialized = art_quick_alloc_object_initialized##suffix##_instrumented; \
qpoints->pAllocObjectWithChecks = art_quick_alloc_object_with_checks##suffix##_instrumented; \
@@ -160,6 +165,10 @@
qpoints->pAllocStringFromString = art_quick_alloc_string_from_string##suffix##_instrumented; \
} else { \
qpoints->pAllocArrayResolved = art_quick_alloc_array_resolved##suffix; \
+ qpoints->pAllocArrayResolved8 = art_quick_alloc_array_resolved8##suffix; \
+ qpoints->pAllocArrayResolved16 = art_quick_alloc_array_resolved16##suffix; \
+ qpoints->pAllocArrayResolved32 = art_quick_alloc_array_resolved32##suffix; \
+ qpoints->pAllocArrayResolved64 = art_quick_alloc_array_resolved64##suffix; \
qpoints->pAllocObjectResolved = art_quick_alloc_object_resolved##suffix; \
qpoints->pAllocObjectInitialized = art_quick_alloc_object_initialized##suffix; \
qpoints->pAllocObjectWithChecks = art_quick_alloc_object_with_checks##suffix; \
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index 22b0f92..e0a2e3c 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -21,6 +21,10 @@
#define QUICK_ENTRYPOINT_LIST(V) \
V(AllocArrayResolved, void*, mirror::Class*, int32_t) \
+ V(AllocArrayResolved8, void*, mirror::Class*, int32_t) \
+ V(AllocArrayResolved16, void*, mirror::Class*, int32_t) \
+ V(AllocArrayResolved32, void*, mirror::Class*, int32_t) \
+ V(AllocArrayResolved64, void*, mirror::Class*, int32_t) \
V(AllocObjectResolved, void*, mirror::Class*) \
V(AllocObjectInitialized, void*, mirror::Class*) \
V(AllocObjectWithChecks, void*, mirror::Class*) \
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 6d17000..4544aef 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -55,261 +55,207 @@
return field;
}
-extern "C" ssize_t artGetByteStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t));
- if (LIKELY(field != nullptr)) {
- return field->GetByte(field->GetDeclaringClass());
+static ArtMethod* GetReferrer(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (kIsDebugBuild) {
+ // stub_test doesn't call this code with a proper frame, so get the outer, and if
+ // it does not have compiled code return it.
+ ArtMethod* outer = GetCalleeSaveOuterMethod(self, Runtime::kSaveRefsOnly);
+ if (outer->GetEntryPointFromQuickCompiledCode() == nullptr) {
+ return outer;
+ }
}
- field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int8_t));
- if (LIKELY(field != nullptr)) {
- return field->GetByte(field->GetDeclaringClass());
- }
- return 0; // Will throw exception by checking with Thread::Current.
+ return GetCalleeSaveMethodCallerAndOuterMethod(self, Runtime::kSaveRefsOnly).caller;
}
-extern "C" size_t artGetBooleanStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
+#define ART_GET_FIELD_FROM_CODE(Kind, PrimitiveType, RetType, SetType, \
+ PrimitiveOrObject, IsObject, Ptr) \
+ extern "C" RetType artGet ## Kind ## StaticFromCode(uint32_t field_idx, \
+ ArtMethod* referrer, \
+ Thread* self) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ ScopedQuickEntrypointChecks sqec(self); \
+ ArtField* field = FindFieldFast( \
+ field_idx, referrer, Static ## PrimitiveOrObject ## Read, \
+ sizeof(PrimitiveType)); \
+ if (LIKELY(field != nullptr)) { \
+ return field->Get ## Kind (field->GetDeclaringClass())Ptr; \
+ } \
+ field = FindFieldFromCode<Static ## PrimitiveOrObject ## Read, true>( \
+ field_idx, referrer, self, sizeof(PrimitiveType)); \
+ if (LIKELY(field != nullptr)) { \
+ return field->Get ## Kind (field->GetDeclaringClass())Ptr; \
+ } \
+ /* Will throw exception by checking with Thread::Current. */ \
+ return 0; \
+ } \
+ \
+ extern "C" RetType artGet ## Kind ## InstanceFromCode(uint32_t field_idx, \
+ mirror::Object* obj, \
+ ArtMethod* referrer, \
+ Thread* self) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ ScopedQuickEntrypointChecks sqec(self); \
+ ArtField* field = FindFieldFast( \
+ field_idx, referrer, Instance ## PrimitiveOrObject ## Read, \
+ sizeof(PrimitiveType)); \
+ if (LIKELY(field != nullptr) && obj != nullptr) { \
+ return field->Get ## Kind (obj)Ptr; \
+ } \
+ field = FindInstanceField<Instance ## PrimitiveOrObject ## Read, true>( \
+ field_idx, referrer, self, sizeof(PrimitiveType), &obj); \
+ if (LIKELY(field != nullptr)) { \
+ return field->Get ## Kind (obj)Ptr; \
+ } \
+ /* Will throw exception by checking with Thread::Current. */ \
+ return 0; \
+ } \
+ \
+ extern "C" int artSet ## Kind ## StaticFromCode(uint32_t field_idx, \
+ SetType new_value, \
+ ArtMethod* referrer, \
+ Thread* self) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ ScopedQuickEntrypointChecks sqec(self); \
+ ArtField* field = FindFieldFast( \
+ field_idx, referrer, Static ## PrimitiveOrObject ## Write, \
+ sizeof(PrimitiveType)); \
+ if (LIKELY(field != nullptr)) { \
+ field->Set ## Kind <false>(field->GetDeclaringClass(), new_value); \
+ return 0; \
+ } \
+ if (IsObject) { \
+ StackHandleScope<1> hs(self); \
+ HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper( \
+ reinterpret_cast<mirror::Object**>(&new_value))); \
+ field = FindFieldFromCode<Static ## PrimitiveOrObject ## Write, true>( \
+ field_idx, referrer, self, sizeof(PrimitiveType)); \
+ } else { \
+ field = FindFieldFromCode<Static ## PrimitiveOrObject ## Write, true>( \
+ field_idx, referrer, self, sizeof(PrimitiveType)); \
+ } \
+ if (LIKELY(field != nullptr)) { \
+ field->Set ## Kind <false>(field->GetDeclaringClass(), new_value); \
+ return 0; \
+ } \
+ return -1; \
+ } \
+ \
+ extern "C" int artSet ## Kind ## InstanceFromCode(uint32_t field_idx, \
+ mirror::Object* obj, \
+ SetType new_value, \
+ ArtMethod* referrer, \
+ Thread* self) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ ScopedQuickEntrypointChecks sqec(self); \
+ ArtField* field = FindFieldFast( \
+ field_idx, referrer, Instance ## PrimitiveOrObject ## Write, \
+ sizeof(PrimitiveType)); \
+ if (LIKELY(field != nullptr && obj != nullptr)) { \
+ field->Set ## Kind <false>(obj, new_value); \
+ return 0; \
+ } \
+ if (IsObject) { \
+ StackHandleScope<1> hs(self); \
+ HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper( \
+ reinterpret_cast<mirror::Object**>(&new_value))); \
+ field = FindInstanceField<Instance ## PrimitiveOrObject ## Write, true>( \
+ field_idx, \
+ referrer, \
+ self, \
+ sizeof(PrimitiveType), \
+ &obj); \
+ } else { \
+ field = FindInstanceField<Instance ## PrimitiveOrObject ## Write, true>( \
+ field_idx, \
+ referrer, \
+ self, \
+ sizeof(PrimitiveType), \
+ &obj); \
+ } \
+ if (LIKELY(field != nullptr)) { \
+ field->Set ## Kind<false>(obj, new_value); \
+ return 0; \
+ } \
+ return -1; \
+ } \
+ \
+ extern "C" RetType artGet ## Kind ## StaticFromCompiledCode( \
+ uint32_t field_idx, \
+ Thread* self) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ return artGet ## Kind ## StaticFromCode( \
+ field_idx, GetReferrer(self), self); \
+ } \
+ \
+ extern "C" RetType artGet ## Kind ## InstanceFromCompiledCode( \
+ uint32_t field_idx, \
+ mirror::Object* obj, \
+ Thread* self) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ return artGet ## Kind ## InstanceFromCode( \
+ field_idx, obj, GetReferrer(self), self); \
+ } \
+ \
+ extern "C" int artSet ## Kind ## StaticFromCompiledCode( \
+ uint32_t field_idx, \
+ SetType new_value, \
+ Thread* self) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ return artSet ## Kind ## StaticFromCode( \
+ field_idx, new_value, GetReferrer(self), self); \
+ } \
+ \
+ extern "C" int artSet ## Kind ## InstanceFromCompiledCode( \
+ uint32_t field_idx, \
+ mirror::Object* obj, \
+ SetType new_value, \
+ Thread* self) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ return artSet ## Kind ## InstanceFromCode( \
+ field_idx, obj, new_value, GetReferrer(self), self); \
+ }
+
+ART_GET_FIELD_FROM_CODE(Byte, int8_t, ssize_t, uint32_t, Primitive, false, )
+ART_GET_FIELD_FROM_CODE(Boolean, int8_t, size_t, uint32_t, Primitive, false, )
+ART_GET_FIELD_FROM_CODE(Short, int16_t, ssize_t, uint16_t, Primitive, false, )
+ART_GET_FIELD_FROM_CODE(Char, int16_t, size_t, uint16_t, Primitive, false, )
+ART_GET_FIELD_FROM_CODE(32, int32_t, size_t, uint32_t, Primitive, false, )
+ART_GET_FIELD_FROM_CODE(64, int64_t, uint64_t, uint64_t, Primitive, false, )
+ART_GET_FIELD_FROM_CODE(Obj, mirror::HeapReference<mirror::Object>, mirror::Object*,
+ mirror::Object*, Object, true, .Ptr())
+
+
+// To cut on the number of entrypoints, we have shared entries for
+// byte/boolean and char/short for setting an instance or static field. We just
+// forward those to the unsigned variant.
+extern "C" int artSet8StaticFromCompiledCode(uint32_t field_idx,
+ uint32_t new_value,
+ Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t));
- if (LIKELY(field != nullptr)) {
- return field->GetBoolean(field->GetDeclaringClass());
- }
- field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int8_t));
- if (LIKELY(field != nullptr)) {
- return field->GetBoolean(field->GetDeclaringClass());
- }
- return 0; // Will throw exception by checking with Thread::Current.
+ return artSetBooleanStaticFromCode(field_idx, new_value, GetReferrer(self), self);
}
-extern "C" ssize_t artGetShortStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t));
- if (LIKELY(field != nullptr)) {
- return field->GetShort(field->GetDeclaringClass());
- }
- field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int16_t));
- if (LIKELY(field != nullptr)) {
- return field->GetShort(field->GetDeclaringClass());
- }
- return 0; // Will throw exception by checking with Thread::Current.
-}
-
-extern "C" size_t artGetCharStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t));
- if (LIKELY(field != nullptr)) {
- return field->GetChar(field->GetDeclaringClass());
- }
- field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int16_t));
- if (LIKELY(field != nullptr)) {
- return field->GetChar(field->GetDeclaringClass());
- }
- return 0; // Will throw exception by checking with Thread::Current.
-}
-
-extern "C" size_t artGet32StaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int32_t));
- if (LIKELY(field != nullptr)) {
- return field->Get32(field->GetDeclaringClass());
- }
- field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int32_t));
- if (LIKELY(field != nullptr)) {
- return field->Get32(field->GetDeclaringClass());
- }
- return 0; // Will throw exception by checking with Thread::Current.
-}
-
-extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int64_t));
- if (LIKELY(field != nullptr)) {
- return field->Get64(field->GetDeclaringClass());
- }
- field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int64_t));
- if (LIKELY(field != nullptr)) {
- return field->Get64(field->GetDeclaringClass());
- }
- return 0; // Will throw exception by checking with Thread::Current.
-}
-
-extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx,
- referrer,
- StaticObjectRead,
- sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != nullptr)) {
- return field->GetObj(field->GetDeclaringClass()).Ptr();
- }
- field = FindFieldFromCode<StaticObjectRead, true>(field_idx,
- referrer,
- self,
- sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != nullptr)) {
- return field->GetObj(field->GetDeclaringClass()).Ptr();
- }
- return nullptr; // Will throw exception by checking with Thread::Current.
-}
-
-extern "C" ssize_t artGetByteInstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- ArtMethod* referrer,
+extern "C" int artSet16StaticFromCompiledCode(uint32_t field_idx,
+ uint16_t new_value,
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- return field->GetByte(obj);
- }
- field = FindInstanceField<InstancePrimitiveRead, true>(field_idx,
- referrer,
- self,
- sizeof(int8_t),
- &obj);
- if (LIKELY(field != nullptr)) {
- return field->GetByte(obj);
- }
- return 0; // Will throw exception by checking with Thread::Current.
+ return artSetCharStaticFromCode(field_idx, new_value, GetReferrer(self), self);
}
-extern "C" size_t artGetBooleanInstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- return field->GetBoolean(obj);
- }
- field = FindInstanceField<InstancePrimitiveRead, true>(field_idx,
- referrer,
- self,
- sizeof(int8_t),
- &obj);
- if (LIKELY(field != nullptr)) {
- return field->GetBoolean(obj);
- }
- return 0; // Will throw exception by checking with Thread::Current.
-}
-extern "C" ssize_t artGetShortInstanceFromCode(uint32_t field_idx,
+extern "C" int artSet8InstanceFromCompiledCode(uint32_t field_idx,
mirror::Object* obj,
- ArtMethod* referrer,
+ uint8_t new_value,
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- return field->GetShort(obj);
- }
- field = FindInstanceField<InstancePrimitiveRead, true>(field_idx,
- referrer,
- self,
- sizeof(int16_t),
- &obj);
- if (LIKELY(field != nullptr)) {
- return field->GetShort(obj);
- }
- return 0; // Will throw exception by checking with Thread::Current.
+ return artSetBooleanInstanceFromCode(field_idx, obj, new_value, GetReferrer(self), self);
}
-extern "C" size_t artGetCharInstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- ArtMethod* referrer,
- Thread* self)
+extern "C" int artSet16InstanceFromCompiledCode(uint32_t field_idx,
+ mirror::Object* obj,
+ uint16_t new_value,
+ Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- return field->GetChar(obj);
- }
- field = FindInstanceField<InstancePrimitiveRead, true>(field_idx,
- referrer,
- self,
- sizeof(int16_t),
- &obj);
- if (LIKELY(field != nullptr)) {
- return field->GetChar(obj);
- }
- return 0; // Will throw exception by checking with Thread::Current.
-}
-
-extern "C" size_t artGet32InstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int32_t));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- return field->Get32(obj);
- }
- field = FindInstanceField<InstancePrimitiveRead, true>(field_idx,
- referrer,
- self,
- sizeof(int32_t),
- &obj);
- if (LIKELY(field != nullptr)) {
- return field->Get32(obj);
- }
- return 0; // Will throw exception by checking with Thread::Current.
-}
-
-extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int64_t));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- return field->Get64(obj);
- }
- field = FindInstanceField<InstancePrimitiveRead, true>(field_idx,
- referrer,
- self,
- sizeof(int64_t),
- &obj);
- if (LIKELY(field != nullptr)) {
- return field->Get64(obj);
- }
- return 0; // Will throw exception by checking with Thread::Current.
-}
-
-extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx,
- referrer,
- InstanceObjectRead,
- sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- return field->GetObj(obj).Ptr();
- }
- field = FindInstanceField<InstanceObjectRead, true>(field_idx,
- referrer,
- self,
- sizeof(mirror::HeapReference<mirror::Object>),
- &obj);
- if (LIKELY(field != nullptr)) {
- return field->GetObj(obj).Ptr();
- }
- return nullptr; // Will throw exception by checking with Thread::Current.
+ return artSetCharInstanceFromCode(field_idx, obj, new_value, GetReferrer(self), self);
}
extern "C" int artSet8StaticFromCode(uint32_t field_idx,
@@ -317,32 +263,7 @@
ArtMethod* referrer,
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int8_t));
- if (LIKELY(field != nullptr)) {
- Primitive::Type type = field->GetTypeAsPrimitiveType();
- // Compiled code can't use transactional mode.
- if (type == Primitive::kPrimBoolean) {
- field->SetBoolean<false>(field->GetDeclaringClass(), new_value);
- } else {
- DCHECK_EQ(Primitive::kPrimByte, type);
- field->SetByte<false>(field->GetDeclaringClass(), new_value);
- }
- return 0; // success
- }
- field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int8_t));
- if (LIKELY(field != nullptr)) {
- Primitive::Type type = field->GetTypeAsPrimitiveType();
- // Compiled code can't use transactional mode.
- if (type == Primitive::kPrimBoolean) {
- field->SetBoolean<false>(field->GetDeclaringClass(), new_value);
- } else {
- DCHECK_EQ(Primitive::kPrimByte, type);
- field->SetByte<false>(field->GetDeclaringClass(), new_value);
- }
- return 0; // success
- }
- return -1; // failure
+ return artSetBooleanStaticFromCode(field_idx, new_value, referrer, self);
}
extern "C" int artSet16StaticFromCode(uint32_t field_idx,
@@ -350,108 +271,7 @@
ArtMethod* referrer,
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int16_t));
- if (LIKELY(field != nullptr)) {
- Primitive::Type type = field->GetTypeAsPrimitiveType();
- // Compiled code can't use transactional mode.
- if (type == Primitive::kPrimChar) {
- field->SetChar<false>(field->GetDeclaringClass(), new_value);
- } else {
- DCHECK_EQ(Primitive::kPrimShort, type);
- field->SetShort<false>(field->GetDeclaringClass(), new_value);
- }
- return 0; // success
- }
- field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int16_t));
- if (LIKELY(field != nullptr)) {
- Primitive::Type type = field->GetTypeAsPrimitiveType();
- // Compiled code can't use transactional mode.
- if (type == Primitive::kPrimChar) {
- field->SetChar<false>(field->GetDeclaringClass(), new_value);
- } else {
- DCHECK_EQ(Primitive::kPrimShort, type);
- field->SetShort<false>(field->GetDeclaringClass(), new_value);
- }
- return 0; // success
- }
- return -1; // failure
-}
-
-extern "C" int artSet32StaticFromCode(uint32_t field_idx,
- uint32_t new_value,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int32_t));
- if (LIKELY(field != nullptr)) {
- // Compiled code can't use transactional mode.
- field->Set32<false>(field->GetDeclaringClass(), new_value);
- return 0; // success
- }
- field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int32_t));
- if (LIKELY(field != nullptr)) {
- // Compiled code can't use transactional mode.
- field->Set32<false>(field->GetDeclaringClass(), new_value);
- return 0; // success
- }
- return -1; // failure
-}
-
-extern "C" int artSet64StaticFromCode(uint32_t field_idx,
- ArtMethod* referrer,
- uint64_t new_value,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t));
- if (LIKELY(field != nullptr)) {
- // Compiled code can't use transactional mode.
- field->Set64<false>(field->GetDeclaringClass(), new_value);
- return 0; // success
- }
- field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int64_t));
- if (LIKELY(field != nullptr)) {
- // Compiled code can't use transactional mode.
- field->Set64<false>(field->GetDeclaringClass(), new_value);
- return 0; // success
- }
- return -1; // failure
-}
-
-extern "C" int artSetObjStaticFromCode(uint32_t field_idx,
- mirror::Object* new_value,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx,
- referrer,
- StaticObjectWrite,
- sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != nullptr)) {
- if (LIKELY(!field->IsPrimitiveType())) {
- // Compiled code can't use transactional mode.
- field->SetObj<false>(field->GetDeclaringClass(), new_value);
- return 0; // success
- }
- }
- {
- StackHandleScope<1> hs(self);
- HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&new_value));
- field = FindFieldFromCode<StaticObjectWrite, true>(
- field_idx,
- referrer,
- self,
- sizeof(mirror::HeapReference<mirror::Object>));
- }
- if (LIKELY(field != nullptr)) {
- // Compiled code can't use transactional mode.
- field->SetObj<false>(field->GetDeclaringClass(), new_value);
- return 0; // success
- }
- return -1; // failure
+ return artSetCharStaticFromCode(field_idx, new_value, referrer, self);
}
extern "C" int artSet8InstanceFromCode(uint32_t field_idx,
@@ -460,35 +280,7 @@
ArtMethod* referrer,
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- Primitive::Type type = field->GetTypeAsPrimitiveType();
- // Compiled code can't use transactional mode.
- if (type == Primitive::kPrimBoolean) {
- field->SetBoolean<false>(obj, new_value);
- } else {
- DCHECK_EQ(Primitive::kPrimByte, type);
- field->SetByte<false>(obj, new_value);
- }
- return 0; // success
- }
- field = FindInstanceField<InstancePrimitiveWrite, true>(field_idx,
- referrer,
- self,
- sizeof(int8_t),
- &obj);
- if (LIKELY(field != nullptr)) {
- Primitive::Type type = field->GetTypeAsPrimitiveType();
- // Compiled code can't use transactional mode.
- if (type == Primitive::kPrimBoolean) {
- field->SetBoolean<false>(obj, new_value);
- } else {
- field->SetByte<false>(obj, new_value);
- }
- return 0; // success
- }
- return -1; // failure
+ return artSetBooleanInstanceFromCode(field_idx, obj, new_value, referrer, self);
}
extern "C" int artSet16InstanceFromCode(uint32_t field_idx,
@@ -497,126 +289,7 @@
ArtMethod* referrer,
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int16_t));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- Primitive::Type type = field->GetTypeAsPrimitiveType();
- // Compiled code can't use transactional mode.
- if (type == Primitive::kPrimChar) {
- field->SetChar<false>(obj, new_value);
- } else {
- DCHECK_EQ(Primitive::kPrimShort, type);
- field->SetShort<false>(obj, new_value);
- }
- return 0; // success
- }
- field = FindInstanceField<InstancePrimitiveWrite, true>(field_idx,
- referrer,
- self,
- sizeof(int16_t),
- &obj);
- if (LIKELY(field != nullptr)) {
- Primitive::Type type = field->GetTypeAsPrimitiveType();
- // Compiled code can't use transactional mode.
- if (type == Primitive::kPrimChar) {
- field->SetChar<false>(obj, new_value);
- } else {
- DCHECK_EQ(Primitive::kPrimShort, type);
- field->SetShort<false>(obj, new_value);
- }
- return 0; // success
- }
- return -1; // failure
-}
-
-extern "C" int artSet32InstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- uint32_t new_value,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int32_t));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- // Compiled code can't use transactional mode.
- field->Set32<false>(obj, new_value);
- return 0; // success
- }
- field = FindInstanceField<InstancePrimitiveWrite, true>(field_idx,
- referrer,
- self,
- sizeof(int32_t),
- &obj);
- if (LIKELY(field != nullptr)) {
- // Compiled code can't use transactional mode.
- field->Set32<false>(obj, new_value);
- return 0; // success
- }
- return -1; // failure
-}
-
-extern "C" int artSet64InstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- uint64_t new_value,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int64_t));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- // Compiled code can't use transactional mode.
- field->Set64<false>(obj, new_value);
- return 0; // success
- }
- field = FindInstanceField<InstancePrimitiveWrite, true>(field_idx,
- referrer,
- self,
- sizeof(int64_t),
- &obj);
- if (LIKELY(field != nullptr)) {
- // Compiled code can't use transactional mode.
- field->Set64<false>(obj, new_value);
- return 0;
- }
- return -1; // failure
-}
-
-extern "C" int artSetObjInstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- mirror::Object* new_value,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx,
- referrer,
- InstanceObjectWrite,
- sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != nullptr && obj != nullptr)) {
- // Compiled code can't use transactional mode.
- field->SetObj<false>(obj, new_value);
- return 0; // success
- }
- {
- StackHandleScope<2> hs(self);
- HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&obj));
- HandleWrapper<mirror::Object> h_new_value(hs.NewHandleWrapper(&new_value));
- field = FindFieldFromCode<InstanceObjectWrite, true>(
- field_idx,
- referrer,
- self,
- sizeof(mirror::HeapReference<mirror::Object>));
- }
- if (LIKELY(field != nullptr)) {
- if (UNLIKELY(obj == nullptr)) {
- ThrowNullPointerExceptionForFieldAccess(field, false);
- } else {
- // Compiled code can't use transactional mode.
- field->SetObj<false>(obj, new_value);
- return 0; // success
- }
- }
- return -1; // failure
+ return artSetCharInstanceFromCode(field_idx, obj, new_value, referrer, self);
}
extern "C" mirror::Object* artReadBarrierMark(mirror::Object* obj) {
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 670dadc..158c1d6 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -20,7 +20,7 @@
#include "indirect_reference_table.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index eb76fb6..bde9009 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -781,15 +781,19 @@
// If caller_pc is the instrumentation exit stub, the stub will check to see if deoptimization
// should be done and it knows the real return pc.
if (UNLIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) &&
- Dbg::IsForcedInterpreterNeededForUpcall(self, caller) &&
- Runtime::Current()->IsDeoptimizeable(caller_pc))) {
- // Push the context of the deoptimization stack so we can restore the return value and the
- // exception before executing the deoptimized frames.
- self->PushDeoptimizationContext(
- result, shorty[0] == 'L', /* from_code */ false, self->GetException());
+ Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) {
+ if (!Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)) {
+ LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
+ << caller->PrettyMethod();
+ } else {
+ // Push the context of the deoptimization stack so we can restore the return value and the
+ // exception before executing the deoptimized frames.
+ self->PushDeoptimizationContext(
+ result, shorty[0] == 'L', /* from_code */ false, self->GetException());
- // Set special exception to cause deoptimization.
- self->SetException(Thread::GetDeoptimizationException());
+ // Set special exception to cause deoptimization.
+ self->SetException(Thread::GetDeoptimizationException());
+ }
}
// No need to restore the args since the method has already been run by the interpreter.
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 8e84d76..d0687ce 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -152,7 +152,15 @@
void CheckQuickEntryPoints() {
CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pAllocArrayResolved) == 0,
QuickEntryPoints_start_with_allocarray_resoved);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayResolved, pAllocObjectResolved,
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayResolved, pAllocArrayResolved8,
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayResolved8, pAllocArrayResolved16,
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayResolved16, pAllocArrayResolved32,
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayResolved32, pAllocArrayResolved64,
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayResolved64, pAllocObjectResolved,
sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectResolved, pAllocObjectInitialized,
sizeof(void*));
diff --git a/runtime/exec_utils.cc b/runtime/exec_utils.cc
new file mode 100644
index 0000000..9efb1a3
--- /dev/null
+++ b/runtime/exec_utils.cc
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "exec_utils.h"
+
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <string>
+#include <vector>
+
+#include "android-base/stringprintf.h"
+#include "android-base/strings.h"
+
+#include "runtime.h"
+
+namespace art {
+
+using android::base::StringAppendF;
+using android::base::StringPrintf;
+
+int ExecAndReturnCode(std::vector<std::string>& arg_vector, std::string* error_msg) {
+ const std::string command_line(android::base::Join(arg_vector, ' '));
+ CHECK_GE(arg_vector.size(), 1U) << command_line;
+
+ // Convert the args to char pointers.
+ const char* program = arg_vector[0].c_str();
+ std::vector<char*> args;
+ for (size_t i = 0; i < arg_vector.size(); ++i) {
+ const std::string& arg = arg_vector[i];
+ char* arg_str = const_cast<char*>(arg.c_str());
+ CHECK(arg_str != nullptr) << i;
+ args.push_back(arg_str);
+ }
+ args.push_back(nullptr);
+
+ // fork and exec
+ pid_t pid = fork();
+ if (pid == 0) {
+ // no allocation allowed between fork and exec
+
+ // change process groups, so we don't get reaped by ProcessManager
+ setpgid(0, 0);
+
+ // (b/30160149): protect subprocesses from modifications to LD_LIBRARY_PATH, etc.
+ // Use the snapshot of the environment from the time the runtime was created.
+ char** envp = (Runtime::Current() == nullptr) ? nullptr : Runtime::Current()->GetEnvSnapshot();
+ if (envp == nullptr) {
+ execv(program, &args[0]);
+ } else {
+ execve(program, &args[0], envp);
+ }
+ PLOG(ERROR) << "Failed to execve(" << command_line << ")";
+ // _exit to avoid atexit handlers in child.
+ _exit(1);
+ } else {
+ if (pid == -1) {
+ *error_msg = StringPrintf("Failed to execv(%s) because fork failed: %s",
+ command_line.c_str(), strerror(errno));
+ return -1;
+ }
+
+ // wait for subprocess to finish
+ int status = -1;
+ pid_t got_pid = TEMP_FAILURE_RETRY(waitpid(pid, &status, 0));
+ if (got_pid != pid) {
+ *error_msg = StringPrintf("Failed after fork for execv(%s) because waitpid failed: "
+ "wanted %d, got %d: %s",
+ command_line.c_str(), pid, got_pid, strerror(errno));
+ return -1;
+ }
+ if (WIFEXITED(status)) {
+ return WEXITSTATUS(status);
+ }
+ return -1;
+ }
+}
+
+bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg) {
+ int status = ExecAndReturnCode(arg_vector, error_msg);
+ if (status != 0) {
+ const std::string command_line(android::base::Join(arg_vector, ' '));
+ *error_msg = StringPrintf("Failed execv(%s) because non-0 exit status",
+ command_line.c_str());
+ return false;
+ }
+ return true;
+}
+
+} // namespace art
diff --git a/runtime/exec_utils.h b/runtime/exec_utils.h
new file mode 100644
index 0000000..093f7b8
--- /dev/null
+++ b/runtime/exec_utils.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_EXEC_UTILS_H_
+#define ART_RUNTIME_EXEC_UTILS_H_
+
+#include <string>
+#include <vector>
+
+namespace art {
+
+// Wrapper on fork/execv to run a command in a subprocess.
+// Both of these spawn child processes using the environment as it was set when the single instance
+// of the runtime (Runtime::Current()) was started. If no instance of the runtime was started, it
+// will use the current environment settings.
+bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg);
+int ExecAndReturnCode(std::vector<std::string>& arg_vector, std::string* error_msg);
+
+} // namespace art
+
+#endif // ART_RUNTIME_EXEC_UTILS_H_
diff --git a/runtime/experimental_flags.h b/runtime/experimental_flags.h
index 5ddb9fa..0471c96 100644
--- a/runtime/experimental_flags.h
+++ b/runtime/experimental_flags.h
@@ -26,8 +26,6 @@
// The actual flag values.
enum {
kNone = 0x0000,
- kAgents = 0x0001, // 0b00000001
- kRuntimePlugins = 0x0002, // 0b00000010
kMethodHandles = 0x0004, // 0b00000100
};
@@ -67,14 +65,6 @@
inline std::ostream& operator<<(std::ostream& stream, const ExperimentalFlags& e) {
bool started = false;
- if (e & ExperimentalFlags::kAgents) {
- stream << (started ? "|" : "") << "kAgents";
- started = true;
- }
- if (e & ExperimentalFlags::kRuntimePlugins) {
- stream << (started ? "|" : "") << "kRuntimePlugins";
- started = true;
- }
if (e & ExperimentalFlags::kMethodHandles) {
stream << (started ? "|" : "") << "kMethodHandles";
started = true;
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 7c64952..854d0a5 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -22,6 +22,7 @@
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/region_space.h"
+#include "mirror/object-inl.h"
#include "lock_word.h"
namespace art {
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 6044053..f12ad80 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -25,6 +25,7 @@
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/gc_pause_listener.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
@@ -139,7 +140,7 @@
// Verify no from space refs. This causes a pause.
if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
- ScopedPause pause(this);
+ ScopedPause pause(this, false);
CheckEmptyMarkStack();
if (kVerboseMode) {
LOG(INFO) << "Verifying no from-space refs";
@@ -439,8 +440,27 @@
gc_barrier_->Init(self, 0);
ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
FlipCallback flip_callback(this);
+
+ // This is the point where Concurrent-Copying will pause all threads. We report a pause here, if
+ // necessary. This is slightly over-reporting, as this includes the time to actually suspend
+ // threads.
+ {
+ GcPauseListener* pause_listener = GetHeap()->GetGcPauseListener();
+ if (pause_listener != nullptr) {
+ pause_listener->StartPause();
+ }
+ }
+
size_t barrier_count = Runtime::Current()->FlipThreadRoots(
&thread_flip_visitor, &flip_callback, this);
+
+ {
+ GcPauseListener* pause_listener = GetHeap()->GetGcPauseListener();
+ if (pause_listener != nullptr) {
+ pause_listener->EndPause();
+ }
+ }
+
{
ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
gc_barrier_->Increment(self, barrier_count);
@@ -857,7 +877,10 @@
thread->ReadFlag(kEmptyCheckpointRequest)) {
// Found a runnable thread that hasn't responded to the empty checkpoint request.
// Assume it's stuck and safe to dump its stack.
- thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
+ thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT),
+ /*dump_native_stack*/ true,
+ /*backtrace_map*/ nullptr,
+ /*force_dump_stack*/ true);
}
}
}
@@ -1852,8 +1875,10 @@
// Scan ref fields of an object.
inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
- if (kDisallowReadBarrierDuringScan) {
+ if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
// Avoid all read barriers during visit references to help performance.
+ // Don't do this in transaction mode because we may read the old value of an field which may
+ // trigger read barriers.
Thread::Current()->ModifyDebugDisallowReadBarrier(1);
}
DCHECK(!region_space_->IsInFromSpace(to_ref));
@@ -1862,7 +1887,7 @@
// Disable the read barrier for a performance reason.
to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
visitor, visitor);
- if (kDisallowReadBarrierDuringScan) {
+ if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
Thread::Current()->ModifyDebugDisallowReadBarrier(-1);
}
}
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 01bcb7d..14fd332 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -158,22 +158,26 @@
total_freed_bytes_ = 0;
}
-GarbageCollector::ScopedPause::ScopedPause(GarbageCollector* collector)
- : start_time_(NanoTime()), collector_(collector) {
+GarbageCollector::ScopedPause::ScopedPause(GarbageCollector* collector, bool with_reporting)
+ : start_time_(NanoTime()), collector_(collector), with_reporting_(with_reporting) {
Runtime* runtime = Runtime::Current();
runtime->GetThreadList()->SuspendAll(__FUNCTION__);
- GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
- if (pause_listener != nullptr) {
- pause_listener->StartPause();
+ if (with_reporting) {
+ GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
+ if (pause_listener != nullptr) {
+ pause_listener->StartPause();
+ }
}
}
GarbageCollector::ScopedPause::~ScopedPause() {
collector_->RegisterPause(NanoTime() - start_time_);
Runtime* runtime = Runtime::Current();
- GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
- if (pause_listener != nullptr) {
- pause_listener->EndPause();
+ if (with_reporting_) {
+ GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
+ if (pause_listener != nullptr) {
+ pause_listener->EndPause();
+ }
}
runtime->GetThreadList()->ResumeAll();
}
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 0177e2a..95601d7 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -126,12 +126,14 @@
public:
class SCOPED_LOCKABLE ScopedPause {
public:
- explicit ScopedPause(GarbageCollector* collector) EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_);
+ explicit ScopedPause(GarbageCollector* collector, bool with_reporting = true)
+ EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_);
~ScopedPause() UNLOCK_FUNCTION();
private:
const uint64_t start_time_;
GarbageCollector* const collector_;
+ bool with_reporting_;
};
GarbageCollector(Heap* heap, const std::string& name);
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 54f2210..394e541 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -34,7 +34,7 @@
#include "handle_scope-inl.h"
#include "thread-inl.h"
#include "utils.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 268cca0..051f3f7 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -78,6 +78,7 @@
#include "scoped_thread_state_change-inl.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
+#include "verify_object-inl.h"
#include "well_known_classes.h"
namespace art {
@@ -127,8 +128,6 @@
// Dump the rosalloc stats on SIGQUIT.
static constexpr bool kDumpRosAllocStatsOnSigQuit = false;
-static constexpr size_t kNativeAllocationHistogramBuckets = 16;
-
// Extra added to the heap growth multiplier. Used to adjust the GC ergonomics for the read barrier
// config.
static constexpr double kExtraHeapGrowthMultiplier = kUseReadBarrier ? 1.0 : 0.0;
@@ -194,18 +193,12 @@
capacity_(capacity),
growth_limit_(growth_limit),
max_allowed_footprint_(initial_size),
- native_footprint_gc_watermark_(initial_size),
- native_need_to_run_finalization_(false),
concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
total_bytes_freed_ever_(0),
total_objects_freed_ever_(0),
num_bytes_allocated_(0),
- native_bytes_allocated_(0),
- native_histogram_lock_("Native allocation lock"),
- native_allocation_histogram_("Native allocation sizes",
- 1U,
- kNativeAllocationHistogramBuckets),
- native_free_histogram_("Native free sizes", 1U, kNativeAllocationHistogramBuckets),
+ new_native_bytes_allocated_(0),
+ old_native_bytes_allocated_(0),
num_bytes_freed_revoke_(0),
verify_missing_card_marks_(false),
verify_system_weaks_(false),
@@ -293,8 +286,13 @@
if (foreground_collector_type_ == kCollectorTypeCC) {
// Need to use a low address so that we can allocate a contiguous
// 2 * Xmx space when there's no image (dex2oat for target).
+#if defined(__LP64__)
CHECK_GE(300 * MB, non_moving_space_capacity);
requested_alloc_space_begin = reinterpret_cast<uint8_t*>(300 * MB) - non_moving_space_capacity;
+#else
+ // For 32-bit, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
+ requested_alloc_space_begin = reinterpret_cast<uint8_t*>(0x20000000);
+#endif
}
// Load image space(s).
@@ -369,7 +367,12 @@
&error_str));
CHECK(non_moving_space_mem_map != nullptr) << error_str;
// Try to reserve virtual memory at a lower address if we have a separate non moving space.
+#if defined(__LP64__)
request_begin = reinterpret_cast<uint8_t*>(300 * MB);
+#else
+ // For 32-bit, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
+ request_begin = reinterpret_cast<uint8_t*>(0x20000000) + non_moving_space_capacity;
+#endif
}
// Attempt to create 2 mem maps at or after the requested begin.
if (foreground_collector_type_ != kCollectorTypeCC) {
@@ -534,6 +537,12 @@
gc_complete_lock_ = new Mutex("GC complete lock");
gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
*gc_complete_lock_));
+ native_blocking_gc_lock_ = new Mutex("Native blocking GC lock");
+ native_blocking_gc_cond_.reset(new ConditionVariable("Native blocking GC condition variable",
+ *native_blocking_gc_lock_));
+ native_blocking_gc_in_progress_ = false;
+ native_blocking_gcs_finished_ = 0;
+
thread_flip_lock_ = new Mutex("GC thread flip lock");
thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
*thread_flip_lock_));
@@ -1101,19 +1110,9 @@
rosalloc_space_->DumpStats(os);
}
- {
- MutexLock mu(Thread::Current(), native_histogram_lock_);
- if (native_allocation_histogram_.SampleSize() > 0u) {
- os << "Histogram of native allocation ";
- native_allocation_histogram_.DumpBins(os);
- os << " bucket size " << native_allocation_histogram_.BucketWidth() << "\n";
- }
- if (native_free_histogram_.SampleSize() > 0u) {
- os << "Histogram of native free ";
- native_free_histogram_.DumpBins(os);
- os << " bucket size " << native_free_histogram_.BucketWidth() << "\n";
- }
- }
+ os << "Registered native bytes allocated: "
+ << old_native_bytes_allocated_.LoadRelaxed() + new_native_bytes_allocated_.LoadRelaxed()
+ << "\n";
BaseMutex::DumpAll(os);
}
@@ -1198,6 +1197,7 @@
STLDeleteElements(&continuous_spaces_);
STLDeleteElements(&discontinuous_spaces_);
delete gc_complete_lock_;
+ delete native_blocking_gc_lock_;
delete thread_flip_lock_;
delete pending_task_lock_;
delete backtrace_lock_;
@@ -2645,6 +2645,13 @@
// Approximate heap size.
ATRACE_INT("Heap size (KB)", bytes_allocated_before_gc / KB);
+ if (gc_type == NonStickyGcType()) {
+ // Move all bytes from new_native_bytes_allocated_ to
+ // old_native_bytes_allocated_ now that GC has been triggered, resetting
+ // new_native_bytes_allocated_ to zero in the process.
+ old_native_bytes_allocated_.FetchAndAddRelaxed(new_native_bytes_allocated_.ExchangeRelaxed(0));
+ }
+
DCHECK_LT(gc_type, collector::kGcTypeMax);
DCHECK_NE(gc_type, collector::kGcTypeNone);
@@ -3352,7 +3359,7 @@
void Heap::PreGcVerification(collector::GarbageCollector* gc) {
if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
- collector::GarbageCollector::ScopedPause pause(gc);
+ collector::GarbageCollector::ScopedPause pause(gc, false);
PreGcVerificationPaused(gc);
}
}
@@ -3420,7 +3427,7 @@
void Heap::PostGcVerification(collector::GarbageCollector* gc) {
if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
- collector::GarbageCollector::ScopedPause pause(gc);
+ collector::GarbageCollector::ScopedPause pause(gc, false);
PostGcVerificationPaused(gc);
}
}
@@ -3504,18 +3511,6 @@
return false;
}
-void Heap::UpdateMaxNativeFootprint() {
- size_t native_size = native_bytes_allocated_.LoadRelaxed();
- // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
- size_t target_size = native_size / GetTargetHeapUtilization();
- if (target_size > native_size + max_free_) {
- target_size = native_size + max_free_;
- } else if (target_size < native_size + min_free_) {
- target_size = native_size + min_free_;
- }
- native_footprint_gc_watermark_ = std::min(growth_limit_, target_size);
-}
-
collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
for (const auto& collector : garbage_collectors_) {
if (collector->GetCollectorType() == collector_type_ &&
@@ -3543,8 +3538,11 @@
collector::GcType gc_type = collector_ran->GetGcType();
const double multiplier = HeapGrowthMultiplier(); // Use the multiplier to grow more for
// foreground.
- const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
- const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
+ // Ensure at least 2.5 MB to temporarily fix excessive GC caused by TLAB ergonomics.
+ const uint64_t adjusted_min_free = std::max(static_cast<uint64_t>(min_free_ * multiplier),
+ static_cast<uint64_t>(5 * MB / 2));
+ const uint64_t adjusted_max_free = std::max(static_cast<uint64_t>(max_free_ * multiplier),
+ static_cast<uint64_t>(5 * MB / 2));
if (gc_type != collector::kGcTypeSticky) {
// Grow the heap for non sticky GC.
ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
@@ -3552,11 +3550,9 @@
target_size = bytes_allocated + delta * multiplier;
target_size = std::min(target_size, bytes_allocated + adjusted_max_free);
target_size = std::max(target_size, bytes_allocated + adjusted_min_free);
- native_need_to_run_finalization_ = true;
next_gc_type_ = collector::kGcTypeSticky;
} else {
- collector::GcType non_sticky_gc_type =
- HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
+ collector::GcType non_sticky_gc_type = NonStickyGcType();
// Find what the next non sticky collector will be.
collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
// If the throughput of the current sticky GC >= throughput of the non sticky collector, then
@@ -3707,7 +3703,7 @@
collector::GcType next_gc_type = next_gc_type_;
// If forcing full and next gc type is sticky, override with a non-sticky type.
if (force_full && next_gc_type == collector::kGcTypeSticky) {
- next_gc_type = HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
+ next_gc_type = NonStickyGcType();
}
if (CollectGarbageInternal(next_gc_type, kGcCauseBackground, false) ==
collector::kGcTypeNone) {
@@ -3864,70 +3860,79 @@
}
void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
- Thread* self = ThreadForEnv(env);
- {
- MutexLock mu(self, native_histogram_lock_);
- native_allocation_histogram_.AddValue(bytes);
- }
- if (native_need_to_run_finalization_) {
- RunFinalization(env, kNativeAllocationFinalizeTimeout);
- UpdateMaxNativeFootprint();
- native_need_to_run_finalization_ = false;
- }
- // Total number of native bytes allocated.
- size_t new_native_bytes_allocated = native_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes);
- new_native_bytes_allocated += bytes;
- if (new_native_bytes_allocated > native_footprint_gc_watermark_) {
- collector::GcType gc_type = HasZygoteSpace() ? collector::kGcTypePartial :
- collector::kGcTypeFull;
+ // See the REDESIGN section of go/understanding-register-native-allocation
+ // for an explanation of how RegisterNativeAllocation works.
+ size_t new_value = bytes + new_native_bytes_allocated_.FetchAndAddRelaxed(bytes);
+ if (new_value > NativeAllocationBlockingGcWatermark()) {
+ // Wait for a new GC to finish and finalizers to run, because the
+ // allocation rate is too high.
+ Thread* self = ThreadForEnv(env);
- // The second watermark is higher than the gc watermark. If you hit this it means you are
- // allocating native objects faster than the GC can keep up with.
- if (new_native_bytes_allocated > growth_limit_) {
- if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
- // Just finished a GC, attempt to run finalizers.
- RunFinalization(env, kNativeAllocationFinalizeTimeout);
- CHECK(!env->ExceptionCheck());
- // Native bytes allocated may be updated by finalization, refresh it.
- new_native_bytes_allocated = native_bytes_allocated_.LoadRelaxed();
+ bool run_gc = false;
+ {
+ MutexLock mu(self, *native_blocking_gc_lock_);
+ uint32_t initial_gcs_finished = native_blocking_gcs_finished_;
+ if (native_blocking_gc_in_progress_) {
+ // A native blocking GC is in progress from the last time the native
+ // allocation blocking GC watermark was exceeded. Wait for that GC to
+ // finish before addressing the fact that we exceeded the blocking
+ // watermark again.
+ do {
+ native_blocking_gc_cond_->Wait(self);
+ } while (native_blocking_gcs_finished_ == initial_gcs_finished);
+ initial_gcs_finished++;
}
- // If we still are over the watermark, attempt a GC for alloc and run finalizers.
- if (new_native_bytes_allocated > growth_limit_) {
- CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
- RunFinalization(env, kNativeAllocationFinalizeTimeout);
- native_need_to_run_finalization_ = false;
- CHECK(!env->ExceptionCheck());
- }
- // We have just run finalizers, update the native watermark since it is very likely that
- // finalizers released native managed allocations.
- UpdateMaxNativeFootprint();
- } else if (!IsGCRequestPending()) {
- if (IsGcConcurrent()) {
- RequestConcurrentGC(self, true); // Request non-sticky type.
+
+ // It's possible multiple threads have seen that we exceeded the
+ // blocking watermark. Ensure that only one of those threads runs the
+ // blocking GC. The rest of the threads should instead wait for the
+ // blocking GC to complete.
+ if (native_blocking_gc_in_progress_) {
+ do {
+ native_blocking_gc_cond_->Wait(self);
+ } while (native_blocking_gcs_finished_ == initial_gcs_finished);
} else {
- CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
+ native_blocking_gc_in_progress_ = true;
+ run_gc = true;
}
}
+
+ if (run_gc) {
+ CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
+ RunFinalization(env, kNativeAllocationFinalizeTimeout);
+ CHECK(!env->ExceptionCheck());
+
+ MutexLock mu(self, *native_blocking_gc_lock_);
+ native_blocking_gc_in_progress_ = false;
+ native_blocking_gcs_finished_++;
+ native_blocking_gc_cond_->Broadcast(self);
+ }
+ } else if (new_value > NativeAllocationGcWatermark() && !IsGCRequestPending()) {
+ // Trigger another GC because there have been enough native bytes
+ // allocated since the last GC.
+ if (IsGcConcurrent()) {
+ RequestConcurrentGC(ThreadForEnv(env), /*force_full*/true);
+ } else {
+ CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
+ }
}
}
-void Heap::RegisterNativeFree(JNIEnv* env, size_t bytes) {
- size_t expected_size;
- {
- MutexLock mu(Thread::Current(), native_histogram_lock_);
- native_free_histogram_.AddValue(bytes);
- }
+void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
+ // Take the bytes freed out of new_native_bytes_allocated_ first. If
+ // new_native_bytes_allocated_ reaches zero, take the remaining bytes freed
+ // out of old_native_bytes_allocated_ to ensure all freed bytes are
+ // accounted for.
+ size_t allocated;
+ size_t new_freed_bytes;
do {
- expected_size = native_bytes_allocated_.LoadRelaxed();
- if (UNLIKELY(bytes > expected_size)) {
- ScopedObjectAccess soa(env);
- env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
- StringPrintf("Attempted to free %zd native bytes with only %zd native bytes "
- "registered as allocated", bytes, expected_size).c_str());
- break;
- }
- } while (!native_bytes_allocated_.CompareExchangeWeakRelaxed(expected_size,
- expected_size - bytes));
+ allocated = new_native_bytes_allocated_.LoadRelaxed();
+ new_freed_bytes = std::min(allocated, bytes);
+ } while (!new_native_bytes_allocated_.CompareExchangeWeakRelaxed(allocated,
+ allocated - new_freed_bytes));
+ if (new_freed_bytes < bytes) {
+ old_native_bytes_allocated_.FetchAndSubRelaxed(bytes - new_freed_bytes);
+ }
}
size_t Heap::GetTotalMemory() const {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 3a8e29b..a4d300b 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -260,9 +260,8 @@
REQUIRES_SHARED(Locks::mutator_lock_);
void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
- REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_);
- void RegisterNativeFree(JNIEnv* env, size_t bytes)
- REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_);
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*native_blocking_gc_lock_);
+ void RegisterNativeFree(JNIEnv* env, size_t bytes);
// Change the allocator, updates entrypoints.
void ChangeAllocator(AllocatorType allocator)
@@ -562,7 +561,7 @@
space::Space* FindSpaceFromAddress(const void* ptr) const
REQUIRES_SHARED(Locks::mutator_lock_);
- void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
+ void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);
// Do a pending collector transition.
void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
@@ -679,7 +678,7 @@
// GC performance measuring
void DumpGcPerformanceInfo(std::ostream& os)
- REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
+ REQUIRES(!*gc_complete_lock_);
void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);
// Thread pool.
@@ -979,10 +978,6 @@
void PostGcVerificationPaused(collector::GarbageCollector* gc)
REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
- // Update the watermark for the native allocated bytes based on the current number of native
- // bytes allocated and the target utilization ratio.
- void UpdateMaxNativeFootprint();
-
// Find a collector based on GC type.
collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
@@ -1066,6 +1061,31 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
+ collector::GcType NonStickyGcType() const {
+ return HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
+ }
+
+ // How large new_native_bytes_allocated_ can grow before we trigger a new
+ // GC.
+ ALWAYS_INLINE size_t NativeAllocationGcWatermark() const {
+ // Reuse max_free_ for the native allocation gc watermark, so that the
+ // native heap is treated in the same way as the Java heap in the case
+ // where the gc watermark update would exceed max_free_. Using max_free_
+ // instead of the target utilization means the watermark doesn't depend on
+ // the current number of registered native allocations.
+ return max_free_;
+ }
+
+ // How large new_native_bytes_allocated_ can grow while GC is in progress
+ // before we block the allocating thread to allow GC to catch up.
+ ALWAYS_INLINE size_t NativeAllocationBlockingGcWatermark() const {
+ // Historically the native allocations were bounded by growth_limit_. This
+ // uses that same value, dividing growth_limit_ by 2 to account for
+ // the fact that now the bound is relative to the number of retained
+ // registered native allocations rather than absolute.
+ return growth_limit_ / 2;
+ }
+
// All-known continuous spaces, where objects lie within fixed bounds.
std::vector<space::ContinuousSpace*> continuous_spaces_ GUARDED_BY(Locks::mutator_lock_);
@@ -1184,12 +1204,6 @@
// a GC should be triggered.
size_t max_allowed_footprint_;
- // The watermark at which a concurrent GC is requested by registerNativeAllocation.
- size_t native_footprint_gc_watermark_;
-
- // Whether or not we need to run finalizers in the next native allocation.
- bool native_need_to_run_finalization_;
-
// When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
// it completes ahead of an allocation failing.
size_t concurrent_start_bytes_;
@@ -1203,13 +1217,25 @@
// Number of bytes allocated. Adjusted after each allocation and free.
Atomic<size_t> num_bytes_allocated_;
- // Bytes which are allocated and managed by native code but still need to be accounted for.
- Atomic<size_t> native_bytes_allocated_;
+ // Number of registered native bytes allocated since the last time GC was
+ // triggered. Adjusted after each RegisterNativeAllocation and
+ // RegisterNativeFree. Used to determine when to trigger GC for native
+ // allocations.
+ // See the REDESIGN section of go/understanding-register-native-allocation.
+ Atomic<size_t> new_native_bytes_allocated_;
- // Native allocation stats.
- Mutex native_histogram_lock_;
- Histogram<uint64_t> native_allocation_histogram_;
- Histogram<uint64_t> native_free_histogram_;
+ // Number of registered native bytes allocated prior to the last time GC was
+ // triggered, for debugging purposes. The current number of registered
+ // native bytes is determined by taking the sum of
+ // old_native_bytes_allocated_ and new_native_bytes_allocated_.
+ Atomic<size_t> old_native_bytes_allocated_;
+
+ // Used for synchronization of blocking GCs triggered by
+ // RegisterNativeAllocation.
+ Mutex* native_blocking_gc_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ std::unique_ptr<ConditionVariable> native_blocking_gc_cond_ GUARDED_BY(native_blocking_gc_lock_);
+ bool native_blocking_gc_in_progress_ GUARDED_BY(native_blocking_gc_lock_);
+ uint32_t native_blocking_gcs_finished_ GUARDED_BY(native_blocking_gc_lock_);
// Number of bytes freed by thread local buffer revokes. This will
// cancel out the ahead-of-time bulk counting of bytes allocated in
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index e03958d..ffbca52 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -32,6 +32,7 @@
#include "base/scoped_flock.h"
#include "base/systrace.h"
#include "base/time_utils.h"
+#include "exec_utils.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "image-inl.h"
#include "image_space_fs.h"
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index b212d09..077f45e 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -23,7 +23,7 @@
#include "handle.h"
#include "obj_ptr-inl.h"
#include "thread-inl.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
diff --git a/runtime/handle_scope_test.cc b/runtime/handle_scope_test.cc
index aab1d9c..f888482 100644
--- a/runtime/handle_scope_test.cc
+++ b/runtime/handle_scope_test.cc
@@ -17,10 +17,12 @@
#include <type_traits>
#include "base/enums.h"
+#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "gtest/gtest.h"
#include "handle.h"
#include "handle_scope-inl.h"
+#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index fe6a6e9..3d3ad59 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -1169,7 +1169,7 @@
}
void Hprof::DumpHeapClass(mirror::Class* klass) {
- if (!klass->IsResolved() && !klass->IsErroneous()) {
+ if (!klass->IsResolved()) {
// Class is allocated but not yet resolved: we cannot access its fields or super class.
return;
}
diff --git a/runtime/image.cc b/runtime/image.cc
index 6d88895..54b099e 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -25,7 +25,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '5', '\0' }; // ArtMethod update
+const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '6', '\0' }; // Erroneous resolved class.
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index 0e66ae9..24ee227 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -25,7 +25,7 @@
#include "gc_root-inl.h"
#include "obj_ptr-inl.h"
#include "runtime-inl.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
namespace mirror {
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index c737119..9fbb2e9 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -25,7 +25,6 @@
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "utils.h"
-#include "verify_object-inl.h"
#include <cstdlib>
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 4ea1130..f11e2cb 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -88,11 +88,11 @@
}
void Instrumentation::InstallStubsForClass(mirror::Class* klass) {
- if (klass->IsErroneous()) {
- // We can't execute code in a erroneous class: do nothing.
- } else if (!klass->IsResolved()) {
+ if (!klass->IsResolved()) {
// We need the class to be resolved to install/uninstall stubs. Otherwise its methods
// could not be initialized or linked with regards to class inheritance.
+ } else if (klass->IsErroneousResolved()) {
+ // We can't execute code in an erroneous class: do nothing.
} else {
for (ArtMethod& method : klass->GetMethods(kRuntimePointerSize)) {
InstallStubsForMethod(&method);
@@ -105,10 +105,9 @@
method->SetEntryPointFromQuickCompiledCode(quick_code);
}
-bool Instrumentation::NeedDebugVersionForBootImageCode(ArtMethod* method, const void* code) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
+bool Instrumentation::NeedDebugVersionFor(ArtMethod* method) const REQUIRES_SHARED(Locks::mutator_lock_) {
return Dbg::IsDebuggerActive() &&
- Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
+ Runtime::Current()->IsJavaDebuggable() &&
!method->IsNative() &&
!method->IsProxyMethod();
}
@@ -132,9 +131,10 @@
if ((forced_interpret_only_ || IsDeoptimized(method)) && !method->IsNative()) {
new_quick_code = GetQuickToInterpreterBridge();
} else if (is_class_initialized || !method->IsStatic() || method->IsConstructor()) {
- new_quick_code = class_linker->GetQuickOatCodeFor(method);
- if (NeedDebugVersionForBootImageCode(method, new_quick_code)) {
+ if (NeedDebugVersionFor(method)) {
new_quick_code = GetQuickToInterpreterBridge();
+ } else {
+ new_quick_code = class_linker->GetQuickOatCodeFor(method);
}
} else {
new_quick_code = GetQuickResolutionStub();
@@ -148,13 +148,14 @@
// class, all its static methods code will be set to the instrumentation entry point.
// For more details, see ClassLinker::FixupStaticTrampolines.
if (is_class_initialized || !method->IsStatic() || method->IsConstructor()) {
- new_quick_code = class_linker->GetQuickOatCodeFor(method);
- if (NeedDebugVersionForBootImageCode(method, new_quick_code)) {
+ if (NeedDebugVersionFor(method)) {
// Oat code should not be used. Don't install instrumentation stub and
// use interpreter for instrumentation.
new_quick_code = GetQuickToInterpreterBridge();
} else if (entry_exit_stubs_installed_) {
new_quick_code = GetQuickInstrumentationEntryPoint();
+ } else {
+ new_quick_code = class_linker->GetQuickOatCodeFor(method);
}
} else {
new_quick_code = GetQuickResolutionStub();
@@ -557,10 +558,8 @@
}
Instrumentation::InstrumentationLevel Instrumentation::GetCurrentInstrumentationLevel() const {
- if (interpreter_stubs_installed_ && interpret_only_) {
+ if (interpreter_stubs_installed_) {
return InstrumentationLevel::kInstrumentWithInterpreter;
- } else if (interpreter_stubs_installed_) {
- return InstrumentationLevel::kInstrumentWithInterpreterAndJit;
} else if (entry_exit_stubs_installed_) {
return InstrumentationLevel::kInstrumentWithInstrumentationStubs;
} else {
@@ -569,11 +568,8 @@
}
bool Instrumentation::RequiresInstrumentationInstallation(InstrumentationLevel new_level) const {
- // We need to reinstall instrumentation if we go to a different level or if the current level is
- // kInstrumentWithInterpreterAndJit since that level does not force all code to always use the
- // interpreter and so we might have started running optimized code again.
- return new_level == InstrumentationLevel::kInstrumentWithInterpreterAndJit ||
- GetCurrentInstrumentationLevel() != new_level;
+ // We need to reinstall instrumentation if we go to a different level.
+ return GetCurrentInstrumentationLevel() != new_level;
}
void Instrumentation::ConfigureStubs(const char* key, InstrumentationLevel desired_level) {
@@ -604,7 +600,7 @@
Locks::mutator_lock_->AssertExclusiveHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
if (requested_level > InstrumentationLevel::kInstrumentNothing) {
- if (requested_level >= InstrumentationLevel::kInstrumentWithInterpreterAndJit) {
+ if (requested_level == InstrumentationLevel::kInstrumentWithInterpreter) {
interpreter_stubs_installed_ = true;
entry_exit_stubs_installed_ = true;
} else {
@@ -731,10 +727,12 @@
UpdateMethodsCodeImpl(method, quick_code);
}
-void Instrumentation::UpdateMethodsCodeFromDebugger(ArtMethod* method, const void* quick_code) {
- // When debugger attaches, we may update the entry points of all methods of a class
- // to the interpreter bridge. A method's declaring class might not be in resolved
- // state yet in that case.
+void Instrumentation::UpdateMethodsCodeForJavaDebuggable(ArtMethod* method,
+ const void* quick_code) {
+ // When the runtime is set to Java debuggable, we may update the entry points of
+ // all methods of a class to the interpreter bridge. A method's declaring class
+ // might not be in resolved state yet in that case, so we bypass the DCHECK in
+ // UpdateMethodsCode.
UpdateMethodsCodeImpl(method, quick_code);
}
@@ -819,10 +817,9 @@
!method->GetDeclaringClass()->IsInitialized()) {
UpdateEntrypoints(method, GetQuickResolutionStub());
} else {
- const void* quick_code = class_linker->GetQuickOatCodeFor(method);
- if (NeedDebugVersionForBootImageCode(method, quick_code)) {
- quick_code = GetQuickToInterpreterBridge();
- }
+ const void* quick_code = NeedDebugVersionFor(method)
+ ? GetQuickToInterpreterBridge()
+ : class_linker->GetQuickOatCodeFor(method);
UpdateEntrypoints(method, quick_code);
}
@@ -879,14 +876,6 @@
return !deoptimization_enabled_ && !interpreter_stubs_installed_;
}
-// TODO we don't check deoptimization_enabled_ because currently there isn't really any support for
-// multiple users of instrumentation. Since this is just a temporary state anyway pending work to
-// ensure that the current_method doesn't get kept across suspend points this should be okay.
-// TODO Remove once b/33630159 is resolved.
-void Instrumentation::ReJitEverything(const char* key) {
- ConfigureStubs(key, InstrumentationLevel::kInstrumentWithInterpreterAndJit);
-}
-
void Instrumentation::DeoptimizeEverything(const char* key) {
CHECK(deoptimization_enabled_);
ConfigureStubs(key, InstrumentationLevel::kInstrumentWithInterpreter);
@@ -1114,7 +1103,7 @@
bool deoptimize = (visitor.caller != nullptr) &&
(interpreter_stubs_installed_ || IsDeoptimized(visitor.caller) ||
Dbg::IsForcedInterpreterNeededForUpcall(self, visitor.caller));
- if (deoptimize && Runtime::Current()->IsDeoptimizeable(*return_pc)) {
+ if (deoptimize && Runtime::Current()->IsAsyncDeoptimizeable(*return_pc)) {
if (kVerboseInstrumentation) {
LOG(INFO) << "Deoptimizing "
<< visitor.caller->PrettyMethod()
@@ -1132,6 +1121,10 @@
return GetTwoWordSuccessValue(*return_pc,
reinterpret_cast<uintptr_t>(GetQuickDeoptimizationEntryPoint()));
} else {
+ if (deoptimize && !Runtime::Current()->IsAsyncDeoptimizeable(*return_pc)) {
+ LOG(WARNING) << "Got a deoptimization request on un-deoptimizable " << method->PrettyMethod()
+ << " at PC " << reinterpret_cast<void*>(*return_pc);
+ }
if (kVerboseInstrumentation) {
LOG(INFO) << "Returning from " << method->PrettyMethod()
<< " to PC " << reinterpret_cast<void*>(*return_pc);
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 05c0aaa..01071a5 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -133,9 +133,6 @@
enum class InstrumentationLevel {
kInstrumentNothing, // execute without instrumentation
kInstrumentWithInstrumentationStubs, // execute with instrumentation entry/exit stubs
- kInstrumentWithInterpreterAndJit, // execute with interpreter initially and later the JIT
- // (if it is enabled). This level is special in that it
- // always requires re-instrumentation.
kInstrumentWithInterpreter // execute with interpreter
};
@@ -166,13 +163,6 @@
}
bool ShouldNotifyMethodEnterExitEvents() const REQUIRES_SHARED(Locks::mutator_lock_);
- // Executes everything with the interpreter/jit (if available).
- void ReJitEverything(const char* key)
- REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
- REQUIRES(!Locks::thread_list_lock_,
- !Locks::classlinker_classes_lock_,
- !deoptimized_methods_lock_);
-
// Executes everything with interpreter.
void DeoptimizeEverything(const char* key)
REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
@@ -239,7 +229,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
// Update the code of a method respecting any installed stubs from debugger.
- void UpdateMethodsCodeFromDebugger(ArtMethod* method, const void* quick_code)
+ void UpdateMethodsCodeForJavaDebuggable(ArtMethod* method, const void* quick_code)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
// Get the quick code for the given method. More efficient than asking the class linker as it
@@ -264,7 +254,7 @@
// Code is in boot image oat file which isn't compiled as debuggable.
// Need debug version (interpreter or jitted) if that's the case.
- bool NeedDebugVersionForBootImageCode(ArtMethod* method, const void* code) const
+ bool NeedDebugVersionFor(ArtMethod* method) const
REQUIRES_SHARED(Locks::mutator_lock_);
bool AreExitStubsInstalled() const {
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index aeb438f..7ef3508 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -255,17 +255,11 @@
}
}
ArtMethod* method = shadow_frame.GetMethod();
- // MethodVerifier refuses methods with string_idx out of bounds.
- DCHECK_LT(string_idx.index_ % mirror::DexCache::kDexCacheStringCacheSize,
- method->GetDexFile()->NumStringIds());
- ObjPtr<mirror::String> string_ptr =
- mirror::StringDexCachePair::Lookup(method->GetDexCache()->GetStrings(),
- string_idx.index_,
- mirror::DexCache::kDexCacheStringCacheSize).Read();
+ ObjPtr<mirror::String> string_ptr = method->GetDexCache()->GetResolvedString(string_idx);
if (UNLIKELY(string_ptr == nullptr)) {
StackHandleScope<1> hs(self);
Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
- string_ptr = Runtime::Current()->GetClassLinker()->ResolveString(*method->GetDexFile(),
+ string_ptr = Runtime::Current()->GetClassLinker()->ResolveString(*dex_cache->GetDexFile(),
string_idx,
dex_cache);
}
diff --git a/runtime/interpreter/mterp/mips64/bincmp.S b/runtime/interpreter/mterp/mips64/bincmp.S
index 07b1210..c2bca91 100644
--- a/runtime/interpreter/mterp/mips64/bincmp.S
+++ b/runtime/interpreter/mterp/mips64/bincmp.S
@@ -6,7 +6,6 @@
* For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
*/
/* if-cmp vA, vB, +CCCC */
- .extern MterpProfileBranch
ext a2, rINST, 8, 4 # a2 <- A
ext a3, rINST, 12, 4 # a3 <- B
lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
diff --git a/runtime/interpreter/mterp/mips64/op_packed_switch.S b/runtime/interpreter/mterp/mips64/op_packed_switch.S
index 27ce580..44e77a4 100644
--- a/runtime/interpreter/mterp/mips64/op_packed_switch.S
+++ b/runtime/interpreter/mterp/mips64/op_packed_switch.S
@@ -10,7 +10,6 @@
*/
/* op vAA, +BBBBBBBB */
.extern $func
- .extern MterpProfileBranch
lh a0, 2(rPC) # a0 <- bbbb (lo)
lh a1, 4(rPC) # a1 <- BBBB (hi)
srl a3, rINST, 8 # a3 <- AA
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 369c261..75ab91a 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -768,38 +768,32 @@
return MterpSetUpHotnessCountdown(method, shadow_frame);
}
-// TUNING: Unused by arm/arm64/x86/x86_64. Remove when mips/mips64 mterps support batch updates.
-extern "C" size_t MterpProfileBranch(Thread* self, ShadowFrame* shadow_frame, int32_t offset)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* method = shadow_frame->GetMethod();
- JValue* result = shadow_frame->GetResultRegister();
- uint32_t dex_pc = shadow_frame->GetDexPC();
- jit::Jit* jit = Runtime::Current()->GetJit();
- if ((jit != nullptr) && (offset <= 0)) {
- jit->AddSamples(self, method, 1, /*with_backedges*/ true);
- }
- int16_t countdown_value = MterpSetUpHotnessCountdown(method, shadow_frame);
- if (countdown_value == jit::kJitCheckForOSR) {
- return jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result);
- } else {
- return false;
- }
-}
-
extern "C" size_t MterpMaybeDoOnStackReplacement(Thread* self,
ShadowFrame* shadow_frame,
int32_t offset)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* method = shadow_frame->GetMethod();
- JValue* result = shadow_frame->GetResultRegister();
- uint32_t dex_pc = shadow_frame->GetDexPC();
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (offset <= 0) {
- // Keep updating hotness in case a compilation request was dropped. Eventually it will retry.
- jit->AddSamples(self, method, 1, /*with_backedges*/ true);
+ int16_t osr_countdown = shadow_frame->GetCachedHotnessCountdown() - 1;
+ bool did_osr = false;
+ /*
+ * To reduce the cost of polling the compiler to determine whether the requested OSR
+ * compilation has completed, only check every Nth time. NOTE: the "osr_countdown <= 0"
+ * condition is satisfied either by the decrement below or the initial setting of
+ * the cached countdown field to kJitCheckForOSR, which elsewhere is asserted to be -1.
+ */
+ if (osr_countdown <= 0) {
+ ArtMethod* method = shadow_frame->GetMethod();
+ JValue* result = shadow_frame->GetResultRegister();
+ uint32_t dex_pc = shadow_frame->GetDexPC();
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ osr_countdown = jit::Jit::kJitRecheckOSRThreshold;
+ if (offset <= 0) {
+ // Keep updating hotness in case a compilation request was dropped. Eventually it will retry.
+ jit->AddSamples(self, method, osr_countdown, /*with_backedges*/ true);
+ }
+ did_osr = jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result);
}
- // Assumes caller has already determined that an OSR check is appropriate.
- return jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result);
+ shadow_frame->SetCachedHotnessCountdown(osr_countdown);
+ return did_osr;
}
} // namespace interpreter
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
index bf09666..013bb32 100644
--- a/runtime/interpreter/mterp/out/mterp_mips64.S
+++ b/runtime/interpreter/mterp/out/mterp_mips64.S
@@ -1174,7 +1174,6 @@
*/
/* op vAA, +BBBBBBBB */
.extern MterpDoPackedSwitch
- .extern MterpProfileBranch
lh a0, 2(rPC) # a0 <- bbbb (lo)
lh a1, 4(rPC) # a1 <- BBBB (hi)
srl a3, rINST, 8 # a3 <- AA
@@ -1201,7 +1200,6 @@
*/
/* op vAA, +BBBBBBBB */
.extern MterpDoSparseSwitch
- .extern MterpProfileBranch
lh a0, 2(rPC) # a0 <- bbbb (lo)
lh a1, 4(rPC) # a1 <- BBBB (hi)
srl a3, rINST, 8 # a3 <- AA
@@ -1396,7 +1394,6 @@
* For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
*/
/* if-cmp vA, vB, +CCCC */
- .extern MterpProfileBranch
ext a2, rINST, 8, 4 # a2 <- A
ext a3, rINST, 12, 4 # a3 <- B
lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
@@ -1423,7 +1420,6 @@
* For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
*/
/* if-cmp vA, vB, +CCCC */
- .extern MterpProfileBranch
ext a2, rINST, 8, 4 # a2 <- A
ext a3, rINST, 12, 4 # a3 <- B
lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
@@ -1450,7 +1446,6 @@
* For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
*/
/* if-cmp vA, vB, +CCCC */
- .extern MterpProfileBranch
ext a2, rINST, 8, 4 # a2 <- A
ext a3, rINST, 12, 4 # a3 <- B
lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
@@ -1477,7 +1472,6 @@
* For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
*/
/* if-cmp vA, vB, +CCCC */
- .extern MterpProfileBranch
ext a2, rINST, 8, 4 # a2 <- A
ext a3, rINST, 12, 4 # a3 <- B
lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
@@ -1504,7 +1498,6 @@
* For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
*/
/* if-cmp vA, vB, +CCCC */
- .extern MterpProfileBranch
ext a2, rINST, 8, 4 # a2 <- A
ext a3, rINST, 12, 4 # a3 <- B
lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
@@ -1531,7 +1524,6 @@
* For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
*/
/* if-cmp vA, vB, +CCCC */
- .extern MterpProfileBranch
ext a2, rINST, 8, 4 # a2 <- A
ext a3, rINST, 12, 4 # a3 <- B
lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index feb6e08..371e2f1 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -401,6 +401,25 @@
result->SetL(constructor);
}
+void UnstartedRuntime::UnstartedClassGetDeclaringClass(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> klass(hs.NewHandle(
+ reinterpret_cast<mirror::Class*>(shadow_frame->GetVRegReference(arg_offset))));
+ if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+ result->SetL(nullptr);
+ return;
+ }
+ // Return null for anonymous classes.
+ JValue is_anon_result;
+ UnstartedClassIsAnonymousClass(self, shadow_frame, &is_anon_result, arg_offset);
+ if (is_anon_result.GetZ() != 0) {
+ result->SetL(nullptr);
+ return;
+ }
+ result->SetL(annotations::GetDeclaringClass(klass));
+}
+
void UnstartedRuntime::UnstartedClassGetEnclosingClass(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
StackHandleScope<1> hs(self);
@@ -420,6 +439,23 @@
result->SetI(mirror::Class::GetInnerClassFlags(klass, default_value));
}
+void UnstartedRuntime::UnstartedClassIsAnonymousClass(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> klass(hs.NewHandle(
+ reinterpret_cast<mirror::Class*>(shadow_frame->GetVRegReference(arg_offset))));
+ if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+ result->SetZ(false);
+ return;
+ }
+ mirror::String* class_name = nullptr;
+ if (!annotations::GetInnerClass(klass, &class_name)) {
+ result->SetZ(false);
+ return;
+ }
+ result->SetZ(class_name == nullptr);
+}
+
static std::unique_ptr<MemMap> FindAndExtractEntry(const std::string& jar_file,
const char* entry_name,
size_t* size,
diff --git a/runtime/interpreter/unstarted_runtime_list.h b/runtime/interpreter/unstarted_runtime_list.h
index b8553b5..96b35e4 100644
--- a/runtime/interpreter/unstarted_runtime_list.h
+++ b/runtime/interpreter/unstarted_runtime_list.h
@@ -28,8 +28,10 @@
V(ClassGetDeclaredField, "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)") \
V(ClassGetDeclaredMethod, "java.lang.reflect.Method java.lang.Class.getDeclaredMethodInternal(java.lang.String, java.lang.Class[])") \
V(ClassGetDeclaredConstructor, "java.lang.reflect.Constructor java.lang.Class.getDeclaredConstructorInternal(java.lang.Class[])") \
+ V(ClassGetDeclaringClass, "java.lang.Class java.lang.Class.getDeclaringClass()") \
V(ClassGetEnclosingClass, "java.lang.Class java.lang.Class.getEnclosingClass()") \
V(ClassGetInnerClassFlags, "int java.lang.Class.getInnerClassFlags(int)") \
+ V(ClassIsAnonymousClass, "boolean java.lang.Class.isAnonymousClass()") \
V(ClassLoaderGetResourceAsStream, "java.io.InputStream java.lang.ClassLoader.getResourceAsStream(java.lang.String)") \
V(VmClassLoaderFindLoadedClass, "java.lang.Class java.lang.VMClassLoader.findLoadedClass(java.lang.ClassLoader, java.lang.String)") \
V(VoidLookupType, "java.lang.Class java.lang.Void.lookupType()") \
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index b190c81..ae55f4c 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -885,5 +885,64 @@
ShadowFrame::DeleteDeoptimizedFrame(tmp);
}
+TEST_F(UnstartedRuntimeTest, IsAnonymousClass) {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+
+ JValue result;
+ ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+
+ mirror::Class* class_klass = mirror::Class::GetJavaLangClass();
+ shadow_frame->SetVRegReference(0, class_klass);
+ UnstartedClassIsAnonymousClass(self, shadow_frame, &result, 0);
+ EXPECT_EQ(result.GetZ(), 0);
+
+ jobject class_loader = LoadDex("Nested");
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader)));
+ mirror::Class* c = class_linker_->FindClass(soa.Self(), "LNested$1;", loader);
+ ASSERT_TRUE(c != nullptr);
+ shadow_frame->SetVRegReference(0, c);
+ UnstartedClassIsAnonymousClass(self, shadow_frame, &result, 0);
+ EXPECT_EQ(result.GetZ(), 1);
+
+ ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
+}
+
+TEST_F(UnstartedRuntimeTest, GetDeclaringClass) {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+
+ JValue result;
+ ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+
+ jobject class_loader = LoadDex("Nested");
+ StackHandleScope<4> hs(self);
+ Handle<mirror::ClassLoader> loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader)));
+
+ Handle<mirror::Class> nested_klass(hs.NewHandle(
+ class_linker_->FindClass(soa.Self(), "LNested;", loader)));
+ Handle<mirror::Class> inner_klass(hs.NewHandle(
+ class_linker_->FindClass(soa.Self(), "LNested$Inner;", loader)));
+ Handle<mirror::Class> anon_klass(hs.NewHandle(
+ class_linker_->FindClass(soa.Self(), "LNested$1;", loader)));
+
+ shadow_frame->SetVRegReference(0, nested_klass.Get());
+ UnstartedClassGetDeclaringClass(self, shadow_frame, &result, 0);
+ EXPECT_EQ(result.GetL(), nullptr);
+
+ shadow_frame->SetVRegReference(0, inner_klass.Get());
+ UnstartedClassGetDeclaringClass(self, shadow_frame, &result, 0);
+ EXPECT_EQ(result.GetL(), nested_klass.Get());
+
+ shadow_frame->SetVRegReference(0, anon_klass.Get());
+ UnstartedClassGetDeclaringClass(self, shadow_frame, &result, 0);
+ EXPECT_EQ(result.GetL(), nullptr);
+
+ ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
+}
+
} // namespace interpreter
} // namespace art
diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc
index 170887e..4615574 100644
--- a/runtime/jdwp/object_registry.cc
+++ b/runtime/jdwp/object_registry.cc
@@ -19,6 +19,7 @@
#include "handle_scope-inl.h"
#include "jni_internal.h"
#include "mirror/class.h"
+#include "mirror/throwable.h"
#include "obj_ptr-inl.h"
#include "scoped_thread_state_change-inl.h"
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 6deb03d..fec3c4f 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -145,7 +145,12 @@
cumulative_timings_("JIT timings"),
memory_use_("Memory used for compilation", 16),
lock_("JIT memory use lock"),
- use_jit_compilation_(true) {}
+ use_jit_compilation_(true),
+ hot_method_threshold_(0),
+ warm_method_threshold_(0),
+ osr_method_threshold_(0),
+ priority_thread_weight_(0),
+ invoke_transition_weight_(0) {}
Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
DCHECK(options->UseJitCompilation() || options->GetProfileSaverOptions().IsEnabled());
@@ -289,7 +294,11 @@
void Jit::CreateThreadPool() {
// There is a DCHECK in the 'AddSamples' method to ensure the tread pool
// is not null when we instrument.
- thread_pool_.reset(new ThreadPool("Jit thread pool", 1));
+
+ // We need peers as we may report the JIT thread, e.g., in the debugger.
+ constexpr bool kJitPoolNeedsPeers = true;
+ thread_pool_.reset(new ThreadPool("Jit thread pool", 1, kJitPoolNeedsPeers));
+
thread_pool_->SetPthreadPriority(kJitPoolThreadPthreadPriority);
Start();
}
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 4112142..d566799 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -54,6 +54,8 @@
static constexpr size_t kDefaultCompileThreshold = kStressMode ? 2 : 10000;
static constexpr size_t kDefaultPriorityThreadWeightRatio = 1000;
static constexpr size_t kDefaultInvokeTransitionWeightRatio = 500;
+ // How frequently the interpreter should check whether the requested OSR compilation is ready.
+ static constexpr int16_t kJitRecheckOSRThreshold = 100;
virtual ~Jit();
static Jit* Create(JitOptions* options, std::string* error_msg);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 6336cdd..f5151b5 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -594,7 +594,7 @@
VLOG(jit) << "JIT discarded jitted code due to invalid single-implementation assumptions.";
return nullptr;
}
- DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsDebuggable())
+ DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsJavaDebuggable())
<< "Should not be using cha on debuggable apps/runs!";
for (ArtMethod* single_impl : cha_single_implementation_list) {
@@ -1141,8 +1141,17 @@
return nullptr;
}
if (kIsDebugBuild && method != nullptr) {
- DCHECK_EQ(it->second, method)
- << ArtMethod::PrettyMethod(method) << " " << ArtMethod::PrettyMethod(it->second) << " "
+ // When we are walking the stack to redefine classes and creating obsolete methods it is
+ // possible that we might have updated the method_code_map by making this method obsolete in a
+ // previous frame. Therefore we should just check that the non-obsolete version of this method
+ // is the one we expect. We change to the non-obsolete versions in the error message since the
+ // obsolete version of the method might not be fully initialized yet. This situation can only
+ // occur when we are in the process of allocating and setting up obsolete methods. Otherwise
+ // method and it->second should be identical. (See runtime/openjdkjvmti/ti_redefine.cc for more
+ // information.)
+ DCHECK_EQ(it->second->GetNonObsoleteMethod(), method->GetNonObsoleteMethod())
+ << ArtMethod::PrettyMethod(method->GetNonObsoleteMethod()) << " "
+ << ArtMethod::PrettyMethod(it->second->GetNonObsoleteMethod()) << " "
<< std::hex << pc;
}
return method_header;
diff --git a/runtime/memory_region.cc b/runtime/memory_region.cc
index a5c70c3..13cc5c9 100644
--- a/runtime/memory_region.cc
+++ b/runtime/memory_region.cc
@@ -29,8 +29,39 @@
CHECK_GT(from.size(), 0U);
CHECK_GE(this->size(), from.size());
CHECK_LE(offset, this->size() - from.size());
- memmove(reinterpret_cast<void*>(start() + offset),
- from.pointer(), from.size());
+ memmove(reinterpret_cast<void*>(begin() + offset), from.pointer(), from.size());
+}
+
+void MemoryRegion::StoreBits(uintptr_t bit_offset, uint32_t value, size_t length) {
+ DCHECK_LE(value, MaxInt<uint32_t>(length));
+ DCHECK_LE(length, BitSizeOf<uint32_t>());
+ DCHECK_LE(bit_offset + length, size_in_bits());
+ if (length == 0) {
+ return;
+ }
+ // Bits are stored in this order {7 6 5 4 3 2 1 0}.
+ // How many remaining bits in current byte is (bit_offset % kBitsPerByte) + 1.
+ uint8_t* out = ComputeInternalPointer<uint8_t>(bit_offset >> kBitsPerByteLog2);
+ size_t orig_len = length;
+ uint32_t orig_value = value;
+ uintptr_t bit_remainder = bit_offset % kBitsPerByte;
+ while (true) {
+ const uintptr_t remaining_bits = kBitsPerByte - bit_remainder;
+ if (length <= remaining_bits) {
+ // Length is smaller than all of remainder bits.
+ size_t mask = ((1 << length) - 1) << bit_remainder;
+ *out = (*out & ~mask) | (value << bit_remainder);
+ break;
+ }
+ // Copy remaining bits in current byte.
+ size_t value_mask = (1 << remaining_bits) - 1;
+ *out = (*out & ~(value_mask << bit_remainder)) | ((value & value_mask) << bit_remainder);
+ value >>= remaining_bits;
+ bit_remainder = 0;
+ length -= remaining_bits;
+ ++out;
+ }
+ DCHECK_EQ(LoadBits(bit_offset, orig_len), orig_value) << bit_offset << " " << orig_len;
}
} // namespace art
diff --git a/runtime/memory_region.h b/runtime/memory_region.h
index fe3f917..7cf5d49 100644
--- a/runtime/memory_region.h
+++ b/runtime/memory_region.h
@@ -35,6 +35,12 @@
// of the region.
class MemoryRegion FINAL : public ValueObject {
public:
+ struct ContentEquals {
+ constexpr bool operator()(const MemoryRegion& lhs, const MemoryRegion& rhs) const {
+ return lhs.size() == rhs.size() && memcmp(lhs.begin(), rhs.begin(), lhs.size()) == 0;
+ }
+ };
+
MemoryRegion() : pointer_(nullptr), size_(0) {}
MemoryRegion(void* pointer_in, uintptr_t size_in) : pointer_(pointer_in), size_(size_in) {}
@@ -46,8 +52,8 @@
return OFFSETOF_MEMBER(MemoryRegion, pointer_);
}
- uint8_t* start() const { return reinterpret_cast<uint8_t*>(pointer_); }
- uint8_t* end() const { return start() + size_; }
+ uint8_t* begin() const { return reinterpret_cast<uint8_t*>(pointer_); }
+ uint8_t* end() const { return begin() + size_; }
// Load value of type `T` at `offset`. The memory address corresponding
// to `offset` should be word-aligned (on ARM, this is a requirement).
@@ -124,11 +130,35 @@
// The bit at the smallest offset is the least significant bit in the
// loaded value. `length` must not be larger than the number of bits
// contained in the return value (32).
- uint32_t LoadBits(uintptr_t bit_offset, size_t length) const {
- CHECK_LE(length, sizeof(uint32_t) * kBitsPerByte);
- uint32_t value = 0u;
+ ALWAYS_INLINE uint32_t LoadBits(uintptr_t bit_offset, size_t length) const {
+ DCHECK_LE(length, BitSizeOf<uint32_t>());
+ DCHECK_LE(bit_offset + length, size_in_bits());
+ if (UNLIKELY(length == 0)) {
+ // Do not touch any memory if the range is empty.
+ return 0;
+ }
+ const uint8_t* address = begin() + bit_offset / kBitsPerByte;
+ const uint32_t shift = bit_offset & (kBitsPerByte - 1);
+ // Load the value (reading only the strictly needed bytes).
+ const uint32_t load_bit_count = shift + length;
+ uint32_t value = address[0] >> shift;
+ if (load_bit_count > 8) {
+ value |= static_cast<uint32_t>(address[1]) << (8 - shift);
+ if (load_bit_count > 16) {
+ value |= static_cast<uint32_t>(address[2]) << (16 - shift);
+ if (load_bit_count > 24) {
+ value |= static_cast<uint32_t>(address[3]) << (24 - shift);
+ if (load_bit_count > 32) {
+ value |= static_cast<uint32_t>(address[4]) << (32 - shift);
+ }
+ }
+ }
+ }
+ // Clear unwanted most significant bits.
+ uint32_t clear_bit_count = BitSizeOf(value) - length;
+ value = (value << clear_bit_count) >> clear_bit_count;
for (size_t i = 0; i < length; ++i) {
- value |= LoadBit(bit_offset + i) << i;
+ DCHECK_EQ((value >> i) & 1, LoadBit(bit_offset + i));
}
return value;
}
@@ -137,21 +167,22 @@
// `bit_offset`. The bit at the smallest offset is the least significant
// bit of the stored `value`. `value` must not be larger than `length`
// bits.
- void StoreBits(uintptr_t bit_offset, uint32_t value, size_t length) {
- CHECK_LE(value, MaxInt<uint32_t>(length));
- for (size_t i = 0; i < length; ++i) {
- bool ith_bit = value & (1 << i);
- StoreBit(bit_offset + i, ith_bit);
- }
- }
+ void StoreBits(uintptr_t bit_offset, uint32_t value, size_t length);
void CopyFrom(size_t offset, const MemoryRegion& from) const;
+ template<class Vector>
+ void CopyFromVector(size_t offset, Vector& vector) const {
+ if (!vector.empty()) {
+ CopyFrom(offset, MemoryRegion(vector.data(), vector.size()));
+ }
+ }
+
// Compute a sub memory region based on an existing one.
ALWAYS_INLINE MemoryRegion Subregion(uintptr_t offset, uintptr_t size_in) const {
CHECK_GE(this->size(), size_in);
CHECK_LE(offset, this->size() - size_in);
- return MemoryRegion(reinterpret_cast<void*>(start() + offset), size_in);
+ return MemoryRegion(reinterpret_cast<void*>(begin() + offset), size_in);
}
// Compute an extended memory region based on an existing one.
@@ -165,7 +196,7 @@
ALWAYS_INLINE T* ComputeInternalPointer(size_t offset) const {
CHECK_GE(size(), sizeof(T));
CHECK_LE(offset, size() - sizeof(T));
- return reinterpret_cast<T*>(start() + offset);
+ return reinterpret_cast<T*>(begin() + offset);
}
// Locate the bit with the given offset. Returns a pointer to the byte
diff --git a/runtime/memory_region_test.cc b/runtime/memory_region_test.cc
index 72e03a4..6634c60 100644
--- a/runtime/memory_region_test.cc
+++ b/runtime/memory_region_test.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "bit_memory_region.h"
#include "memory_region.h"
#include "gtest/gtest.h"
@@ -55,4 +56,35 @@
}
}
+TEST(MemoryRegion, TestBits) {
+ const size_t n = 8;
+ uint8_t data[n] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ MemoryRegion region(&data, n);
+ uint32_t value = 0xDEADBEEF;
+ // Try various offsets and lengths.
+ for (size_t bit_offset = 0; bit_offset < 2 * kBitsPerByte; ++bit_offset) {
+ for (size_t length = 0; length < 2 * kBitsPerByte; ++length) {
+ const uint32_t length_mask = (1 << length) - 1;
+ uint32_t masked_value = value & length_mask;
+ BitMemoryRegion bmr(region, bit_offset, length);
+ region.StoreBits(bit_offset, masked_value, length);
+ EXPECT_EQ(region.LoadBits(bit_offset, length), masked_value);
+ EXPECT_EQ(bmr.LoadBits(0, length), masked_value);
+ // Check adjacent bits to make sure they were not incorrectly cleared.
+ EXPECT_EQ(region.LoadBits(0, bit_offset), (1u << bit_offset) - 1);
+ EXPECT_EQ(region.LoadBits(bit_offset + length, length), length_mask);
+ region.StoreBits(bit_offset, length_mask, length);
+ // Store with bit memory region.
+ bmr.StoreBits(0, masked_value, length);
+ EXPECT_EQ(bmr.LoadBits(0, length), masked_value);
+ // Check adjacent bits to make sure they were not incorrectly cleared.
+ EXPECT_EQ(region.LoadBits(0, bit_offset), (1u << bit_offset) - 1);
+ EXPECT_EQ(region.LoadBits(bit_offset + length, length), length_mask);
+ region.StoreBits(bit_offset, length_mask, length);
+ // Flip the value to try different edge bit combinations.
+ value = ~value;
+ }
+ }
+}
+
} // namespace art
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index a5db0c0..f56226b 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -207,6 +207,19 @@
}
template<typename T>
+inline PrimitiveArray<T>* PrimitiveArray<T>::AllocateAndFill(Thread* self,
+ const T* data,
+ size_t length) {
+ StackHandleScope<1> hs(self);
+ Handle<PrimitiveArray<T>> arr(hs.NewHandle(PrimitiveArray<T>::Alloc(self, length)));
+ if (!arr.IsNull()) {
+ // Copy the data in. If the allocation failed, the copy is skipped and null is returned.
+ memcpy(arr->GetData(), data, sizeof(T) * length);
+ }
+ return arr.Get();
+}
+
+template<typename T>
inline PrimitiveArray<T>* PrimitiveArray<T>::Alloc(Thread* self, size_t length) {
Array* raw_array = Array::Alloc<true>(self,
GetArrayClass(),
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 19d300e..16cf30f 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -119,6 +119,10 @@
static PrimitiveArray<T>* Alloc(Thread* self, size_t length)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ static PrimitiveArray<T>* AllocateAndFill(Thread* self, const T* data, size_t length)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+
+
const T* GetData() const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<const T*>(GetRawData(sizeof(T), 0));
}
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 2fb8d28..2cff47e 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -65,15 +65,27 @@
OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
}
+inline void Class::SetSuperClass(ObjPtr<Class> new_super_class) {
+ // Super class is assigned once, except during class linker initialization.
+ if (kIsDebugBuild) {
+ ObjPtr<Class> old_super_class =
+ GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
+ DCHECK(old_super_class == nullptr || old_super_class == new_super_class);
+ }
+ DCHECK(new_super_class != nullptr);
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class);
+}
+
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline ClassLoader* Class::GetClassLoader() {
return GetFieldObject<ClassLoader, kVerifyFlags, kReadBarrierOption>(
OFFSET_OF_OBJECT_MEMBER(Class, class_loader_));
}
-template<VerifyObjectFlags kVerifyFlags>
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline DexCache* Class::GetDexCache() {
- return GetFieldObject<DexCache, kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_));
+ return GetFieldObject<DexCache, kVerifyFlags, kReadBarrierOption>(
+ OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_));
}
inline uint32_t Class::GetCopiedMethodsStartOffset() {
@@ -634,23 +646,6 @@
}
}
-template<VerifyObjectFlags kVerifyFlags>
-inline uint32_t Class::GetAccessFlags() {
- // Check class is loaded/retired or this is java.lang.String that has a
- // circularity issue during loading the names of its members
- DCHECK(IsIdxLoaded<kVerifyFlags>() || IsRetired<kVerifyFlags>() ||
- IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>() ||
- this == String::GetJavaLangString())
- << "IsIdxLoaded=" << IsIdxLoaded<kVerifyFlags>()
- << " IsRetired=" << IsRetired<kVerifyFlags>()
- << " IsErroneous=" <<
- IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()
- << " IsString=" << (this == String::GetJavaLangString())
- << " status= " << GetStatus<kVerifyFlags>()
- << " descriptor=" << PrettyDescriptor();
- return GetField32<kVerifyFlags>(AccessFlagsOffset());
-}
-
inline String* Class::GetName() {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Class, name_));
}
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 9964b73..1b8f3f8 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -115,7 +115,9 @@
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
bool class_linker_initialized = class_linker != nullptr && class_linker->IsInitialized();
if (LIKELY(class_linker_initialized)) {
- if (UNLIKELY(new_status <= old_status && new_status != kStatusError &&
+ if (UNLIKELY(new_status <= old_status &&
+ new_status != kStatusErrorUnresolved &&
+ new_status != kStatusErrorResolved &&
new_status != kStatusRetired)) {
LOG(FATAL) << "Unexpected change back of class status for " << h_this->PrettyClass()
<< " " << old_status << " -> " << new_status;
@@ -127,10 +129,12 @@
<< h_this->PrettyClass() << " " << old_status << " -> " << new_status;
}
}
- if (UNLIKELY(new_status == kStatusError)) {
- CHECK_NE(h_this->GetStatus(), kStatusError)
+ if (UNLIKELY(IsErroneous(new_status))) {
+ CHECK(!h_this->IsErroneous())
<< "Attempt to set as erroneous an already erroneous class "
- << h_this->PrettyClass();
+ << h_this->PrettyClass()
+ << " old_status: " << old_status << " new_status: " << new_status;
+ CHECK_EQ(new_status == kStatusErrorResolved, old_status >= kStatusResolved);
if (VLOG_IS_ON(class_linker)) {
LOG(ERROR) << "Setting " << h_this->PrettyDescriptor() << " to erroneous.";
if (self->IsExceptionPending()) {
@@ -177,7 +181,7 @@
// Class is a temporary one, ensure that waiters for resolution get notified of retirement
// so that they can grab the new version of the class from the class linker's table.
CHECK_LT(new_status, kStatusResolved) << h_this->PrettyDescriptor();
- if (new_status == kStatusRetired || new_status == kStatusError) {
+ if (new_status == kStatusRetired || new_status == kStatusErrorUnresolved) {
h_this->NotifyAll(self);
}
} else {
@@ -305,7 +309,7 @@
}
if (h_this->NumStaticFields() > 0) {
os << " static fields (" << h_this->NumStaticFields() << " entries):\n";
- if (h_this->IsResolved() || h_this->IsErroneous()) {
+ if (h_this->IsResolved()) {
for (size_t i = 0; i < h_this->NumStaticFields(); ++i) {
os << StringPrintf(" %2zd: %s\n", i,
ArtField::PrettyField(h_this->GetStaticField(i)).c_str());
@@ -316,7 +320,7 @@
}
if (h_this->NumInstanceFields() > 0) {
os << " instance fields (" << h_this->NumInstanceFields() << " entries):\n";
- if (h_this->IsResolved() || h_this->IsErroneous()) {
+ if (h_this->IsResolved()) {
for (size_t i = 0; i < h_this->NumInstanceFields(); ++i) {
os << StringPrintf(" %2zd: %s\n", i,
ArtField::PrettyField(h_this->GetInstanceField(i)).c_str());
@@ -1341,5 +1345,26 @@
return result;
}
+template<VerifyObjectFlags kVerifyFlags> void Class::GetAccessFlagsDCheck() {
+ // Check class is loaded/retired or this is java.lang.String that has a
+ // circularity issue during loading the names of its members
+ DCHECK(IsIdxLoaded<kVerifyFlags>() || IsRetired<kVerifyFlags>() ||
+ IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>() ||
+ this == String::GetJavaLangString())
+ << "IsIdxLoaded=" << IsIdxLoaded<kVerifyFlags>()
+ << " IsRetired=" << IsRetired<kVerifyFlags>()
+ << " IsErroneous=" <<
+ IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()
+ << " IsString=" << (this == String::GetJavaLangString())
+ << " status= " << GetStatus<kVerifyFlags>()
+ << " descriptor=" << PrettyDescriptor();
+}
+// Instantiate the common cases.
+template void Class::GetAccessFlagsDCheck<kVerifyNone>();
+template void Class::GetAccessFlagsDCheck<kVerifyThis>();
+template void Class::GetAccessFlagsDCheck<kVerifyReads>();
+template void Class::GetAccessFlagsDCheck<kVerifyWrites>();
+template void Class::GetAccessFlagsDCheck<kVerifyAll>();
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index fb2792a..d34f09c 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -84,6 +84,13 @@
// will be gc'ed once all refs to the class point to the newly
// cloned version.
//
+ // kStatusErrorUnresolved, kStatusErrorResolved: Class is erroneous. We need
+ // to distinguish between classes that have been resolved and classes that
+ // have not. This is important because the const-class instruction needs to
+ // return a previously resolved class even if its subsequent initialization
+ // failed. We also need this to decide whether to wrap a previous
+ // initialization failure in ClassDefNotFound error or not.
+ //
// kStatusNotReady: If a Class cannot be found in the class table by
// FindClass, it allocates an new one with AllocClass in the
// kStatusNotReady and calls LoadClass. Note if it does find a
@@ -119,8 +126,9 @@
//
// TODO: Explain the other states
enum Status {
- kStatusRetired = -2, // Retired, should not be used. Use the newly cloned one instead.
- kStatusError = -1,
+ kStatusRetired = -3, // Retired, should not be used. Use the newly cloned one instead.
+ kStatusErrorResolved = -2,
+ kStatusErrorUnresolved = -1,
kStatusNotReady = 0,
kStatusIdx = 1, // Loaded, DEX idx in super_class_type_idx_ and interfaces_type_idx_.
kStatusLoaded = 2, // DEX idx values resolved.
@@ -158,8 +166,25 @@
// Returns true if the class has failed to link.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool IsErroneousUnresolved() REQUIRES_SHARED(Locks::mutator_lock_) {
+ return GetStatus<kVerifyFlags>() == kStatusErrorUnresolved;
+ }
+
+ // Returns true if the class has failed to initialize.
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool IsErroneousResolved() REQUIRES_SHARED(Locks::mutator_lock_) {
+ return GetStatus<kVerifyFlags>() == kStatusErrorResolved;
+ }
+
+ // Returns true if the class status indicates that the class has failed to link or initialize.
+ static bool IsErroneous(Status status) {
+ return status == kStatusErrorUnresolved || status == kStatusErrorResolved;
+ }
+
+ // Returns true if the class has failed to link or initialize.
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsErroneous() REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetStatus<kVerifyFlags>() == kStatusError;
+ return IsErroneous(GetStatus<kVerifyFlags>());
}
// Returns true if the class has been loaded.
@@ -177,7 +202,8 @@
// Returns true if the class has been linked.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsResolved() REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetStatus<kVerifyFlags>() >= kStatusResolved;
+ Status status = GetStatus<kVerifyFlags>();
+ return status >= kStatusResolved || status == kStatusErrorResolved;
}
// Returns true if the class was compile-time verified.
@@ -205,7 +231,13 @@
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE uint32_t GetAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (kIsDebugBuild) {
+ GetAccessFlagsDCheck<kVerifyFlags>();
+ }
+ return GetField32<kVerifyFlags>(AccessFlagsOffset());
+ }
+
static MemberOffset AccessFlagsOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, access_flags_);
}
@@ -345,7 +377,7 @@
// be replaced with a class with the right size for embedded imt/vtable.
bool IsTemp() REQUIRES_SHARED(Locks::mutator_lock_) {
Status s = GetStatus();
- return s < Status::kStatusResolving && ShouldHaveEmbeddedVTable();
+ return s < Status::kStatusResolving && s != kStatusErrorResolved && ShouldHaveEmbeddedVTable();
}
String* GetName() REQUIRES_SHARED(Locks::mutator_lock_); // Returns the cached name.
@@ -657,14 +689,7 @@
// `This` and `klass` must be classes.
ObjPtr<Class> GetCommonSuperClass(Handle<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetSuperClass(ObjPtr<Class> new_super_class) REQUIRES_SHARED(Locks::mutator_lock_) {
- // Super class is assigned once, except during class linker initialization.
- ObjPtr<Class> old_super_class =
- GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
- DCHECK(old_super_class == nullptr || old_super_class == new_super_class);
- DCHECK(new_super_class != nullptr);
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class);
- }
+ void SetSuperClass(ObjPtr<Class> new_super_class) REQUIRES_SHARED(Locks::mutator_lock_);
bool HasSuperClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetSuperClass() != nullptr;
@@ -696,7 +721,8 @@
void DumpClass(std::ostream& os, int flags) REQUIRES_SHARED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
DexCache* GetDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
// Also updates the dex_cache_strings_ variable from new_dex_cache.
@@ -1017,7 +1043,7 @@
// Returns the number of instance fields containing reference types. Does not count fields in any
// super classes.
uint32_t NumReferenceInstanceFields() REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(IsResolved() || IsErroneous());
+ DCHECK(IsResolved());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_));
}
@@ -1045,7 +1071,7 @@
// Returns the number of static fields containing reference types.
uint32_t NumReferenceStaticFields() REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(IsResolved() || IsErroneous());
+ DCHECK(IsResolved());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
}
@@ -1370,6 +1396,9 @@
bool ProxyDescriptorEquals(const char* match) REQUIRES_SHARED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags>
+ void GetAccessFlagsDCheck() REQUIRES_SHARED(Locks::mutator_lock_);
+
// Check that the pointer size matches the one in the class linker.
ALWAYS_INLINE static void CheckPointerSize(PointerSize pointer_size);
diff --git a/runtime/mirror/method_handle_impl.h b/runtime/mirror/method_handle_impl.h
index abe999a..53d267b 100644
--- a/runtime/mirror/method_handle_impl.h
+++ b/runtime/mirror/method_handle_impl.h
@@ -19,12 +19,13 @@
#include "class.h"
#include "gc_root.h"
-#include "object.h"
+#include "object-inl.h"
#include "method_handles.h"
#include "method_type.h"
namespace art {
+struct MethodHandleOffsets;
struct MethodHandleImplOffsets;
namespace mirror {
@@ -84,10 +85,12 @@
static mirror::Class* StaticClass() REQUIRES_SHARED(Locks::mutator_lock_);
private:
+ // NOTE: cached_spread_invoker_ isn't used by the runtime.
+ HeapReference<mirror::MethodHandle> cached_spread_invoker_;
HeapReference<mirror::MethodType> nominal_type_;
HeapReference<mirror::MethodType> method_type_;
- uint64_t art_field_or_method_;
uint32_t handle_kind_;
+ uint64_t art_field_or_method_;
private:
static MemberOffset NominalTypeOffset() {
@@ -103,7 +106,7 @@
return MemberOffset(OFFSETOF_MEMBER(MethodHandle, handle_kind_));
}
- friend struct art::MethodHandleImplOffsets; // for verifying offset information
+ friend struct art::MethodHandleOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(MethodHandle);
};
@@ -119,6 +122,11 @@
static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
private:
+ static MemberOffset InfoOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(MethodHandleImpl, info_));
+ }
+
+ HeapReference<mirror::Object> info_; // Unused by the runtime.
static GcRoot<mirror::Class> static_class_; // java.lang.invoke.MethodHandleImpl.class
friend struct art::MethodHandleImplOffsets; // for verifying offset information
diff --git a/runtime/mirror/method_type_test.cc b/runtime/mirror/method_type_test.cc
index 03ab930..637bafd 100644
--- a/runtime/mirror/method_type_test.cc
+++ b/runtime/mirror/method_type_test.cc
@@ -19,12 +19,13 @@
#include <string>
#include <vector>
+#include "class-inl.h"
#include "class_linker.h"
+#include "class_loader.h"
#include "common_runtime_test.h"
#include "handle_scope-inl.h"
-#include "runtime/mirror/class.h"
-#include "runtime/mirror/class_loader.h"
-#include "scoped_thread_state_change.h"
+#include "object_array-inl.h"
+#include "scoped_thread_state_change-inl.h"
namespace art {
namespace mirror {
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 354410e..8e591e4 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -142,8 +142,10 @@
}
inline uint32_t Object::GetReadBarrierState(uintptr_t* fake_address_dependency) {
-#ifdef USE_BAKER_READ_BARRIER
- CHECK(kUseBakerReadBarrier);
+ if (!kUseBakerReadBarrier) {
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ }
#if defined(__arm__)
uintptr_t obj = reinterpret_cast<uintptr_t>(this);
uintptr_t result;
@@ -190,37 +192,29 @@
UNREACHABLE();
UNUSED(fake_address_dependency);
#endif
-#else // !USE_BAKER_READ_BARRIER
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
- UNUSED(fake_address_dependency);
-#endif
}
inline uint32_t Object::GetReadBarrierState() {
-#ifdef USE_BAKER_READ_BARRIER
+ if (!kUseBakerReadBarrier) {
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ }
DCHECK(kUseBakerReadBarrier);
LockWord lw(GetField<uint32_t, /*kIsVolatile*/false>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
uint32_t rb_state = lw.ReadBarrierState();
DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
return rb_state;
-#else
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
-#endif
}
inline uint32_t Object::GetReadBarrierStateAcquire() {
-#ifdef USE_BAKER_READ_BARRIER
- DCHECK(kUseBakerReadBarrier);
+ if (!kUseBakerReadBarrier) {
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ }
LockWord lw(GetFieldAcquire<uint32_t>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
uint32_t rb_state = lw.ReadBarrierState();
DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
return rb_state;
-#else
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
-#endif
}
inline uint32_t Object::GetMarkBit() {
@@ -233,23 +227,22 @@
}
inline void Object::SetReadBarrierState(uint32_t rb_state) {
-#ifdef USE_BAKER_READ_BARRIER
- DCHECK(kUseBakerReadBarrier);
+ if (!kUseBakerReadBarrier) {
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ }
DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
LockWord lw = GetLockWord(false);
lw.SetReadBarrierState(rb_state);
SetLockWord(lw, false);
-#else
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
- UNUSED(rb_state);
-#endif
}
template<bool kCasRelease>
inline bool Object::AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32_t rb_state) {
-#ifdef USE_BAKER_READ_BARRIER
- DCHECK(kUseBakerReadBarrier);
+ if (!kUseBakerReadBarrier) {
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ }
DCHECK(ReadBarrier::IsValidReadBarrierState(expected_rb_state)) << expected_rb_state;
DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
LockWord expected_lw;
@@ -272,11 +265,6 @@
CasLockWordWeakRelease(expected_lw, new_lw) :
CasLockWordWeakRelaxed(expected_lw, new_lw)));
return true;
-#else
- UNUSED(expected_rb_state, rb_state);
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
-#endif
}
inline bool Object::AtomicSetMarkBit(uint32_t expected_mark_bit, uint32_t mark_bit) {
@@ -691,19 +679,6 @@
field_offset, new_value);
}
-template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
-inline int32_t Object::GetField32(MemberOffset field_offset) {
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
- return GetField<int32_t, kIsVolatile>(field_offset);
-}
-
-template<VerifyObjectFlags kVerifyFlags>
-inline int32_t Object::GetField32Volatile(MemberOffset field_offset) {
- return GetField32<kVerifyFlags, true>(field_offset);
-}
-
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
bool kIsVolatile>
inline void Object::SetField32(MemberOffset field_offset, int32_t new_value) {
@@ -854,28 +829,6 @@
new_value);
}
-template<typename kSize, bool kIsVolatile>
-inline void Object::SetField(MemberOffset field_offset, kSize new_value) {
- uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
- kSize* addr = reinterpret_cast<kSize*>(raw_addr);
- if (kIsVolatile) {
- reinterpret_cast<Atomic<kSize>*>(addr)->StoreSequentiallyConsistent(new_value);
- } else {
- reinterpret_cast<Atomic<kSize>*>(addr)->StoreJavaData(new_value);
- }
-}
-
-template<typename kSize, bool kIsVolatile>
-inline kSize Object::GetField(MemberOffset field_offset) {
- const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
- const kSize* addr = reinterpret_cast<const kSize*>(raw_addr);
- if (kIsVolatile) {
- return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadSequentiallyConsistent();
- } else {
- return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadJavaData();
- }
-}
-
template<typename kSize>
inline kSize Object::GetFieldAcquire(MemberOffset field_offset) {
const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index db58a60..4541ce2 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_MIRROR_OBJECT_H_
#define ART_RUNTIME_MIRROR_OBJECT_H_
+#include "atomic.h"
#include "base/casts.h"
#include "base/enums.h"
#include "globals.h"
@@ -432,11 +433,18 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE int32_t GetField32(MemberOffset field_offset)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
+ return GetField<int32_t, kIsVolatile>(field_offset);
+ }
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE int32_t GetField32Volatile(MemberOffset field_offset)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return GetField32<kVerifyFlags, true>(field_offset);
+ }
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
@@ -611,10 +619,28 @@
private:
template<typename kSize, bool kIsVolatile>
ALWAYS_INLINE void SetField(MemberOffset field_offset, kSize new_value)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+ kSize* addr = reinterpret_cast<kSize*>(raw_addr);
+ if (kIsVolatile) {
+ reinterpret_cast<Atomic<kSize>*>(addr)->StoreSequentiallyConsistent(new_value);
+ } else {
+ reinterpret_cast<Atomic<kSize>*>(addr)->StoreJavaData(new_value);
+ }
+ }
+
template<typename kSize, bool kIsVolatile>
ALWAYS_INLINE kSize GetField(MemberOffset field_offset)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
+ const kSize* addr = reinterpret_cast<const kSize*>(raw_addr);
+ if (kIsVolatile) {
+ return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadSequentiallyConsistent();
+ } else {
+ return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadJavaData();
+ }
+ }
+
// Get a field with acquire semantics.
template<typename kSize>
ALWAYS_INLINE kSize GetFieldAcquire(MemberOffset field_offset)
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 071b0e2..a32003e 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -303,6 +303,7 @@
ArtMethod* owners_method,
uint32_t owners_dex_pc,
size_t num_waiters) {
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
const char* owners_filename;
int32_t owners_line_number = 0;
if (owners_method != nullptr) {
@@ -355,36 +356,44 @@
// Do this before releasing the lock so that we don't get deflated.
size_t num_waiters = num_waiters_;
++num_waiters_;
+
+ // If systrace logging is enabled, first look at the lock owner. Acquiring the monitor's
+ // lock and then re-acquiring the mutator lock can deadlock.
+ bool started_trace = false;
+ if (ATRACE_ENABLED()) {
+ if (owner_ != nullptr) { // Did the owner_ give the lock up?
+ std::ostringstream oss;
+ std::string name;
+ owner_->GetThreadName(name);
+ oss << PrettyContentionInfo(name,
+ owner_->GetTid(),
+ owners_method,
+ owners_dex_pc,
+ num_waiters);
+ // Add info for contending thread.
+ uint32_t pc;
+ ArtMethod* m = self->GetCurrentMethod(&pc);
+ const char* filename;
+ int32_t line_number;
+ TranslateLocation(m, pc, &filename, &line_number);
+ oss << " blocking from "
+ << ArtMethod::PrettyMethod(m) << "(" << (filename != nullptr ? filename : "null")
+ << ":" << line_number << ")";
+ ATRACE_BEGIN(oss.str().c_str());
+ started_trace = true;
+ }
+ }
+
monitor_lock_.Unlock(self); // Let go of locks in order.
self->SetMonitorEnterObject(GetObject());
{
+ ScopedThreadSuspension tsc(self, kBlocked); // Change to blocked and give up mutator_lock_.
uint32_t original_owner_thread_id = 0u;
- ScopedThreadStateChange tsc(self, kBlocked); // Change to blocked and give up mutator_lock_.
{
// Reacquire monitor_lock_ without mutator_lock_ for Wait.
MutexLock mu2(self, monitor_lock_);
if (owner_ != nullptr) { // Did the owner_ give the lock up?
original_owner_thread_id = owner_->GetThreadId();
- if (ATRACE_ENABLED()) {
- std::ostringstream oss;
- std::string name;
- owner_->GetThreadName(name);
- oss << PrettyContentionInfo(name,
- owner_->GetTid(),
- owners_method,
- owners_dex_pc,
- num_waiters);
- // Add info for contending thread.
- uint32_t pc;
- ArtMethod* m = self->GetCurrentMethod(&pc);
- const char* filename;
- int32_t line_number;
- TranslateLocation(m, pc, &filename, &line_number);
- oss << " blocking from "
- << ArtMethod::PrettyMethod(m) << "(" << (filename != nullptr ? filename : "null")
- << ":" << line_number << ")";
- ATRACE_BEGIN(oss.str().c_str());
- }
monitor_contenders_.Wait(self); // Still contended so wait.
}
}
@@ -414,6 +423,8 @@
sample_percent = 100 * wait_ms / lock_profiling_threshold_;
}
if (sample_percent != 0 && (static_cast<uint32_t>(rand() % 100) < sample_percent)) {
+ // Reacquire mutator_lock_ for logging.
+ ScopedObjectAccess soa(self);
if (wait_ms > kLongWaitMs && owners_method != nullptr) {
uint32_t pc;
ArtMethod* m = self->GetCurrentMethod(&pc);
@@ -441,9 +452,11 @@
}
}
}
- ATRACE_END();
}
}
+ if (started_trace) {
+ ATRACE_END();
+ }
self->SetMonitorEnterObject(nullptr);
monitor_lock_.Lock(self); // Reacquire locks in order.
--num_waiters_;
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 67b2e1c..0d24587 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -90,7 +90,8 @@
static void VMDebug_startMethodTracingFd(JNIEnv* env, jclass, jstring javaTraceFilename,
jobject javaFd, jint bufferSize, jint flags,
- jboolean samplingEnabled, jint intervalUs) {
+ jboolean samplingEnabled, jint intervalUs,
+ jboolean streamingOutput) {
int originalFd = jniGetFDFromFileDescriptor(env, javaFd);
if (originalFd < 0) {
return;
@@ -108,7 +109,10 @@
if (traceFilename.c_str() == nullptr) {
return;
}
- Trace::Start(traceFilename.c_str(), fd, bufferSize, flags, Trace::TraceOutputMode::kFile,
+ Trace::TraceOutputMode outputMode = streamingOutput
+ ? Trace::TraceOutputMode::kStreaming
+ : Trace::TraceOutputMode::kFile;
+ Trace::Start(traceFilename.c_str(), fd, bufferSize, flags, outputMode,
samplingEnabled ? Trace::TraceMode::kSampling : Trace::TraceMode::kMethodTracing,
intervalUs);
}
@@ -547,7 +551,7 @@
NATIVE_METHOD(VMDebug, startEmulatorTracing, "()V"),
NATIVE_METHOD(VMDebug, startInstructionCounting, "()V"),
NATIVE_METHOD(VMDebug, startMethodTracingDdmsImpl, "(IIZI)V"),
- NATIVE_METHOD(VMDebug, startMethodTracingFd, "(Ljava/lang/String;Ljava/io/FileDescriptor;IIZI)V"),
+ NATIVE_METHOD(VMDebug, startMethodTracingFd, "(Ljava/lang/String;Ljava/io/FileDescriptor;IIZIZ)V"),
NATIVE_METHOD(VMDebug, startMethodTracingFilename, "(Ljava/lang/String;IIZI)V"),
NATIVE_METHOD(VMDebug, stopAllocCounting, "()V"),
NATIVE_METHOD(VMDebug, stopEmulatorTracing, "()V"),
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 10fc90b..fd22d9e 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -71,7 +71,7 @@
static void EnableDebugFeatures(uint32_t debug_flags) {
// Must match values in com.android.internal.os.Zygote.
enum {
- DEBUG_ENABLE_DEBUGGER = 1,
+ DEBUG_ENABLE_JDWP = 1,
DEBUG_ENABLE_CHECKJNI = 1 << 1,
DEBUG_ENABLE_ASSERT = 1 << 2,
DEBUG_ENABLE_SAFEMODE = 1 << 3,
@@ -79,6 +79,7 @@
DEBUG_GENERATE_DEBUG_INFO = 1 << 5,
DEBUG_ALWAYS_JIT = 1 << 6,
DEBUG_NATIVE_DEBUGGABLE = 1 << 7,
+ DEBUG_JAVA_DEBUGGABLE = 1 << 8,
};
Runtime* const runtime = Runtime::Current();
@@ -100,11 +101,11 @@
debug_flags &= ~DEBUG_ENABLE_JNI_LOGGING;
}
- Dbg::SetJdwpAllowed((debug_flags & DEBUG_ENABLE_DEBUGGER) != 0);
- if ((debug_flags & DEBUG_ENABLE_DEBUGGER) != 0) {
+ Dbg::SetJdwpAllowed((debug_flags & DEBUG_ENABLE_JDWP) != 0);
+ if ((debug_flags & DEBUG_ENABLE_JDWP) != 0) {
EnableDebugger();
}
- debug_flags &= ~DEBUG_ENABLE_DEBUGGER;
+ debug_flags &= ~DEBUG_ENABLE_JDWP;
const bool safe_mode = (debug_flags & DEBUG_ENABLE_SAFEMODE) != 0;
if (safe_mode) {
@@ -130,6 +131,14 @@
debug_flags &= ~DEBUG_ALWAYS_JIT;
}
+ if ((debug_flags & DEBUG_JAVA_DEBUGGABLE) != 0) {
+ runtime->AddCompilerOption("--debuggable");
+ runtime->SetJavaDebuggable(true);
+ // Deoptimize the boot image as it may be non-debuggable.
+ runtime->DeoptimizeBootImage();
+ debug_flags &= ~DEBUG_JAVA_DEBUGGABLE;
+ }
+
if ((debug_flags & DEBUG_NATIVE_DEBUGGABLE) != 0) {
runtime->AddCompilerOption("--debuggable");
runtime->AddCompilerOption("--generate-debug-info");
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index ea266d1..f1d6ff5 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -25,7 +25,7 @@
#include "scoped_fast_native_object_access-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedLocalRef.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index fcb0175..195091f 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -25,7 +25,7 @@
#include "ScopedUtfChars.h"
#include "thread.h"
#include "thread_list.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index 284d2d1..a8fa7db 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -81,14 +81,12 @@
if (c != nullptr && c->IsErroneous()) {
cl->ThrowEarlierClassFailure(c.Ptr());
Thread* self = soa.Self();
- ObjPtr<mirror::Class> eiie_class =
- self->DecodeJObject(WellKnownClasses::java_lang_ExceptionInInitializerError)->AsClass();
ObjPtr<mirror::Class> iae_class =
self->DecodeJObject(WellKnownClasses::java_lang_IllegalAccessError)->AsClass();
ObjPtr<mirror::Class> ncdfe_class =
self->DecodeJObject(WellKnownClasses::java_lang_NoClassDefFoundError)->AsClass();
ObjPtr<mirror::Class> exception = self->GetException()->GetClass();
- if (exception == eiie_class || exception == iae_class || exception == ncdfe_class) {
+ if (exception == iae_class || exception == ncdfe_class) {
self->ThrowNewWrappedException("Ljava/lang/ClassNotFoundException;",
c->PrettyDescriptor().c_str());
}
diff --git a/runtime/native/java_lang_invoke_MethodHandleImpl.cc b/runtime/native/java_lang_invoke_MethodHandleImpl.cc
new file mode 100644
index 0000000..72a37f8
--- /dev/null
+++ b/runtime/native/java_lang_invoke_MethodHandleImpl.cc
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "java_lang_invoke_MethodHandleImpl.h"
+
+#include "art_method.h"
+#include "handle_scope-inl.h"
+#include "jni_internal.h"
+#include "mirror/field.h"
+#include "mirror/method.h"
+#include "mirror/method_handle_impl.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+
+namespace art {
+
+static jobject MethodHandleImpl_getMemberInternal(JNIEnv* env, jobject thiz) {
+ ScopedObjectAccess soa(env);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::MethodHandleImpl> handle = hs.NewHandle(
+ soa.Decode<mirror::MethodHandleImpl>(thiz));
+
+ // Check the handle kind, we need to materialize a Field for field accessors,
+ // a Method for method invokers and a Constructor for constructors.
+ const mirror::MethodHandle::Kind handle_kind = handle->GetHandleKind();
+
+ // We check this here because we pass false to CreateFromArtField and
+ // CreateFromArtMethod.
+ DCHECK(!Runtime::Current()->IsActiveTransaction());
+
+ MutableHandle<mirror::Object> h_object(hs.NewHandle<mirror::Object>(nullptr));
+ if (handle_kind >= mirror::MethodHandle::kFirstAccessorKind) {
+ ArtField* const field = handle->GetTargetField();
+ h_object.Assign(mirror::Field::CreateFromArtField<kRuntimePointerSize, false>(
+ soa.Self(), field, false /* force_resolve */));
+ } else {
+ ArtMethod* const method = handle->GetTargetMethod();
+ if (method->IsConstructor()) {
+ h_object.Assign(mirror::Constructor::CreateFromArtMethod<kRuntimePointerSize, false>(
+ soa.Self(), method));
+ } else {
+ h_object.Assign(mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(
+ soa.Self(), method));
+ }
+ }
+
+ if (UNLIKELY(h_object.Get() == nullptr)) {
+ soa.Self()->AssertPendingOOMException();
+ return nullptr;
+ }
+
+ return soa.AddLocalReference<jobject>(h_object.Get());
+}
+
+static JNINativeMethod gMethods[] = {
+ NATIVE_METHOD(MethodHandleImpl, getMemberInternal, "()Ljava/lang/reflect/Member;"),
+};
+
+void register_java_lang_invoke_MethodHandleImpl(JNIEnv* env) {
+ REGISTER_NATIVE_METHODS("java/lang/invoke/MethodHandleImpl");
+}
+
+} // namespace art
diff --git a/runtime/native/java_lang_invoke_MethodHandleImpl.h b/runtime/native/java_lang_invoke_MethodHandleImpl.h
new file mode 100644
index 0000000..0e50371
--- /dev/null
+++ b/runtime/native/java_lang_invoke_MethodHandleImpl.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_INVOKE_METHODHANDLEIMPL_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_INVOKE_METHODHANDLEIMPL_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_lang_invoke_MethodHandleImpl(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_INVOKE_METHODHANDLEIMPL_H_
diff --git a/runtime/native/java_lang_reflect_Proxy.cc b/runtime/native/java_lang_reflect_Proxy.cc
index ece0338..70cd6aa 100644
--- a/runtime/native/java_lang_reflect_Proxy.cc
+++ b/runtime/native/java_lang_reflect_Proxy.cc
@@ -22,7 +22,7 @@
#include "mirror/object_array.h"
#include "mirror/string.h"
#include "scoped_fast_native_object_access-inl.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
diff --git a/runtime/oat.h b/runtime/oat.h
index 29821a2..532c968 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '1', '0', '3', '\0' }; // Native pc change
+ static constexpr uint8_t kOatVersion[] = { '1', '0', '9', '\0' }; // Register mask change.
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index d47f1b5..31eb1cc 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -193,7 +193,7 @@
bool writable,
bool low_4gb,
std::string* error_msg) {
- vdex_.reset(VdexFile::Open(vdex_filename, writable, low_4gb, error_msg));
+ vdex_ = VdexFile::Open(vdex_filename, writable, low_4gb, error_msg);
if (vdex_.get() == nullptr) {
*error_msg = StringPrintf("Failed to load vdex file '%s' %s",
vdex_filename.c_str(),
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 62d99fb..111755e 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -201,8 +201,12 @@
// A representation of an invalid OatClass, used when an OatClass can't be found.
// See FindOatClass().
static OatClass Invalid() {
- return OatClass(nullptr, mirror::Class::kStatusError, kOatClassNoneCompiled, 0, nullptr,
- nullptr);
+ return OatClass(/* oat_file */ nullptr,
+ mirror::Class::kStatusErrorUnresolved,
+ kOatClassNoneCompiled,
+ /* bitmap_size */ 0,
+ /* bitmap_pointer */ nullptr,
+ /* methods_pointer */ nullptr);
}
private:
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 8a23457..77cdd28 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -25,6 +25,7 @@
#include "base/logging.h"
#include "compiler_filter.h"
#include "class_linker.h"
+#include "exec_utils.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "image.h"
@@ -315,6 +316,11 @@
}
bool OatFileAssistant::DexChecksumUpToDate(const VdexFile& file, std::string* error_msg) {
+ if (file.GetHeader().GetNumberOfDexFiles() <= 0) {
+ VLOG(oat) << "Vdex does not contain any dex files";
+ return false;
+ }
+
// TODO: Use GetRequiredDexChecksum to get secondary checksums as well, not
// just the primary. Because otherwise we may fail to see a secondary
// checksum failure in the case when the original (multidex) files are
@@ -601,7 +607,7 @@
class_path = OatFile::kSpecialSharedLibrary;
}
argv.push_back(class_path);
- if (runtime->IsDebuggable()) {
+ if (runtime->IsJavaDebuggable()) {
argv.push_back("--debuggable");
}
runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
@@ -851,10 +857,10 @@
// Check to see if there is a vdex file we can make use of.
std::string error_msg;
std::string vdex_filename = ReplaceFileExtension(filename_, "vdex");
- std::unique_ptr<VdexFile> vdex(VdexFile::Open(vdex_filename,
- /*writeable*/false,
- /*low_4gb*/false,
- &error_msg));
+ std::unique_ptr<VdexFile> vdex = VdexFile::Open(vdex_filename,
+ /*writable*/false,
+ /*low_4gb*/false,
+ &error_msg);
if (vdex == nullptr) {
status_ = kOatCannotOpen;
VLOG(oat) << "unable to open vdex file " << vdex_filename << ": " << error_msg;
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 9669dab..f777340 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -14,23 +14,16 @@
* limitations under the License.
*/
-#include <algorithm>
-#include <fstream>
#include <string>
#include <vector>
#include <sys/param.h>
#include "android-base/strings.h"
-#include <backtrace/BacktraceMap.h>
#include <gtest/gtest.h>
#include "art_field-inl.h"
#include "class_linker-inl.h"
-#include "common_runtime_test.h"
-#include "compiler_callbacks.h"
-#include "dex2oat_environment_test.h"
-#include "gc/space/image_space.h"
-#include "mem_map.h"
+#include "dexopt_test.h"
#include "oat_file_assistant.h"
#include "oat_file_manager.h"
#include "os.h"
@@ -40,242 +33,17 @@
namespace art {
-class OatFileAssistantTest : public Dex2oatEnvironmentTest {
- public:
- virtual void SetUp() OVERRIDE {
- ReserveImageSpace();
- Dex2oatEnvironmentTest::SetUp();
- }
+class OatFileAssistantTest : public DexoptTest {};
- // Pre-Relocate the image to a known non-zero offset so we don't have to
- // deal with the runtime randomly relocating the image by 0 and messing up
- // the expected results of the tests.
- bool PreRelocateImage(const std::string& image_location, std::string* error_msg) {
- std::string image;
- if (!GetCachedImageFile(image_location, &image, error_msg)) {
- return false;
- }
-
- std::string patchoat = GetAndroidRoot();
- patchoat += kIsDebugBuild ? "/bin/patchoatd" : "/bin/patchoat";
-
- std::vector<std::string> argv;
- argv.push_back(patchoat);
- argv.push_back("--input-image-location=" + image_location);
- argv.push_back("--output-image-file=" + image);
- argv.push_back("--instruction-set=" + std::string(GetInstructionSetString(kRuntimeISA)));
- argv.push_back("--base-offset-delta=0x00008000");
- return Exec(argv, error_msg);
- }
-
- virtual void PreRuntimeCreate() {
- std::string error_msg;
- ASSERT_TRUE(PreRelocateImage(GetImageLocation(), &error_msg)) << error_msg;
- ASSERT_TRUE(PreRelocateImage(GetImageLocation2(), &error_msg)) << error_msg;
- UnreserveImageSpace();
- }
-
- virtual void PostRuntimeCreate() OVERRIDE {
- ReserveImageSpace();
- }
-
- // Generate an oat file for the purposes of test.
- void GenerateOatForTest(const std::string& dex_location,
- const std::string& oat_location,
- CompilerFilter::Filter filter,
- bool relocate,
- bool pic,
- bool with_alternate_image) {
- std::string dalvik_cache = GetDalvikCache(GetInstructionSetString(kRuntimeISA));
- std::string dalvik_cache_tmp = dalvik_cache + ".redirected";
-
- if (!relocate) {
- // Temporarily redirect the dalvik cache so dex2oat doesn't find the
- // relocated image file.
- ASSERT_EQ(0, rename(dalvik_cache.c_str(), dalvik_cache_tmp.c_str())) << strerror(errno);
- }
-
- std::vector<std::string> args;
- args.push_back("--dex-file=" + dex_location);
- args.push_back("--oat-file=" + oat_location);
- args.push_back("--compiler-filter=" + CompilerFilter::NameOfFilter(filter));
- args.push_back("--runtime-arg");
-
- // Use -Xnorelocate regardless of the relocate argument.
- // We control relocation by redirecting the dalvik cache when needed
- // rather than use this flag.
- args.push_back("-Xnorelocate");
-
- if (pic) {
- args.push_back("--compile-pic");
- }
-
- std::string image_location = GetImageLocation();
- if (with_alternate_image) {
- args.push_back("--boot-image=" + GetImageLocation2());
- }
-
- std::string error_msg;
- ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg;
-
- if (!relocate) {
- // Restore the dalvik cache if needed.
- ASSERT_EQ(0, rename(dalvik_cache_tmp.c_str(), dalvik_cache.c_str())) << strerror(errno);
- }
-
- // Verify the odex file was generated as expected.
- std::unique_ptr<OatFile> odex_file(OatFile::Open(oat_location.c_str(),
- oat_location.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
- dex_location.c_str(),
- &error_msg));
- ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
- EXPECT_EQ(pic, odex_file->IsPic());
- EXPECT_EQ(filter, odex_file->GetCompilerFilter());
-
- std::unique_ptr<ImageHeader> image_header(
- gc::space::ImageSpace::ReadImageHeader(image_location.c_str(),
- kRuntimeISA,
- &error_msg));
- ASSERT_TRUE(image_header != nullptr) << error_msg;
- const OatHeader& oat_header = odex_file->GetOatHeader();
- uint32_t combined_checksum = OatFileAssistant::CalculateCombinedImageChecksum();
-
- if (CompilerFilter::DependsOnImageChecksum(filter)) {
- if (with_alternate_image) {
- EXPECT_NE(combined_checksum, oat_header.GetImageFileLocationOatChecksum());
- } else {
- EXPECT_EQ(combined_checksum, oat_header.GetImageFileLocationOatChecksum());
- }
- }
-
- if (!with_alternate_image) {
- if (CompilerFilter::IsBytecodeCompilationEnabled(filter)) {
- if (relocate) {
- EXPECT_EQ(reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin()),
- oat_header.GetImageFileLocationOatDataBegin());
- EXPECT_EQ(image_header->GetPatchDelta(), oat_header.GetImagePatchDelta());
- } else {
- EXPECT_NE(reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin()),
- oat_header.GetImageFileLocationOatDataBegin());
- EXPECT_NE(image_header->GetPatchDelta(), oat_header.GetImagePatchDelta());
- }
- }
- }
- }
-
- // Generate a non-PIC odex file for the purposes of test.
- // The generated odex file will be un-relocated.
- void GenerateOdexForTest(const std::string& dex_location,
- const std::string& odex_location,
- CompilerFilter::Filter filter) {
- GenerateOatForTest(dex_location,
- odex_location,
- filter,
- /*relocate*/false,
- /*pic*/false,
- /*with_alternate_image*/false);
- }
-
- void GeneratePicOdexForTest(const std::string& dex_location,
- const std::string& odex_location,
- CompilerFilter::Filter filter) {
- GenerateOatForTest(dex_location,
- odex_location,
- filter,
- /*relocate*/false,
- /*pic*/true,
- /*with_alternate_image*/false);
- }
-
- // Generate an oat file in the oat location.
- void GenerateOatForTest(const char* dex_location,
- CompilerFilter::Filter filter,
- bool relocate,
- bool pic,
- bool with_alternate_image) {
- std::string oat_location;
- std::string error_msg;
- ASSERT_TRUE(OatFileAssistant::DexLocationToOatFilename(
- dex_location, kRuntimeISA, &oat_location, &error_msg)) << error_msg;
- GenerateOatForTest(dex_location,
- oat_location,
- filter,
- relocate,
- pic,
- with_alternate_image);
- }
-
- // Generate a standard oat file in the oat location.
- void GenerateOatForTest(const char* dex_location, CompilerFilter::Filter filter) {
- GenerateOatForTest(dex_location,
- filter,
- /*relocate*/true,
- /*pic*/false,
- /*with_alternate_image*/false);
- }
-
- private:
- // Reserve memory around where the image will be loaded so other memory
- // won't conflict when it comes time to load the image.
- // This can be called with an already loaded image to reserve the space
- // around it.
- void ReserveImageSpace() {
- MemMap::Init();
-
- // Ensure a chunk of memory is reserved for the image space.
- // The reservation_end includes room for the main space that has to come
- // right after the image in case of the GSS collector.
- uintptr_t reservation_start = ART_BASE_ADDRESS;
- uintptr_t reservation_end = ART_BASE_ADDRESS + 384 * MB;
-
- std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
- ASSERT_TRUE(map.get() != nullptr) << "Failed to build process map";
- for (BacktraceMap::const_iterator it = map->begin();
- reservation_start < reservation_end && it != map->end(); ++it) {
- ReserveImageSpaceChunk(reservation_start, std::min(it->start, reservation_end));
- reservation_start = std::max(reservation_start, it->end);
- }
- ReserveImageSpaceChunk(reservation_start, reservation_end);
- }
-
- // Reserve a chunk of memory for the image space in the given range.
- // Only has effect for chunks with a positive number of bytes.
- void ReserveImageSpaceChunk(uintptr_t start, uintptr_t end) {
- if (start < end) {
- std::string error_msg;
- image_reservation_.push_back(std::unique_ptr<MemMap>(
- MemMap::MapAnonymous("image reservation",
- reinterpret_cast<uint8_t*>(start), end - start,
- PROT_NONE, false, false, &error_msg)));
- ASSERT_TRUE(image_reservation_.back().get() != nullptr) << error_msg;
- LOG(INFO) << "Reserved space for image " <<
- reinterpret_cast<void*>(image_reservation_.back()->Begin()) << "-" <<
- reinterpret_cast<void*>(image_reservation_.back()->End());
- }
- }
-
-
- // Unreserve any memory reserved by ReserveImageSpace. This should be called
- // before the image is loaded.
- void UnreserveImageSpace() {
- image_reservation_.clear();
- }
-
- std::vector<std::unique_ptr<MemMap>> image_reservation_;
-};
-
-class OatFileAssistantNoDex2OatTest : public OatFileAssistantTest {
+class OatFileAssistantNoDex2OatTest : public DexoptTest {
public:
virtual void SetUpRuntimeOptions(RuntimeOptions* options) {
- OatFileAssistantTest::SetUpRuntimeOptions(options);
+ DexoptTest::SetUpRuntimeOptions(options);
options->push_back(std::make_pair("-Xnodex2oat", nullptr));
}
};
+
// Case: We have a DEX file, but no OAT file for it.
// Expect: The status is kDex2OatNeeded.
TEST_F(OatFileAssistantTest, DexNoOat) {
@@ -379,6 +147,21 @@
oat_file_assistant.GetStatusDump();
}
+// Case: We have a DEX file and empty VDEX and ODEX files.
+TEST_F(OatFileAssistantTest, EmptyVdexOdex) {
+ std::string dex_location = GetScratchDir() + "/EmptyVdexOdex.jar";
+ std::string odex_location = GetOdexDir() + "/EmptyVdexOdex.oat";
+ std::string vdex_location = GetOdexDir() + "/EmptyVdexOdex.vdex";
+
+ Copy(GetDexSrc1(), dex_location);
+ ScratchFile vdex_file(vdex_location.c_str());
+ ScratchFile odex_file(odex_location.c_str());
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+ EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+}
+
// Case: We have a DEX file and up-to-date (OAT) VDEX file for it, but no OAT
// file.
TEST_F(OatFileAssistantTest, VdexUpToDateNoOat) {
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 33bd0f3..a46b470 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -22,6 +22,7 @@
#include "android-base/stringprintf.h"
+#include "art_field-inl.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/systrace.h"
@@ -32,11 +33,13 @@
#include "handle_scope-inl.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
+#include "mirror/object-inl.h"
#include "oat_file_assistant.h"
#include "obj_ptr-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
+#include "well_known_classes.h"
namespace art {
diff --git a/runtime/openjdkjvm/OpenjdkJvm.cc b/runtime/openjdkjvm/OpenjdkJvm.cc
index 2f51e27..bdaad20 100644
--- a/runtime/openjdkjvm/OpenjdkJvm.cc
+++ b/runtime/openjdkjvm/OpenjdkJvm.cc
@@ -46,7 +46,7 @@
#include "scoped_thread_state_change-inl.h"
#include "ScopedUtfChars.h"
#include "mirror/class_loader.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
#include "base/logging.h"
#include "base/macros.h"
#include "../../libcore/ojluni/src/main/native/jvm.h" // TODO(narayan): fix it
diff --git a/runtime/openjdkjvmti/Android.bp b/runtime/openjdkjvmti/Android.bp
index 976a1e7..c01e3f4 100644
--- a/runtime/openjdkjvmti/Android.bp
+++ b/runtime/openjdkjvmti/Android.bp
@@ -22,6 +22,7 @@
"OpenjdkJvmTi.cc",
"ti_class.cc",
"ti_class_definition.cc",
+ "ti_class_loader.cc",
"ti_dump.cc",
"ti_field.cc",
"ti_heap.cc",
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index 417d104..a815a60 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -138,6 +138,7 @@
}
static jvmtiError SuspendThread(jvmtiEnv* env, jthread thread) {
+ ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
@@ -145,10 +146,12 @@
jint request_count,
const jthread* request_list,
jvmtiError* results) {
+ ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ResumeThread(jvmtiEnv* env, jthread thread) {
+ ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
@@ -156,14 +159,17 @@
jint request_count,
const jthread* request_list,
jvmtiError* results) {
+ ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError StopThread(jvmtiEnv* env, jthread thread, jobject exception) {
+ ENSURE_HAS_CAP(env, can_signal_thread);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError InterruptThread(jvmtiEnv* env, jthread thread) {
+ ENSURE_HAS_CAP(env, can_signal_thread);
return ERR(NOT_IMPLEMENTED);
}
@@ -175,6 +181,7 @@
jthread thread,
jint* owned_monitor_count_ptr,
jobject** owned_monitors_ptr) {
+ ENSURE_HAS_CAP(env, can_get_owned_monitor_info);
return ERR(NOT_IMPLEMENTED);
}
@@ -182,12 +189,14 @@
jthread thread,
jint* monitor_info_count_ptr,
jvmtiMonitorStackDepthInfo** monitor_info_ptr) {
+ ENSURE_HAS_CAP(env, can_get_owned_monitor_stack_depth_info);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetCurrentContendedMonitor(jvmtiEnv* env,
jthread thread,
jobject* monitor_ptr) {
+ ENSURE_HAS_CAP(env, can_get_current_contended_monitor);
return ERR(NOT_IMPLEMENTED);
}
@@ -271,6 +280,7 @@
}
static jvmtiError PopFrame(jvmtiEnv* env, jthread thread) {
+ ENSURE_HAS_CAP(env, can_pop_frame);
return ERR(NOT_IMPLEMENTED);
}
@@ -283,30 +293,37 @@
}
static jvmtiError NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
+ ENSURE_HAS_CAP(env, can_generate_frame_pop_events);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ForceEarlyReturnObject(jvmtiEnv* env, jthread thread, jobject value) {
+ ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ForceEarlyReturnInt(jvmtiEnv* env, jthread thread, jint value) {
+ ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ForceEarlyReturnLong(jvmtiEnv* env, jthread thread, jlong value) {
+ ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ForceEarlyReturnFloat(jvmtiEnv* env, jthread thread, jfloat value) {
+ ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ForceEarlyReturnDouble(jvmtiEnv* env, jthread thread, jdouble value) {
+ ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ForceEarlyReturnVoid(jvmtiEnv* env, jthread thread) {
+ ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
@@ -316,6 +333,7 @@
jobject initial_object,
const jvmtiHeapCallbacks* callbacks,
const void* user_data) {
+ ENSURE_HAS_CAP(env, can_tag_objects);
HeapUtil heap_util(&gObjectTagTable);
return heap_util.FollowReferences(env,
heap_filter,
@@ -402,6 +420,7 @@
jobject object,
jvmtiObjectReferenceCallback object_reference_callback,
const void* user_data) {
+ ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
@@ -410,6 +429,7 @@
jvmtiStackReferenceCallback stack_ref_callback,
jvmtiObjectReferenceCallback object_ref_callback,
const void* user_data) {
+ ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
@@ -417,6 +437,7 @@
jvmtiHeapObjectFilter object_filter,
jvmtiHeapObjectCallback heap_object_callback,
const void* user_data) {
+ ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
@@ -425,6 +446,7 @@
jvmtiHeapObjectFilter object_filter,
jvmtiHeapObjectCallback heap_object_callback,
const void* user_data) {
+ ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
@@ -433,6 +455,7 @@
jint depth,
jint slot,
jobject* value_ptr) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -440,6 +463,7 @@
jthread thread,
jint depth,
jobject* value_ptr) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -448,6 +472,7 @@
jint depth,
jint slot,
jint* value_ptr) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -456,6 +481,7 @@
jint depth,
jint slot,
jlong* value_ptr) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -464,6 +490,7 @@
jint depth,
jint slot,
jfloat* value_ptr) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -472,6 +499,7 @@
jint depth,
jint slot,
jdouble* value_ptr) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -480,6 +508,7 @@
jint depth,
jint slot,
jobject value) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -488,6 +517,7 @@
jint depth,
jint slot,
jint value) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -496,6 +526,7 @@
jint depth,
jint slot,
jlong value) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -504,6 +535,7 @@
jint depth,
jint slot,
jfloat value) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -512,30 +544,37 @@
jint depth,
jint slot,
jdouble value) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError SetBreakpoint(jvmtiEnv* env, jmethodID method, jlocation location) {
+ ENSURE_HAS_CAP(env, can_generate_breakpoint_events);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ClearBreakpoint(jvmtiEnv* env, jmethodID method, jlocation location) {
+ ENSURE_HAS_CAP(env, can_generate_breakpoint_events);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError SetFieldAccessWatch(jvmtiEnv* env, jclass klass, jfieldID field) {
+ ENSURE_HAS_CAP(env, can_generate_field_access_events);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ClearFieldAccessWatch(jvmtiEnv* env, jclass klass, jfieldID field) {
+ ENSURE_HAS_CAP(env, can_generate_field_access_events);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError SetFieldModificationWatch(jvmtiEnv* env, jclass klass, jfieldID field) {
+ ENSURE_HAS_CAP(env, can_generate_field_modification_events);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ClearFieldModificationWatch(jvmtiEnv* env, jclass klass, jfieldID field) {
+ ENSURE_HAS_CAP(env, can_generate_field_modification_events);
return ERR(NOT_IMPLEMENTED);
}
@@ -563,6 +602,7 @@
}
static jvmtiError GetSourceFileName(jvmtiEnv* env, jclass klass, char** source_name_ptr) {
+ ENSURE_HAS_CAP(env, can_get_source_file_name);
return ERR(NOT_IMPLEMENTED);
}
@@ -603,6 +643,7 @@
jint* constant_pool_count_ptr,
jint* constant_pool_byte_count_ptr,
unsigned char** constant_pool_bytes_ptr) {
+ ENSURE_HAS_CAP(env, can_get_constant_pool);
return ERR(NOT_IMPLEMENTED);
}
@@ -629,10 +670,12 @@
static jvmtiError GetSourceDebugExtension(jvmtiEnv* env,
jclass klass,
char** source_debug_extension_ptr) {
+ ENSURE_HAS_CAP(env, can_get_source_debug_extension);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError RetransformClasses(jvmtiEnv* env, jint class_count, const jclass* classes) {
+ ENSURE_HAS_CAP(env, can_retransform_classes);
std::string error_msg;
jvmtiError res = Transformer::RetransformClasses(ArtJvmTiEnv::AsArtJvmTiEnv(env),
art::Runtime::Current(),
@@ -649,6 +692,7 @@
static jvmtiError RedefineClasses(jvmtiEnv* env,
jint class_count,
const jvmtiClassDefinition* class_definitions) {
+ ENSURE_HAS_CAP(env, can_redefine_classes);
std::string error_msg;
jvmtiError res = Redefiner::RedefineClasses(ArtJvmTiEnv::AsArtJvmTiEnv(env),
art::Runtime::Current(),
@@ -673,6 +717,7 @@
static jvmtiError GetObjectMonitorUsage(jvmtiEnv* env,
jobject object,
jvmtiMonitorUsage* info_ptr) {
+ ENSURE_HAS_CAP(env, can_get_monitor_info);
return ERR(NOT_IMPLEMENTED);
}
@@ -703,6 +748,7 @@
jclass klass,
jfieldID field,
jboolean* is_synthetic_ptr) {
+ ENSURE_HAS_CAP(env, can_get_synthetic_attribute);
return FieldUtil::IsFieldSynthetic(env, klass, field, is_synthetic_ptr);
}
@@ -742,6 +788,7 @@
jmethodID method,
jint* entry_count_ptr,
jvmtiLineNumberEntry** table_ptr) {
+ ENSURE_HAS_CAP(env, can_get_line_numbers);
return MethodUtil::GetLineNumberTable(env, method, entry_count_ptr, table_ptr);
}
@@ -756,6 +803,7 @@
jmethodID method,
jint* entry_count_ptr,
jvmtiLocalVariableEntry** table_ptr) {
+ ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -763,6 +811,7 @@
jmethodID method,
jint* bytecode_count_ptr,
unsigned char** bytecodes_ptr) {
+ ENSURE_HAS_CAP(env, can_get_bytecodes);
return ERR(NOT_IMPLEMENTED);
}
@@ -771,6 +820,7 @@
}
static jvmtiError IsMethodSynthetic(jvmtiEnv* env, jmethodID method, jboolean* is_synthetic_ptr) {
+ ENSURE_HAS_CAP(env, can_get_synthetic_attribute);
return MethodUtil::IsMethodSynthetic(env, method, is_synthetic_ptr);
}
@@ -779,10 +829,12 @@
}
static jvmtiError SetNativeMethodPrefix(jvmtiEnv* env, const char* prefix) {
+ ENSURE_HAS_CAP(env, can_set_native_method_prefix);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError SetNativeMethodPrefixes(jvmtiEnv* env, jint prefix_count, char** prefixes) {
+ ENSURE_HAS_CAP(env, can_set_native_method_prefix);
return ERR(NOT_IMPLEMENTED);
}
@@ -855,7 +907,6 @@
jthread event_thread,
...) {
ENSURE_VALID_ENV(env);
- // TODO: Check for capabilities.
art::Thread* art_thread = nullptr;
if (event_thread != nullptr) {
// TODO: Need non-aborting call here, to return JVMTI_ERROR_INVALID_THREAD.
@@ -1053,18 +1104,22 @@
}
static jvmtiError GetCurrentThreadCpuTimerInfo(jvmtiEnv* env, jvmtiTimerInfo* info_ptr) {
+ ENSURE_HAS_CAP(env, can_get_current_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetCurrentThreadCpuTime(jvmtiEnv* env, jlong* nanos_ptr) {
+ ENSURE_HAS_CAP(env, can_get_current_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetThreadCpuTimerInfo(jvmtiEnv* env, jvmtiTimerInfo* info_ptr) {
+ ENSURE_HAS_CAP(env, can_get_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetThreadCpuTime(jvmtiEnv* env, jthread thread, jlong* nanos_ptr) {
+ ENSURE_HAS_CAP(env, can_get_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
@@ -1313,6 +1368,7 @@
ThreadUtil::Register(&gEventHandler);
ClassUtil::Register(&gEventHandler);
DumpUtil::Register(&gEventHandler);
+ SearchUtil::Register();
runtime->GetJavaVM()->AddEnvironmentHook(GetEnvHandler);
runtime->AddSystemWeakHolder(&gObjectTagTable);
@@ -1325,6 +1381,7 @@
ThreadUtil::Unregister();
ClassUtil::Unregister();
DumpUtil::Unregister();
+ SearchUtil::Unregister();
return true;
}
diff --git a/runtime/openjdkjvmti/art_jvmti.h b/runtime/openjdkjvmti/art_jvmti.h
index 256c3a6..106165c 100644
--- a/runtime/openjdkjvmti/art_jvmti.h
+++ b/runtime/openjdkjvmti/art_jvmti.h
@@ -114,6 +114,21 @@
}
ALWAYS_INLINE
+static inline jvmtiError CopyDataIntoJvmtiBuffer(ArtJvmTiEnv* env,
+ const unsigned char* source,
+ jint len,
+ /*out*/unsigned char** dest) {
+ jvmtiError res = env->Allocate(len, dest);
+ if (res != OK) {
+ return res;
+ }
+ memcpy(reinterpret_cast<void*>(*dest),
+ reinterpret_cast<const void*>(source),
+ len);
+ return OK;
+}
+
+ALWAYS_INLINE
static inline jvmtiError CopyString(jvmtiEnv* env, const char* src, unsigned char** copy) {
size_t len = strlen(src) + 1;
unsigned char* buf;
@@ -131,7 +146,7 @@
.can_generate_field_modification_events = 0,
.can_generate_field_access_events = 0,
.can_get_bytecodes = 0,
- .can_get_synthetic_attribute = 0,
+ .can_get_synthetic_attribute = 1,
.can_get_owned_monitor_info = 0,
.can_get_current_contended_monitor = 0,
.can_get_monitor_info = 0,
@@ -139,7 +154,7 @@
.can_redefine_classes = 1,
.can_signal_thread = 0,
.can_get_source_file_name = 0,
- .can_get_line_numbers = 0,
+ .can_get_line_numbers = 1,
.can_get_source_debug_extension = 0,
.can_access_local_variables = 0,
.can_maintain_original_method_order = 0,
@@ -156,10 +171,10 @@
.can_generate_all_class_hook_events = 0,
.can_generate_compiled_method_load_events = 0,
.can_generate_monitor_events = 0,
- .can_generate_vm_object_alloc_events = 0,
+ .can_generate_vm_object_alloc_events = 1,
.can_generate_native_method_bind_events = 0,
- .can_generate_garbage_collection_events = 0,
- .can_generate_object_free_events = 0,
+ .can_generate_garbage_collection_events = 1,
+ .can_generate_object_free_events = 1,
.can_force_early_return = 0,
.can_get_owned_monitor_stack_depth_info = 0,
.can_get_constant_pool = 0,
diff --git a/runtime/openjdkjvmti/events-inl.h b/runtime/openjdkjvmti/events-inl.h
index 21ec731..4f5eb0c 100644
--- a/runtime/openjdkjvmti/events-inl.h
+++ b/runtime/openjdkjvmti/events-inl.h
@@ -37,96 +37,84 @@
}
}
-template <typename FnType>
-ALWAYS_INLINE static inline FnType* GetCallback(ArtJvmTiEnv* env, ArtJvmtiEvent event) {
- if (env->event_callbacks == nullptr) {
- return nullptr;
- }
+namespace impl {
- // TODO: Add a type check. Can be done, for example, by an explicitly instantiated template
- // function.
+// Infrastructure to achieve type safety for event dispatch.
- switch (event) {
- case ArtJvmtiEvent::kVmInit:
- return reinterpret_cast<FnType*>(env->event_callbacks->VMInit);
- case ArtJvmtiEvent::kVmDeath:
- return reinterpret_cast<FnType*>(env->event_callbacks->VMDeath);
- case ArtJvmtiEvent::kThreadStart:
- return reinterpret_cast<FnType*>(env->event_callbacks->ThreadStart);
- case ArtJvmtiEvent::kThreadEnd:
- return reinterpret_cast<FnType*>(env->event_callbacks->ThreadEnd);
- case ArtJvmtiEvent::kClassFileLoadHookRetransformable:
- case ArtJvmtiEvent::kClassFileLoadHookNonRetransformable:
- return reinterpret_cast<FnType*>(env->event_callbacks->ClassFileLoadHook);
- case ArtJvmtiEvent::kClassLoad:
- return reinterpret_cast<FnType*>(env->event_callbacks->ClassLoad);
- case ArtJvmtiEvent::kClassPrepare:
- return reinterpret_cast<FnType*>(env->event_callbacks->ClassPrepare);
- case ArtJvmtiEvent::kVmStart:
- return reinterpret_cast<FnType*>(env->event_callbacks->VMStart);
- case ArtJvmtiEvent::kException:
- return reinterpret_cast<FnType*>(env->event_callbacks->Exception);
- case ArtJvmtiEvent::kExceptionCatch:
- return reinterpret_cast<FnType*>(env->event_callbacks->ExceptionCatch);
- case ArtJvmtiEvent::kSingleStep:
- return reinterpret_cast<FnType*>(env->event_callbacks->SingleStep);
- case ArtJvmtiEvent::kFramePop:
- return reinterpret_cast<FnType*>(env->event_callbacks->FramePop);
- case ArtJvmtiEvent::kBreakpoint:
- return reinterpret_cast<FnType*>(env->event_callbacks->Breakpoint);
- case ArtJvmtiEvent::kFieldAccess:
- return reinterpret_cast<FnType*>(env->event_callbacks->FieldAccess);
- case ArtJvmtiEvent::kFieldModification:
- return reinterpret_cast<FnType*>(env->event_callbacks->FieldModification);
- case ArtJvmtiEvent::kMethodEntry:
- return reinterpret_cast<FnType*>(env->event_callbacks->MethodEntry);
- case ArtJvmtiEvent::kMethodExit:
- return reinterpret_cast<FnType*>(env->event_callbacks->MethodExit);
- case ArtJvmtiEvent::kNativeMethodBind:
- return reinterpret_cast<FnType*>(env->event_callbacks->NativeMethodBind);
- case ArtJvmtiEvent::kCompiledMethodLoad:
- return reinterpret_cast<FnType*>(env->event_callbacks->CompiledMethodLoad);
- case ArtJvmtiEvent::kCompiledMethodUnload:
- return reinterpret_cast<FnType*>(env->event_callbacks->CompiledMethodUnload);
- case ArtJvmtiEvent::kDynamicCodeGenerated:
- return reinterpret_cast<FnType*>(env->event_callbacks->DynamicCodeGenerated);
- case ArtJvmtiEvent::kDataDumpRequest:
- return reinterpret_cast<FnType*>(env->event_callbacks->DataDumpRequest);
- case ArtJvmtiEvent::kMonitorWait:
- return reinterpret_cast<FnType*>(env->event_callbacks->MonitorWait);
- case ArtJvmtiEvent::kMonitorWaited:
- return reinterpret_cast<FnType*>(env->event_callbacks->MonitorWaited);
- case ArtJvmtiEvent::kMonitorContendedEnter:
- return reinterpret_cast<FnType*>(env->event_callbacks->MonitorContendedEnter);
- case ArtJvmtiEvent::kMonitorContendedEntered:
- return reinterpret_cast<FnType*>(env->event_callbacks->MonitorContendedEntered);
- case ArtJvmtiEvent::kResourceExhausted:
- return reinterpret_cast<FnType*>(env->event_callbacks->ResourceExhausted);
- case ArtJvmtiEvent::kGarbageCollectionStart:
- return reinterpret_cast<FnType*>(env->event_callbacks->GarbageCollectionStart);
- case ArtJvmtiEvent::kGarbageCollectionFinish:
- return reinterpret_cast<FnType*>(env->event_callbacks->GarbageCollectionFinish);
- case ArtJvmtiEvent::kObjectFree:
- return reinterpret_cast<FnType*>(env->event_callbacks->ObjectFree);
- case ArtJvmtiEvent::kVmObjectAlloc:
- return reinterpret_cast<FnType*>(env->event_callbacks->VMObjectAlloc);
- }
- return nullptr;
+#define FORALL_EVENT_TYPES(fn) \
+ fn(VMInit, ArtJvmtiEvent::kVmInit) \
+ fn(VMDeath, ArtJvmtiEvent::kVmDeath) \
+ fn(ThreadStart, ArtJvmtiEvent::kThreadStart) \
+ fn(ThreadEnd, ArtJvmtiEvent::kThreadEnd) \
+ fn(ClassFileLoadHook, ArtJvmtiEvent::kClassFileLoadHookRetransformable) \
+ fn(ClassFileLoadHook, ArtJvmtiEvent::kClassFileLoadHookNonRetransformable) \
+ fn(ClassLoad, ArtJvmtiEvent::kClassLoad) \
+ fn(ClassPrepare, ArtJvmtiEvent::kClassPrepare) \
+ fn(VMStart, ArtJvmtiEvent::kVmStart) \
+ fn(Exception, ArtJvmtiEvent::kException) \
+ fn(ExceptionCatch, ArtJvmtiEvent::kExceptionCatch) \
+ fn(SingleStep, ArtJvmtiEvent::kSingleStep) \
+ fn(FramePop, ArtJvmtiEvent::kFramePop) \
+ fn(Breakpoint, ArtJvmtiEvent::kBreakpoint) \
+ fn(FieldAccess, ArtJvmtiEvent::kFieldAccess) \
+ fn(FieldModification, ArtJvmtiEvent::kFieldModification) \
+ fn(MethodEntry, ArtJvmtiEvent::kMethodEntry) \
+ fn(MethodExit, ArtJvmtiEvent::kMethodExit) \
+ fn(NativeMethodBind, ArtJvmtiEvent::kNativeMethodBind) \
+ fn(CompiledMethodLoad, ArtJvmtiEvent::kCompiledMethodLoad) \
+ fn(CompiledMethodUnload, ArtJvmtiEvent::kCompiledMethodUnload) \
+ fn(DynamicCodeGenerated, ArtJvmtiEvent::kDynamicCodeGenerated) \
+ fn(DataDumpRequest, ArtJvmtiEvent::kDataDumpRequest) \
+ fn(MonitorWait, ArtJvmtiEvent::kMonitorWait) \
+ fn(MonitorWaited, ArtJvmtiEvent::kMonitorWaited) \
+ fn(MonitorContendedEnter, ArtJvmtiEvent::kMonitorContendedEnter) \
+ fn(MonitorContendedEntered, ArtJvmtiEvent::kMonitorContendedEntered) \
+ fn(ResourceExhausted, ArtJvmtiEvent::kResourceExhausted) \
+ fn(GarbageCollectionStart, ArtJvmtiEvent::kGarbageCollectionStart) \
+ fn(GarbageCollectionFinish, ArtJvmtiEvent::kGarbageCollectionFinish) \
+ fn(ObjectFree, ArtJvmtiEvent::kObjectFree) \
+ fn(VMObjectAlloc, ArtJvmtiEvent::kVmObjectAlloc)
+
+template <ArtJvmtiEvent kEvent>
+struct EventFnType {
+};
+
+#define EVENT_FN_TYPE(name, enum_name) \
+template <> \
+struct EventFnType<enum_name> { \
+ using type = decltype(jvmtiEventCallbacks().name); \
+};
+
+FORALL_EVENT_TYPES(EVENT_FN_TYPE)
+
+#undef EVENT_FN_TYPE
+
+template <ArtJvmtiEvent kEvent>
+ALWAYS_INLINE inline typename EventFnType<kEvent>::type GetCallback(ArtJvmTiEnv* env);
+
+#define GET_CALLBACK(name, enum_name) \
+template <> \
+ALWAYS_INLINE inline EventFnType<enum_name>::type GetCallback<enum_name>( \
+ ArtJvmTiEnv* env) { \
+ if (env->event_callbacks == nullptr) { \
+ return nullptr; \
+ } \
+ return env->event_callbacks->name; \
}
-template <typename ...Args>
-inline void EventHandler::DispatchClassFileLoadHookEvent(art::Thread*,
- ArtJvmtiEvent event,
- Args... args ATTRIBUTE_UNUSED) const {
- CHECK(event == ArtJvmtiEvent::kClassFileLoadHookRetransformable ||
- event == ArtJvmtiEvent::kClassFileLoadHookNonRetransformable);
- LOG(FATAL) << "Incorrect arguments to ClassFileLoadHook!";
-}
+FORALL_EVENT_TYPES(GET_CALLBACK)
+#undef GET_CALLBACK
+
+#undef FORALL_EVENT_TYPES
+
+} // namespace impl
+
+// C++ does not allow partial template function specialization. The dispatch for our separated
+// ClassFileLoadHook event types is the same, so use this helper for code deduplication.
// TODO Locking of some type!
-template <>
+template <ArtJvmtiEvent kEvent>
inline void EventHandler::DispatchClassFileLoadHookEvent(art::Thread* thread,
- ArtJvmtiEvent event,
JNIEnv* jnienv,
jclass class_being_redefined,
jobject loader,
@@ -136,26 +124,16 @@
const unsigned char* class_data,
jint* new_class_data_len,
unsigned char** new_class_data) const {
- CHECK(event == ArtJvmtiEvent::kClassFileLoadHookRetransformable ||
- event == ArtJvmtiEvent::kClassFileLoadHookNonRetransformable);
- using FnType = void(jvmtiEnv* /* jvmti_env */,
- JNIEnv* /* jnienv */,
- jclass /* class_being_redefined */,
- jobject /* loader */,
- const char* /* name */,
- jobject /* protection_domain */,
- jint /* class_data_len */,
- const unsigned char* /* class_data */,
- jint* /* new_class_data_len */,
- unsigned char** /* new_class_data */);
+ static_assert(kEvent == ArtJvmtiEvent::kClassFileLoadHookRetransformable ||
+ kEvent == ArtJvmtiEvent::kClassFileLoadHookNonRetransformable, "Unsupported event");
jint current_len = class_data_len;
unsigned char* current_class_data = const_cast<unsigned char*>(class_data);
ArtJvmTiEnv* last_env = nullptr;
for (ArtJvmTiEnv* env : envs) {
- if (ShouldDispatch(event, env, thread)) {
- jint new_len;
- unsigned char* new_data;
- FnType* callback = GetCallback<FnType>(env, event);
+ if (ShouldDispatch<kEvent>(env, thread)) {
+ jint new_len = 0;
+ unsigned char* new_data = nullptr;
+ auto callback = impl::GetCallback<kEvent>(env);
callback(env,
jnienv,
class_being_redefined,
@@ -186,28 +164,16 @@
}
}
-template <typename ...Args>
-inline void EventHandler::DispatchEvent(art::Thread* thread,
- ArtJvmtiEvent event,
- Args... args) const {
- switch (event) {
- case ArtJvmtiEvent::kClassFileLoadHookRetransformable:
- case ArtJvmtiEvent::kClassFileLoadHookNonRetransformable:
- return DispatchClassFileLoadHookEvent(thread, event, args...);
- default:
- return GenericDispatchEvent(thread, event, args...);
- }
-}
+// Our goal for DispatchEvent: Do not allow implicit type conversion. Types of ...args must match
+// exactly the argument types of the corresponding Jvmti kEvent function pointer.
-// TODO Locking of some type!
-template <typename ...Args>
-inline void EventHandler::GenericDispatchEvent(art::Thread* thread,
- ArtJvmtiEvent event,
- Args... args) const {
+template <ArtJvmtiEvent kEvent, typename ...Args>
+inline void EventHandler::DispatchEvent(art::Thread* thread,
+ Args... args) const {
using FnType = void(jvmtiEnv*, Args...);
for (ArtJvmTiEnv* env : envs) {
- if (ShouldDispatch(event, env, thread)) {
- FnType* callback = GetCallback<FnType>(env, event);
+ if (ShouldDispatch<kEvent>(env, thread)) {
+ FnType* callback = impl::GetCallback<kEvent>(env);
if (callback != nullptr) {
(*callback)(env, args...);
}
@@ -215,14 +181,66 @@
}
}
-inline bool EventHandler::ShouldDispatch(ArtJvmtiEvent event,
- ArtJvmTiEnv* env,
- art::Thread* thread) {
- bool dispatch = env->event_masks.global_event_mask.Test(event);
+// C++ does not allow partial template function specialization. The dispatch for our separated
+// ClassFileLoadHook event types is the same, and lives in the DispatchClassFileLoadHookEvent
+// helper. The following two DispatchEvent specializations dispatch to it.
+template <>
+inline void EventHandler::DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(
+ art::Thread* thread,
+ JNIEnv* jnienv,
+ jclass class_being_redefined,
+ jobject loader,
+ const char* name,
+ jobject protection_domain,
+ jint class_data_len,
+ const unsigned char* class_data,
+ jint* new_class_data_len,
+ unsigned char** new_class_data) const {
+ return DispatchClassFileLoadHookEvent<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(
+ thread,
+ jnienv,
+ class_being_redefined,
+ loader,
+ name,
+ protection_domain,
+ class_data_len,
+ class_data,
+ new_class_data_len,
+ new_class_data);
+}
+template <>
+inline void EventHandler::DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookNonRetransformable>(
+ art::Thread* thread,
+ JNIEnv* jnienv,
+ jclass class_being_redefined,
+ jobject loader,
+ const char* name,
+ jobject protection_domain,
+ jint class_data_len,
+ const unsigned char* class_data,
+ jint* new_class_data_len,
+ unsigned char** new_class_data) const {
+ return DispatchClassFileLoadHookEvent<ArtJvmtiEvent::kClassFileLoadHookNonRetransformable>(
+ thread,
+ jnienv,
+ class_being_redefined,
+ loader,
+ name,
+ protection_domain,
+ class_data_len,
+ class_data,
+ new_class_data_len,
+ new_class_data);
+}
- if (!dispatch && thread != nullptr && env->event_masks.unioned_thread_event_mask.Test(event)) {
+template <ArtJvmtiEvent kEvent>
+inline bool EventHandler::ShouldDispatch(ArtJvmTiEnv* env,
+ art::Thread* thread) {
+ bool dispatch = env->event_masks.global_event_mask.Test(kEvent);
+
+ if (!dispatch && thread != nullptr && env->event_masks.unioned_thread_event_mask.Test(kEvent)) {
EventMask* mask = env->event_masks.GetEventMaskOrNull(thread);
- dispatch = mask != nullptr && mask->Test(event);
+ dispatch = mask != nullptr && mask->Test(kEvent);
}
return dispatch;
}
diff --git a/runtime/openjdkjvmti/events.cc b/runtime/openjdkjvmti/events.cc
index d3f8001..34492a9 100644
--- a/runtime/openjdkjvmti/events.cc
+++ b/runtime/openjdkjvmti/events.cc
@@ -206,13 +206,12 @@
ScopedLocalRef<jclass> klass(
jni_env, jni_env->AddLocalReference<jclass>(obj->Ptr()->GetClass()));
- handler_->DispatchEvent(self,
- ArtJvmtiEvent::kVmObjectAlloc,
- jni_env,
- thread.get(),
- object.get(),
- klass.get(),
- static_cast<jlong>(byte_count));
+ handler_->DispatchEvent<ArtJvmtiEvent::kVmObjectAlloc>(self,
+ reinterpret_cast<JNIEnv*>(jni_env),
+ thread.get(),
+ object.get(),
+ klass.get(),
+ static_cast<jlong>(byte_count));
}
}
@@ -241,11 +240,11 @@
finish_enabled_(false) {}
void StartPause() OVERRIDE {
- handler_->DispatchEvent(nullptr, ArtJvmtiEvent::kGarbageCollectionStart);
+ handler_->DispatchEvent<ArtJvmtiEvent::kGarbageCollectionStart>(nullptr);
}
void EndPause() OVERRIDE {
- handler_->DispatchEvent(nullptr, ArtJvmtiEvent::kGarbageCollectionFinish);
+ handler_->DispatchEvent<ArtJvmtiEvent::kGarbageCollectionFinish>(nullptr);
}
bool IsEnabled() {
@@ -303,6 +302,64 @@
}
}
+// Checks to see if the env has the capabilities associated with the given event.
+static bool HasAssociatedCapability(ArtJvmTiEnv* env,
+ ArtJvmtiEvent event) {
+ jvmtiCapabilities caps = env->capabilities;
+ switch (event) {
+ case ArtJvmtiEvent::kBreakpoint:
+ return caps.can_generate_breakpoint_events == 1;
+
+ case ArtJvmtiEvent::kCompiledMethodLoad:
+ case ArtJvmtiEvent::kCompiledMethodUnload:
+ return caps.can_generate_compiled_method_load_events == 1;
+
+ case ArtJvmtiEvent::kException:
+ case ArtJvmtiEvent::kExceptionCatch:
+ return caps.can_generate_exception_events == 1;
+
+ case ArtJvmtiEvent::kFieldAccess:
+ return caps.can_generate_field_access_events == 1;
+
+ case ArtJvmtiEvent::kFieldModification:
+ return caps.can_generate_field_modification_events == 1;
+
+ case ArtJvmtiEvent::kFramePop:
+ return caps.can_generate_frame_pop_events == 1;
+
+ case ArtJvmtiEvent::kGarbageCollectionStart:
+ case ArtJvmtiEvent::kGarbageCollectionFinish:
+ return caps.can_generate_garbage_collection_events == 1;
+
+ case ArtJvmtiEvent::kMethodEntry:
+ return caps.can_generate_method_entry_events == 1;
+
+ case ArtJvmtiEvent::kMethodExit:
+ return caps.can_generate_method_exit_events == 1;
+
+ case ArtJvmtiEvent::kMonitorContendedEnter:
+ case ArtJvmtiEvent::kMonitorContendedEntered:
+ case ArtJvmtiEvent::kMonitorWait:
+ case ArtJvmtiEvent::kMonitorWaited:
+ return caps.can_generate_monitor_events == 1;
+
+ case ArtJvmtiEvent::kNativeMethodBind:
+ return caps.can_generate_native_method_bind_events == 1;
+
+ case ArtJvmtiEvent::kObjectFree:
+ return caps.can_generate_object_free_events == 1;
+
+ case ArtJvmtiEvent::kSingleStep:
+ return caps.can_generate_single_step_events == 1;
+
+ case ArtJvmtiEvent::kVmObjectAlloc:
+ return caps.can_generate_vm_object_alloc_events == 1;
+
+ default:
+ return true;
+ }
+}
+
jvmtiError EventHandler::SetEvent(ArtJvmTiEnv* env,
art::Thread* thread,
ArtJvmtiEvent event,
@@ -319,8 +376,6 @@
}
}
- // TODO: Capability check.
-
if (mode != JVMTI_ENABLE && mode != JVMTI_DISABLE) {
return ERR(ILLEGAL_ARGUMENT);
}
@@ -329,6 +384,10 @@
return ERR(INVALID_EVENT_TYPE);
}
+ if (!HasAssociatedCapability(env, event)) {
+ return ERR(MUST_POSSESS_CAPABILITY);
+ }
+
bool old_state = global_mask.Test(event);
if (mode == JVMTI_ENABLE) {
diff --git a/runtime/openjdkjvmti/events.h b/runtime/openjdkjvmti/events.h
index 8e246de..4e20d17 100644
--- a/runtime/openjdkjvmti/events.h
+++ b/runtime/openjdkjvmti/events.h
@@ -156,9 +156,9 @@
ArtJvmtiEvent event,
jvmtiEventMode mode);
- template <typename ...Args>
+ template <ArtJvmtiEvent kEvent, typename ...Args>
ALWAYS_INLINE
- inline void DispatchEvent(art::Thread* thread, ArtJvmtiEvent event, Args... args) const;
+ inline void DispatchEvent(art::Thread* thread, Args... args) const;
// Tell the event handler capabilities were added/lost so it can adjust the sent events. If
// caps_added is true then caps is all the newly set capabilities of the jvmtiEnv. If it is false
@@ -169,8 +169,9 @@
bool added);
private:
+ template <ArtJvmtiEvent kEvent>
ALWAYS_INLINE
- static inline bool ShouldDispatch(ArtJvmtiEvent event, ArtJvmTiEnv* env, art::Thread* thread);
+ static inline bool ShouldDispatch(ArtJvmTiEnv* env, art::Thread* thread);
ALWAYS_INLINE
inline bool NeedsEventUpdate(ArtJvmTiEnv* env,
@@ -181,14 +182,17 @@
ALWAYS_INLINE
inline void RecalculateGlobalEventMask(ArtJvmtiEvent event);
- template <typename ...Args>
- ALWAYS_INLINE inline void GenericDispatchEvent(art::Thread* thread,
- ArtJvmtiEvent event,
- Args... args) const;
- template <typename ...Args>
+ template <ArtJvmtiEvent kEvent>
ALWAYS_INLINE inline void DispatchClassFileLoadHookEvent(art::Thread* thread,
- ArtJvmtiEvent event,
- Args... args) const;
+ JNIEnv* jnienv,
+ jclass class_being_redefined,
+ jobject loader,
+ const char* name,
+ jobject protection_domain,
+ jint class_data_len,
+ const unsigned char* class_data,
+ jint* new_class_data_len,
+ unsigned char** new_class_data) const;
void HandleEventType(ArtJvmtiEvent event, bool enable);
diff --git a/runtime/openjdkjvmti/object_tagging.cc b/runtime/openjdkjvmti/object_tagging.cc
index 94cb46a..b27c2a3 100644
--- a/runtime/openjdkjvmti/object_tagging.cc
+++ b/runtime/openjdkjvmti/object_tagging.cc
@@ -207,7 +207,7 @@
}
void ObjectTagTable::HandleNullSweep(jlong tag) {
- event_handler_->DispatchEvent(nullptr, ArtJvmtiEvent::kObjectFree, tag);
+ event_handler_->DispatchEvent<ArtJvmtiEvent::kObjectFree>(nullptr, tag);
}
template <typename T, ObjectTagTable::TableUpdateNullTarget kTargetNull>
diff --git a/runtime/openjdkjvmti/ti_class.cc b/runtime/openjdkjvmti/ti_class.cc
index 450f75a..c14fd84 100644
--- a/runtime/openjdkjvmti/ti_class.cc
+++ b/runtime/openjdkjvmti/ti_class.cc
@@ -31,6 +31,8 @@
#include "ti_class.h"
+#include "android-base/stringprintf.h"
+
#include <mutex>
#include <unordered_set>
@@ -38,34 +40,226 @@
#include "base/macros.h"
#include "class_table-inl.h"
#include "class_linker.h"
+#include "common_throws.h"
#include "events-inl.h"
#include "handle.h"
#include "jni_env_ext-inl.h"
#include "jni_internal.h"
+#include "mirror/array-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_ext.h"
#include "runtime.h"
#include "runtime_callbacks.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
+#include "ti_class_loader.h"
+#include "ti_redefine.h"
+#include "utils.h"
namespace openjdkjvmti {
+using android::base::StringPrintf;
+
+static std::unique_ptr<const art::DexFile> MakeSingleDexFile(art::Thread* self,
+ const char* descriptor,
+ const std::string& orig_location,
+ jint final_len,
+ const unsigned char* final_dex_data)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ // Make the mmap
+ std::string error_msg;
+ std::unique_ptr<art::MemMap> map(Redefiner::MoveDataToMemMap(orig_location,
+ final_len,
+ final_dex_data,
+ &error_msg));
+ if (map.get() == nullptr) {
+ LOG(WARNING) << "Unable to allocate mmap for redefined dex file! Error was: " << error_msg;
+ self->ThrowOutOfMemoryError(StringPrintf(
+ "Unable to allocate dex file for transformation of %s", descriptor).c_str());
+ return nullptr;
+ }
+
+ // Make a dex-file
+ if (map->Size() < sizeof(art::DexFile::Header)) {
+ LOG(WARNING) << "Could not read dex file header because dex_data was too short";
+ art::ThrowClassFormatError(nullptr,
+ "Unable to read transformed dex file of %s",
+ descriptor);
+ return nullptr;
+ }
+ uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map->Begin())->checksum_;
+ std::unique_ptr<const art::DexFile> dex_file(art::DexFile::Open(map->GetName(),
+ checksum,
+ std::move(map),
+ /*verify*/true,
+ /*verify_checksum*/true,
+ &error_msg));
+ if (dex_file.get() == nullptr) {
+ LOG(WARNING) << "Unable to load modified dex file for " << descriptor << ": " << error_msg;
+ art::ThrowClassFormatError(nullptr,
+ "Unable to read transformed dex file of %s because %s",
+ descriptor,
+ error_msg.c_str());
+ return nullptr;
+ }
+ if (dex_file->NumClassDefs() != 1) {
+ LOG(WARNING) << "Dex file contains more than 1 class_def. Ignoring.";
+ // TODO Throw some other sort of error here maybe?
+ art::ThrowClassFormatError(
+ nullptr,
+ "Unable to use transformed dex file of %s because it contained too many classes",
+ descriptor);
+ return nullptr;
+ }
+ return dex_file;
+}
+
struct ClassCallback : public art::ClassLoadCallback {
+ void ClassPreDefine(const char* descriptor,
+ art::Handle<art::mirror::Class> klass,
+ art::Handle<art::mirror::ClassLoader> class_loader,
+ const art::DexFile& initial_dex_file,
+ const art::DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
+ /*out*/art::DexFile const** final_dex_file,
+ /*out*/art::DexFile::ClassDef const** final_class_def)
+ OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ bool is_enabled =
+ event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kClassFileLoadHookRetransformable) ||
+ event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kClassFileLoadHookNonRetransformable);
+ if (!is_enabled) {
+ return;
+ }
+ if (descriptor[0] != 'L') {
+ // It is a primitive or array. Just return
+ return;
+ }
+ std::string name(std::string(descriptor).substr(1, strlen(descriptor) - 2));
+
+ art::Thread* self = art::Thread::Current();
+ art::JNIEnvExt* env = self->GetJniEnv();
+ ScopedLocalRef<jobject> loader(
+ env, class_loader.IsNull() ? nullptr : env->AddLocalReference<jobject>(class_loader.Get()));
+ // Go back to native.
+ art::ScopedThreadSuspension sts(self, art::ThreadState::kNative);
+ // Call all Non-retransformable agents.
+ jint post_no_redefine_len = 0;
+ unsigned char* post_no_redefine_dex_data = nullptr;
+ std::unique_ptr<const unsigned char> post_no_redefine_unique_ptr(nullptr);
+ event_handler->DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookNonRetransformable>(
+ self,
+ static_cast<JNIEnv*>(env),
+ static_cast<jclass>(nullptr), // The class doesn't really exist yet so send null.
+ loader.get(),
+ name.c_str(),
+ static_cast<jobject>(nullptr), // Android doesn't seem to have protection domains
+ static_cast<jint>(initial_dex_file.Size()),
+ static_cast<const unsigned char*>(initial_dex_file.Begin()),
+ static_cast<jint*>(&post_no_redefine_len),
+ static_cast<unsigned char**>(&post_no_redefine_dex_data));
+ if (post_no_redefine_dex_data == nullptr) {
+ DCHECK_EQ(post_no_redefine_len, 0);
+ post_no_redefine_dex_data = const_cast<unsigned char*>(initial_dex_file.Begin());
+ post_no_redefine_len = initial_dex_file.Size();
+ } else {
+ post_no_redefine_unique_ptr = std::unique_ptr<const unsigned char>(post_no_redefine_dex_data);
+ DCHECK_GT(post_no_redefine_len, 0);
+ }
+ // Call all retransformable agents.
+ jint final_len = 0;
+ unsigned char* final_dex_data = nullptr;
+ std::unique_ptr<const unsigned char> final_dex_unique_ptr(nullptr);
+ event_handler->DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(
+ self,
+ static_cast<JNIEnv*>(env),
+ static_cast<jclass>(nullptr), // The class doesn't really exist yet so send null.
+ loader.get(),
+ name.c_str(),
+ static_cast<jobject>(nullptr), // Android doesn't seem to have protection domains
+ static_cast<jint>(post_no_redefine_len),
+ static_cast<const unsigned char*>(post_no_redefine_dex_data),
+ static_cast<jint*>(&final_len),
+ static_cast<unsigned char**>(&final_dex_data));
+ if (final_dex_data == nullptr) {
+ DCHECK_EQ(final_len, 0);
+ final_dex_data = post_no_redefine_dex_data;
+ final_len = post_no_redefine_len;
+ } else {
+ final_dex_unique_ptr = std::unique_ptr<const unsigned char>(final_dex_data);
+ DCHECK_GT(final_len, 0);
+ }
+
+ if (final_dex_data != initial_dex_file.Begin()) {
+ LOG(WARNING) << "Changing class " << descriptor;
+ art::ScopedObjectAccess soa(self);
+ art::StackHandleScope<2> hs(self);
+ // Save the results of all the non-retransformable agents.
+ // First allocate the ClassExt
+ art::Handle<art::mirror::ClassExt> ext(hs.NewHandle(klass->EnsureExtDataPresent(self)));
+ // Make sure we have a ClassExt. This is fine even though we are a temporary since it will
+ // get copied.
+ if (ext.IsNull()) {
+ // We will just return failure if we fail to allocate
+ LOG(WARNING) << "Could not allocate ext-data for class '" << descriptor << "'. "
+ << "Aborting transformation since we will be unable to store it.";
+ self->AssertPendingOOMException();
+ return;
+ }
+
+ // Allocate the byte array to store the dex file bytes in.
+ art::Handle<art::mirror::ByteArray> arr(hs.NewHandle(
+ art::mirror::ByteArray::AllocateAndFill(
+ self,
+ reinterpret_cast<const signed char*>(post_no_redefine_dex_data),
+ post_no_redefine_len)));
+ if (arr.IsNull()) {
+ LOG(WARNING) << "Unable to allocate byte array for initial dex-file bytes. Aborting "
+ << "transformation";
+ self->AssertPendingOOMException();
+ return;
+ }
+
+ std::unique_ptr<const art::DexFile> dex_file(MakeSingleDexFile(self,
+ descriptor,
+ initial_dex_file.GetLocation(),
+ final_len,
+ final_dex_data));
+ if (dex_file.get() == nullptr) {
+ return;
+ }
+
+ // TODO Check Redefined dex file for all invariants.
+ LOG(WARNING) << "Dex file created by class-definition time transformation of "
+ << descriptor << " is not checked for all retransformation invariants.";
+
+ if (!ClassLoaderHelper::AddToClassLoader(self, class_loader, dex_file.get())) {
+ LOG(ERROR) << "Unable to add " << descriptor << " to class loader!";
+ return;
+ }
+
+ // Actually set the ClassExt's original bytes once we have actually succeeded.
+ ext->SetOriginalDexFileBytes(arr.Get());
+ // Set the return values
+ *final_class_def = &dex_file->GetClassDef(0);
+ *final_dex_file = dex_file.release();
+ }
+ }
+
void ClassLoad(art::Handle<art::mirror::Class> klass) REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kClassLoad)) {
art::Thread* thread = art::Thread::Current();
ScopedLocalRef<jclass> jklass(thread->GetJniEnv(),
thread->GetJniEnv()->AddLocalReference<jclass>(klass.Get()));
- ScopedLocalRef<jclass> jthread(
- thread->GetJniEnv(), thread->GetJniEnv()->AddLocalReference<jclass>(thread->GetPeer()));
+ ScopedLocalRef<jthread> thread_jni(
+ thread->GetJniEnv(), thread->GetJniEnv()->AddLocalReference<jthread>(thread->GetPeer()));
{
art::ScopedThreadSuspension sts(thread, art::ThreadState::kNative);
- event_handler->DispatchEvent(thread,
- ArtJvmtiEvent::kClassLoad,
- reinterpret_cast<JNIEnv*>(thread->GetJniEnv()),
- jthread.get(),
- jklass.get());
+ event_handler->DispatchEvent<ArtJvmtiEvent::kClassLoad>(
+ thread,
+ static_cast<JNIEnv*>(thread->GetJniEnv()),
+ thread_jni.get(),
+ jklass.get());
}
AddTempClass(thread, jklass.get());
}
@@ -78,14 +272,14 @@
art::Thread* thread = art::Thread::Current();
ScopedLocalRef<jclass> jklass(thread->GetJniEnv(),
thread->GetJniEnv()->AddLocalReference<jclass>(klass.Get()));
- ScopedLocalRef<jclass> jthread(
- thread->GetJniEnv(), thread->GetJniEnv()->AddLocalReference<jclass>(thread->GetPeer()));
+ ScopedLocalRef<jthread> thread_jni(
+ thread->GetJniEnv(), thread->GetJniEnv()->AddLocalReference<jthread>(thread->GetPeer()));
art::ScopedThreadSuspension sts(thread, art::ThreadState::kNative);
- event_handler->DispatchEvent(thread,
- ArtJvmtiEvent::kClassPrepare,
- reinterpret_cast<JNIEnv*>(thread->GetJniEnv()),
- jthread.get(),
- jklass.get());
+ event_handler->DispatchEvent<ArtJvmtiEvent::kClassPrepare>(
+ thread,
+ static_cast<JNIEnv*>(thread->GetJniEnv()),
+ thread_jni.get(),
+ jklass.get());
}
}
diff --git a/runtime/openjdkjvmti/ti_class_loader.cc b/runtime/openjdkjvmti/ti_class_loader.cc
new file mode 100644
index 0000000..afec0bf
--- /dev/null
+++ b/runtime/openjdkjvmti/ti_class_loader.cc
@@ -0,0 +1,211 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "ti_class_loader.h"
+
+#include <limits>
+
+#include "android-base/stringprintf.h"
+
+#include "art_jvmti.h"
+#include "base/array_slice.h"
+#include "base/logging.h"
+#include "dex_file.h"
+#include "dex_file_types.h"
+#include "events-inl.h"
+#include "gc/allocation_listener.h"
+#include "gc/heap.h"
+#include "instrumentation.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
+#include "jni_env_ext-inl.h"
+#include "jvmti_allocator.h"
+#include "mirror/class.h"
+#include "mirror/class_ext.h"
+#include "mirror/object.h"
+#include "object_lock.h"
+#include "runtime.h"
+#include "ScopedLocalRef.h"
+#include "transform.h"
+
+namespace openjdkjvmti {
+
+bool ClassLoaderHelper::AddToClassLoader(art::Thread* self,
+ art::Handle<art::mirror::ClassLoader> loader,
+ const art::DexFile* dex_file) {
+ art::ScopedObjectAccessUnchecked soa(self);
+ art::StackHandleScope<3> hs(self);
+ if (art::ClassLinker::IsBootClassLoader(soa, loader.Get())) {
+ art::Runtime::Current()->GetClassLinker()->AppendToBootClassPath(self, *dex_file);
+ return true;
+ }
+ art::Handle<art::mirror::Object> java_dex_file_obj(
+ hs.NewHandle(FindSourceDexFileObject(self, loader)));
+ if (java_dex_file_obj.IsNull()) {
+ return false;
+ }
+ art::Handle<art::mirror::LongArray> old_cookie(hs.NewHandle(GetDexFileCookie(java_dex_file_obj)));
+ art::Handle<art::mirror::LongArray> cookie(hs.NewHandle(
+ AllocateNewDexFileCookie(self, old_cookie, dex_file)));
+ if (cookie.IsNull()) {
+ return false;
+ }
+ art::ScopedAssertNoThreadSuspension nts("Replacing cookie fields in j.l.DexFile object");
+ UpdateJavaDexFile(java_dex_file_obj.Get(), cookie.Get());
+ return true;
+}
+
+void ClassLoaderHelper::UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file,
+ art::ObjPtr<art::mirror::LongArray> new_cookie) {
+ art::ArtField* internal_cookie_field = java_dex_file->GetClass()->FindDeclaredInstanceField(
+ "mInternalCookie", "Ljava/lang/Object;");
+ art::ArtField* cookie_field = java_dex_file->GetClass()->FindDeclaredInstanceField(
+ "mCookie", "Ljava/lang/Object;");
+ CHECK(internal_cookie_field != nullptr);
+ art::ObjPtr<art::mirror::LongArray> orig_internal_cookie(
+ internal_cookie_field->GetObject(java_dex_file)->AsLongArray());
+ art::ObjPtr<art::mirror::LongArray> orig_cookie(
+ cookie_field->GetObject(java_dex_file)->AsLongArray());
+ internal_cookie_field->SetObject<false>(java_dex_file, new_cookie);
+ if (!orig_cookie.IsNull()) {
+ cookie_field->SetObject<false>(java_dex_file, new_cookie);
+ }
+}
+
+art::ObjPtr<art::mirror::LongArray> ClassLoaderHelper::GetDexFileCookie(
+ art::Handle<art::mirror::Object> java_dex_file_obj) {
+ // mCookie is nulled out if the DexFile has been closed but mInternalCookie sticks around until
+ // the object is finalized. Since they always point to the same array if mCookie is not null we
+ // just use the mInternalCookie field. We will update one or both of these fields later.
+ // TODO Should I get the class from the classloader or directly?
+ art::ArtField* internal_cookie_field = java_dex_file_obj->GetClass()->FindDeclaredInstanceField(
+ "mInternalCookie", "Ljava/lang/Object;");
+ // TODO Add check that mCookie is either null or same as mInternalCookie
+ CHECK(internal_cookie_field != nullptr);
+ return internal_cookie_field->GetObject(java_dex_file_obj.Get())->AsLongArray();
+}
+
+// TODO Really wishing I had that mirror of java.lang.DexFile now.
+art::ObjPtr<art::mirror::LongArray> ClassLoaderHelper::AllocateNewDexFileCookie(
+ art::Thread* self,
+ art::Handle<art::mirror::LongArray> cookie,
+ const art::DexFile* dex_file) {
+ art::StackHandleScope<1> hs(self);
+ CHECK(cookie.Get() != nullptr);
+ CHECK_GE(cookie->GetLength(), 1);
+ art::Handle<art::mirror::LongArray> new_cookie(
+ hs.NewHandle(art::mirror::LongArray::Alloc(self, cookie->GetLength() + 1)));
+ if (new_cookie.Get() == nullptr) {
+ self->AssertPendingOOMException();
+ return nullptr;
+ }
+ // Copy the oat-dex field at the start.
+ // TODO Should I clear this field?
+ // TODO This is a really crappy thing here with the first element being different.
+ new_cookie->SetWithoutChecks<false>(0, cookie->GetWithoutChecks(0));
+ // This must match the casts in runtime/native/dalvik_system_DexFile.cc:ConvertDexFilesToJavaArray
+ new_cookie->SetWithoutChecks<false>(
+ 1, static_cast<int64_t>(reinterpret_cast<uintptr_t>(dex_file)));
+ new_cookie->Memcpy(2, cookie.Get(), 1, cookie->GetLength() - 1);
+ return new_cookie.Get();
+}
+
+// TODO This should return the actual source java.lang.DexFile object for the klass being loaded.
+art::ObjPtr<art::mirror::Object> ClassLoaderHelper::FindSourceDexFileObject(
+ art::Thread* self, art::Handle<art::mirror::ClassLoader> loader) {
+ const char* dex_path_list_element_array_name = "[Ldalvik/system/DexPathList$Element;";
+ const char* dex_path_list_element_name = "Ldalvik/system/DexPathList$Element;";
+ const char* dex_file_name = "Ldalvik/system/DexFile;";
+ const char* dex_path_list_name = "Ldalvik/system/DexPathList;";
+ const char* dex_class_loader_name = "Ldalvik/system/BaseDexClassLoader;";
+
+ CHECK(!self->IsExceptionPending());
+ art::StackHandleScope<5> hs(self);
+ art::ClassLinker* class_linker = art::Runtime::Current()->GetClassLinker();
+
+ art::Handle<art::mirror::ClassLoader> null_loader(hs.NewHandle<art::mirror::ClassLoader>(
+ nullptr));
+ art::Handle<art::mirror::Class> base_dex_loader_class(hs.NewHandle(class_linker->FindClass(
+ self, dex_class_loader_name, null_loader)));
+
+ // Get all the ArtFields so we can look in the BaseDexClassLoader
+ art::ArtField* path_list_field = base_dex_loader_class->FindDeclaredInstanceField(
+ "pathList", dex_path_list_name);
+ CHECK(path_list_field != nullptr);
+
+ art::ArtField* dex_path_list_element_field =
+ class_linker->FindClass(self, dex_path_list_name, null_loader)
+ ->FindDeclaredInstanceField("dexElements", dex_path_list_element_array_name);
+ CHECK(dex_path_list_element_field != nullptr);
+
+ art::ArtField* element_dex_file_field =
+ class_linker->FindClass(self, dex_path_list_element_name, null_loader)
+ ->FindDeclaredInstanceField("dexFile", dex_file_name);
+ CHECK(element_dex_file_field != nullptr);
+
+ // Check if loader is a BaseDexClassLoader
+ art::Handle<art::mirror::Class> loader_class(hs.NewHandle(loader->GetClass()));
+ // Currently only base_dex_loader is allowed to actually define classes but if this changes in the
+ // future we should make sure to support all class loader types.
+ if (!loader_class->IsSubClass(base_dex_loader_class.Get())) {
+ LOG(ERROR) << "The classloader is not a BaseDexClassLoader which is currently the only "
+ << "supported class loader type!";
+ return nullptr;
+ }
+ // Start navigating the fields of the loader (now known to be a BaseDexClassLoader derivative)
+ art::Handle<art::mirror::Object> path_list(
+ hs.NewHandle(path_list_field->GetObject(loader.Get())));
+ CHECK(path_list.Get() != nullptr);
+ CHECK(!self->IsExceptionPending());
+ art::Handle<art::mirror::ObjectArray<art::mirror::Object>> dex_elements_list(hs.NewHandle(
+ dex_path_list_element_field->GetObject(path_list.Get())->
+ AsObjectArray<art::mirror::Object>()));
+ CHECK(!self->IsExceptionPending());
+ CHECK(dex_elements_list.Get() != nullptr);
+ size_t num_elements = dex_elements_list->GetLength();
+ // Iterate over the DexPathList$Element to find the right one
+ for (size_t i = 0; i < num_elements; i++) {
+ art::ObjPtr<art::mirror::Object> current_element = dex_elements_list->Get(i);
+ CHECK(!current_element.IsNull());
+ // TODO It would be cleaner to put the art::DexFile into the dalvik.system.DexFile the class
+ // comes from but it is more annoying because we would need to find this class. It is not
+ // necessary for proper function since we just need to be in front of the classes old dex file
+ // in the path.
+ art::ObjPtr<art::mirror::Object> first_dex_file(
+ element_dex_file_field->GetObject(current_element));
+ if (!first_dex_file.IsNull()) {
+ return first_dex_file;
+ }
+ }
+ return nullptr;
+}
+
+} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_class_loader.h b/runtime/openjdkjvmti/ti_class_loader.h
new file mode 100644
index 0000000..1ac4988
--- /dev/null
+++ b/runtime/openjdkjvmti/ti_class_loader.h
@@ -0,0 +1,99 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_TI_CLASS_LOADER_H_
+#define ART_RUNTIME_OPENJDKJVMTI_TI_CLASS_LOADER_H_
+
+#include <string>
+
+#include <jni.h>
+
+#include "art_jvmti.h"
+#include "art_method.h"
+#include "base/array_slice.h"
+#include "class_linker.h"
+#include "dex_file.h"
+#include "gc_root-inl.h"
+#include "globals.h"
+#include "jni_env_ext-inl.h"
+#include "jvmti.h"
+#include "linear_alloc.h"
+#include "mem_map.h"
+#include "mirror/array-inl.h"
+#include "mirror/array.h"
+#include "mirror/class-inl.h"
+#include "mirror/class.h"
+#include "mirror/class_loader-inl.h"
+#include "mirror/string-inl.h"
+#include "oat_file.h"
+#include "obj_ptr.h"
+#include "scoped_thread_state_change-inl.h"
+#include "stack.h"
+#include "ti_class_definition.h"
+#include "thread_list.h"
+#include "transform.h"
+#include "utf.h"
+#include "utils/dex_cache_arrays_layout-inl.h"
+
+namespace openjdkjvmti {
+
+// Class that can redefine a single class's methods.
+// TODO We should really make this be driven by an outside class so we can do multiple classes at
+// the same time and have less required cleanup.
+class ClassLoaderHelper {
+ public:
+ static bool AddToClassLoader(art::Thread* self,
+ art::Handle<art::mirror::ClassLoader> loader,
+ const art::DexFile* dex_file)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ // Finds a java.lang.DexFile object that is associated with the given ClassLoader. Each of these
+ // j.l.DexFile objects holds several art::DexFile*s in it.
+ // TODO This should return the actual source java.lang.DexFile object for the klass being loaded.
+ static art::ObjPtr<art::mirror::Object> FindSourceDexFileObject(
+ art::Thread* self, art::Handle<art::mirror::ClassLoader> loader)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ static art::ObjPtr<art::mirror::LongArray> GetDexFileCookie(
+ art::Handle<art::mirror::Object> java_dex_file) REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ static art::ObjPtr<art::mirror::LongArray> AllocateNewDexFileCookie(
+ art::Thread* self,
+ art::Handle<art::mirror::LongArray> old_dex_file_cookie,
+ const art::DexFile* new_dex_file) REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ static void UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file,
+ art::ObjPtr<art::mirror::LongArray> new_cookie)
+ REQUIRES(art::Roles::uninterruptible_) REQUIRES_SHARED(art::Locks::mutator_lock_);
+};
+
+} // namespace openjdkjvmti
+#endif // ART_RUNTIME_OPENJDKJVMTI_TI_CLASS_LOADER_H_
diff --git a/runtime/openjdkjvmti/ti_dump.cc b/runtime/openjdkjvmti/ti_dump.cc
index 2ee5c40..d9e3ef1 100644
--- a/runtime/openjdkjvmti/ti_dump.cc
+++ b/runtime/openjdkjvmti/ti_dump.cc
@@ -48,7 +48,7 @@
void SigQuit() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
art::Thread* thread = art::Thread::Current();
art::ScopedThreadSuspension sts(thread, art::ThreadState::kNative);
- event_handler->DispatchEvent(nullptr, ArtJvmtiEvent::kDataDumpRequest);
+ event_handler->DispatchEvent<ArtJvmtiEvent::kDataDumpRequest>(nullptr);
}
EventHandler* event_handler = nullptr;
diff --git a/runtime/openjdkjvmti/ti_heap.cc b/runtime/openjdkjvmti/ti_heap.cc
index 7b2521d..fe3e52b 100644
--- a/runtime/openjdkjvmti/ti_heap.cc
+++ b/runtime/openjdkjvmti/ti_heap.cc
@@ -303,11 +303,11 @@
art::Thread* thread = FindThread(info);
if (thread != nullptr) {
- art::mirror::Object* thread_obj = thread->GetPeer();
+ art::mirror::Object* thread_obj;
if (thread->IsStillStarting()) {
thread_obj = nullptr;
} else {
- thread_obj = thread->GetPeer();
+ thread_obj = thread->GetPeerFromOtherThread();
}
if (thread_obj != nullptr) {
ref_info->jni_local.thread_tag = tag_table_->GetTagOrZero(thread_obj);
@@ -333,11 +333,11 @@
art::Thread* thread = FindThread(info);
if (thread != nullptr) {
- art::mirror::Object* thread_obj = thread->GetPeer();
+ art::mirror::Object* thread_obj;
if (thread->IsStillStarting()) {
thread_obj = nullptr;
} else {
- thread_obj = thread->GetPeer();
+ thread_obj = thread->GetPeerFromOtherThread();
}
if (thread_obj != nullptr) {
ref_info->stack_local.thread_tag = tag_table_->GetTagOrZero(thread_obj);
diff --git a/runtime/openjdkjvmti/ti_monitor.cc b/runtime/openjdkjvmti/ti_monitor.cc
index b827683..645faea 100644
--- a/runtime/openjdkjvmti/ti_monitor.cc
+++ b/runtime/openjdkjvmti/ti_monitor.cc
@@ -54,7 +54,7 @@
JvmtiMonitor() : owner_(nullptr), count_(0) {
}
- static bool Destroy(art::Thread* self, JvmtiMonitor* monitor) {
+ static bool Destroy(art::Thread* self, JvmtiMonitor* monitor) NO_THREAD_SAFETY_ANALYSIS {
// Check whether this thread holds the monitor, or nobody does.
art::Thread* owner_thread = monitor->owner_.load(std::memory_order_relaxed);
if (owner_thread != nullptr && self != owner_thread) {
@@ -71,7 +71,7 @@
return true;
}
- void MonitorEnter(art::Thread* self) {
+ void MonitorEnter(art::Thread* self) NO_THREAD_SAFETY_ANALYSIS {
// Check for recursive enter.
if (IsOwner(self)) {
count_++;
@@ -86,7 +86,7 @@
count_ = 1;
}
- bool MonitorExit(art::Thread* self) {
+ bool MonitorExit(art::Thread* self) NO_THREAD_SAFETY_ANALYSIS {
if (!IsOwner(self)) {
return false;
}
diff --git a/runtime/openjdkjvmti/ti_phase.cc b/runtime/openjdkjvmti/ti_phase.cc
index 154406a..60371cf 100644
--- a/runtime/openjdkjvmti/ti_phase.cc
+++ b/runtime/openjdkjvmti/ti_phase.cc
@@ -64,7 +64,7 @@
case RuntimePhase::kStart:
{
art::ScopedThreadSuspension sts(art::Thread::Current(), art::ThreadState::kNative);
- event_handler->DispatchEvent(nullptr, ArtJvmtiEvent::kVmStart, GetJniEnv());
+ event_handler->DispatchEvent<ArtJvmtiEvent::kVmStart>(nullptr, GetJniEnv());
PhaseUtil::current_phase_ = JVMTI_PHASE_START;
}
break;
@@ -72,17 +72,14 @@
{
ScopedLocalRef<jthread> thread(GetJniEnv(), GetCurrentJThread());
art::ScopedThreadSuspension sts(art::Thread::Current(), art::ThreadState::kNative);
- event_handler->DispatchEvent(nullptr,
- ArtJvmtiEvent::kVmInit,
- GetJniEnv(),
- thread.get());
+ event_handler->DispatchEvent<ArtJvmtiEvent::kVmInit>(nullptr, GetJniEnv(), thread.get());
PhaseUtil::current_phase_ = JVMTI_PHASE_LIVE;
}
break;
case RuntimePhase::kDeath:
{
art::ScopedThreadSuspension sts(art::Thread::Current(), art::ThreadState::kNative);
- event_handler->DispatchEvent(nullptr, ArtJvmtiEvent::kVmDeath, GetJniEnv());
+ event_handler->DispatchEvent<ArtJvmtiEvent::kVmDeath>(nullptr, GetJniEnv());
PhaseUtil::current_phase_ = JVMTI_PHASE_DEAD;
}
// TODO: Block events now.
@@ -139,4 +136,8 @@
art::Runtime::Current()->GetRuntimeCallbacks()->RemoveRuntimePhaseCallback(&gPhaseCallback);
}
+jvmtiPhase PhaseUtil::GetPhaseUnchecked() {
+ return PhaseUtil::current_phase_;
+}
+
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_phase.h b/runtime/openjdkjvmti/ti_phase.h
index bd15fa6..851fc27 100644
--- a/runtime/openjdkjvmti/ti_phase.h
+++ b/runtime/openjdkjvmti/ti_phase.h
@@ -57,6 +57,8 @@
struct PhaseCallback;
+ static jvmtiPhase GetPhaseUnchecked();
+
private:
static jvmtiPhase current_phase_;
};
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index 34efc50..058b93a 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -48,12 +48,13 @@
#include "jit/jit_code_cache.h"
#include "jni_env_ext-inl.h"
#include "jvmti_allocator.h"
-#include "mirror/class.h"
+#include "mirror/class-inl.h"
#include "mirror/class_ext.h"
#include "mirror/object.h"
#include "object_lock.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
+#include "ti_class_loader.h"
#include "transform.h"
namespace openjdkjvmti {
@@ -74,9 +75,7 @@
StackVisitor::StackWalkKind::kIncludeInlinedFrames),
allocator_(allocator),
obsoleted_methods_(obsoleted_methods),
- obsolete_maps_(obsolete_maps),
- is_runtime_frame_(false) {
- }
+ obsolete_maps_(obsolete_maps) { }
~ObsoleteMethodStackVisitor() OVERRIDE {}
@@ -99,21 +98,7 @@
bool VisitFrame() OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
art::ArtMethod* old_method = GetMethod();
- // TODO REMOVE once either current_method doesn't stick around through suspend points or deopt
- // works through runtime methods.
- bool prev_was_runtime_frame_ = is_runtime_frame_;
- is_runtime_frame_ = old_method->IsRuntimeMethod();
if (obsoleted_methods_.find(old_method) != obsoleted_methods_.end()) {
- // The check below works since when we deoptimize we set shadow frames for all frames until a
- // native/runtime transition and for those set the return PC to a function that will complete
- // the deoptimization. This does leave us with the unfortunate side-effect that frames just
- // below runtime frames cannot be deoptimized at the moment.
- // TODO REMOVE once either current_method doesn't stick around through suspend points or deopt
- // works through runtime methods.
- // TODO b/33616143
- if (!IsShadowFrame() && prev_was_runtime_frame_) {
- LOG(FATAL) << "Deoptimization failed due to runtime method in stack. See b/33616143";
- }
// We cannot ensure that the right dex file is used in inlined frames so we don't support
// redefining them.
DCHECK(!IsInInlinedFrame()) << "Inlined frames are not supported when using redefinition";
@@ -136,6 +121,7 @@
new_obsolete_method->CopyFrom(old_method, ptr_size);
DCHECK_EQ(new_obsolete_method->GetDeclaringClass(), old_method->GetDeclaringClass());
new_obsolete_method->SetIsObsolete();
+ new_obsolete_method->SetDontCompile();
obsolete_maps_->insert({old_method, new_obsolete_method});
// Update JIT Data structures to point to the new method.
art::jit::Jit* jit = art::Runtime::Current()->GetJit();
@@ -162,9 +148,6 @@
// values in this map must be added to the obsolete_methods_ (and obsolete_dex_caches_) fields of
// the redefined classes ClassExt by the caller.
std::unordered_map<art::ArtMethod*, art::ArtMethod*>* obsolete_maps_;
- // TODO REMOVE once either current_method doesn't stick around through suspend points or deopt
- // works through runtime methods.
- bool is_runtime_frame_;
};
jvmtiError Redefiner::IsModifiableClass(jvmtiEnv* env ATTRIBUTE_UNUSED,
@@ -386,85 +369,6 @@
return OK;
}
-// TODO *MAJOR* This should return the actual source java.lang.DexFile object for the klass.
-// TODO Make mirror of DexFile and associated types to make this less hellish.
-// TODO Make mirror of BaseDexClassLoader and associated types to make this less hellish.
-art::mirror::Object* Redefiner::ClassRedefinition::FindSourceDexFileObject(
- art::Handle<art::mirror::ClassLoader> loader) {
- const char* dex_path_list_element_array_name = "[Ldalvik/system/DexPathList$Element;";
- const char* dex_path_list_element_name = "Ldalvik/system/DexPathList$Element;";
- const char* dex_file_name = "Ldalvik/system/DexFile;";
- const char* dex_path_list_name = "Ldalvik/system/DexPathList;";
- const char* dex_class_loader_name = "Ldalvik/system/BaseDexClassLoader;";
-
- CHECK(!driver_->self_->IsExceptionPending());
- art::StackHandleScope<11> hs(driver_->self_);
- art::ClassLinker* class_linker = driver_->runtime_->GetClassLinker();
-
- art::Handle<art::mirror::ClassLoader> null_loader(hs.NewHandle<art::mirror::ClassLoader>(
- nullptr));
- art::Handle<art::mirror::Class> base_dex_loader_class(hs.NewHandle(class_linker->FindClass(
- driver_->self_, dex_class_loader_name, null_loader)));
-
- // Get all the ArtFields so we can look in the BaseDexClassLoader
- art::ArtField* path_list_field = base_dex_loader_class->FindDeclaredInstanceField(
- "pathList", dex_path_list_name);
- CHECK(path_list_field != nullptr);
-
- art::ArtField* dex_path_list_element_field =
- class_linker->FindClass(driver_->self_, dex_path_list_name, null_loader)
- ->FindDeclaredInstanceField("dexElements", dex_path_list_element_array_name);
- CHECK(dex_path_list_element_field != nullptr);
-
- art::ArtField* element_dex_file_field =
- class_linker->FindClass(driver_->self_, dex_path_list_element_name, null_loader)
- ->FindDeclaredInstanceField("dexFile", dex_file_name);
- CHECK(element_dex_file_field != nullptr);
-
- // Check if loader is a BaseDexClassLoader
- art::Handle<art::mirror::Class> loader_class(hs.NewHandle(loader->GetClass()));
- if (!loader_class->IsSubClass(base_dex_loader_class.Get())) {
- LOG(ERROR) << "The classloader is not a BaseDexClassLoader which is currently the only "
- << "supported class loader type!";
- return nullptr;
- }
- // Start navigating the fields of the loader (now known to be a BaseDexClassLoader derivative)
- art::Handle<art::mirror::Object> path_list(
- hs.NewHandle(path_list_field->GetObject(loader.Get())));
- CHECK(path_list.Get() != nullptr);
- CHECK(!driver_->self_->IsExceptionPending());
- art::Handle<art::mirror::ObjectArray<art::mirror::Object>> dex_elements_list(hs.NewHandle(
- dex_path_list_element_field->GetObject(path_list.Get())->
- AsObjectArray<art::mirror::Object>()));
- CHECK(!driver_->self_->IsExceptionPending());
- CHECK(dex_elements_list.Get() != nullptr);
- size_t num_elements = dex_elements_list->GetLength();
- art::MutableHandle<art::mirror::Object> current_element(
- hs.NewHandle<art::mirror::Object>(nullptr));
- art::MutableHandle<art::mirror::Object> first_dex_file(
- hs.NewHandle<art::mirror::Object>(nullptr));
- // Iterate over the DexPathList$Element to find the right one
- // TODO Or not ATM just return the first one.
- for (size_t i = 0; i < num_elements; i++) {
- current_element.Assign(dex_elements_list->Get(i));
- CHECK(current_element.Get() != nullptr);
- CHECK(!driver_->self_->IsExceptionPending());
- CHECK(dex_elements_list.Get() != nullptr);
- CHECK_EQ(current_element->GetClass(), class_linker->FindClass(driver_->self_,
- dex_path_list_element_name,
- null_loader));
- // TODO It would be cleaner to put the art::DexFile into the dalvik.system.DexFile the class
- // comes from but it is more annoying because we would need to find this class. It is not
- // necessary for proper function since we just need to be in front of the classes old dex file
- // in the path.
- first_dex_file.Assign(element_dex_file_field->GetObject(current_element.Get()));
- if (first_dex_file.Get() != nullptr) {
- return first_dex_file.Get();
- }
- }
- return nullptr;
-}
-
art::mirror::Class* Redefiner::ClassRedefinition::GetMirrorClass() {
return driver_->self_->DecodeJObject(klass_)->AsClass();
}
@@ -478,39 +382,6 @@
return driver_->runtime_->GetClassLinker()->RegisterDexFile(*dex_file_, loader.Get());
}
-// TODO Really wishing I had that mirror of java.lang.DexFile now.
-art::mirror::LongArray* Redefiner::ClassRedefinition::AllocateDexFileCookie(
- art::Handle<art::mirror::Object> java_dex_file_obj) {
- art::StackHandleScope<2> hs(driver_->self_);
- // mCookie is nulled out if the DexFile has been closed but mInternalCookie sticks around until
- // the object is finalized. Since they always point to the same array if mCookie is not null we
- // just use the mInternalCookie field. We will update one or both of these fields later.
- // TODO Should I get the class from the classloader or directly?
- art::ArtField* internal_cookie_field = java_dex_file_obj->GetClass()->FindDeclaredInstanceField(
- "mInternalCookie", "Ljava/lang/Object;");
- // TODO Add check that mCookie is either null or same as mInternalCookie
- CHECK(internal_cookie_field != nullptr);
- art::Handle<art::mirror::LongArray> cookie(
- hs.NewHandle(internal_cookie_field->GetObject(java_dex_file_obj.Get())->AsLongArray()));
- // TODO Maybe make these non-fatal.
- CHECK(cookie.Get() != nullptr);
- CHECK_GE(cookie->GetLength(), 1);
- art::Handle<art::mirror::LongArray> new_cookie(
- hs.NewHandle(art::mirror::LongArray::Alloc(driver_->self_, cookie->GetLength() + 1)));
- if (new_cookie.Get() == nullptr) {
- driver_->self_->AssertPendingOOMException();
- return nullptr;
- }
- // Copy the oat-dex field at the start.
- // TODO Should I clear this field?
- // TODO This is a really crappy thing here with the first element being different.
- new_cookie->SetWithoutChecks<false>(0, cookie->GetWithoutChecks(0));
- new_cookie->SetWithoutChecks<false>(
- 1, static_cast<int64_t>(reinterpret_cast<intptr_t>(dex_file_.get())));
- new_cookie->Memcpy(2, cookie.Get(), 1, cookie->GetLength() - 1);
- return new_cookie.Get();
-}
-
void Redefiner::RecordFailure(jvmtiError result,
const std::string& class_sig,
const std::string& error_msg) {
@@ -520,28 +391,13 @@
result_ = result;
}
-// Allocates a ByteArray big enough to store the given number of bytes and copies them from the
-// bytes pointer.
-static art::mirror::ByteArray* AllocateAndFillBytes(art::Thread* self,
- const uint8_t* bytes,
- int32_t num_bytes)
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
- art::StackHandleScope<1> hs(self);
- art::Handle<art::mirror::ByteArray> arr(hs.NewHandle(
- art::mirror::ByteArray::Alloc(self, num_bytes)));
- if (!arr.IsNull()) {
- // Copy it in. Just skip if it's null
- memcpy(arr->GetData(), bytes, num_bytes);
- }
- return arr.Get();
-}
-
art::mirror::ByteArray* Redefiner::ClassRedefinition::AllocateOrGetOriginalDexFileBytes() {
// If we have been specifically given a new set of bytes use that
if (original_dex_file_.size() != 0) {
- return AllocateAndFillBytes(driver_->self_,
- &original_dex_file_.At(0),
- original_dex_file_.size());
+ return art::mirror::ByteArray::AllocateAndFill(
+ driver_->self_,
+ reinterpret_cast<const signed char*>(&original_dex_file_.At(0)),
+ original_dex_file_.size());
}
// See if we already have one set.
@@ -561,7 +417,10 @@
LOG(WARNING) << "Current dex file has more than one class in it. Calling RetransformClasses "
<< "on this class might fail if no transformations are applied to it!";
}
- return AllocateAndFillBytes(driver_->self_, current_dex_file.Begin(), current_dex_file.Size());
+ return art::mirror::ByteArray::AllocateAndFill(
+ driver_->self_,
+ reinterpret_cast<const signed char*>(current_dex_file.Begin()),
+ current_dex_file.Size());
}
struct CallbackCtx {
@@ -587,7 +446,8 @@
art::ScopedAssertNoThreadSuspension ns("No thread suspension during thread stack walking");
art::mirror::ClassExt* ext = art_klass->GetExtData();
CHECK(ext->GetObsoleteMethods() != nullptr);
- CallbackCtx ctx(art_klass->GetClassLoader()->GetAllocator());
+ art::ClassLinker* linker = driver_->runtime_->GetClassLinker();
+ CallbackCtx ctx(linker->GetAllocatorForClassLoader(art_klass->GetClassLoader()));
// Add all the declared methods to the map
for (auto& m : art_klass->GetDeclaredMethods(art::kRuntimePointerSize)) {
ctx.obsolete_methods.insert(&m);
@@ -631,16 +491,141 @@
}
}
-// TODO It should be possible to only deoptimize the specific obsolete methods.
-// TODO ReJitEverything can (sort of) fail. In certain cases it will skip deoptimizing some frames.
-// If one of these frames is an obsolete method we have a problem. b/33616143
-// TODO This shouldn't be necessary once we can ensure that the current method is not kept in
-// registers across suspend points.
-// TODO Pending b/33630159
-void Redefiner::EnsureObsoleteMethodsAreDeoptimized() {
- art::ScopedAssertNoThreadSuspension nts("Deoptimizing everything!");
- art::instrumentation::Instrumentation* i = runtime_->GetInstrumentation();
- i->ReJitEverything("libOpenJkdJvmti - Class Redefinition");
+// Try and get the declared method. First try to get a virtual method then a direct method if that's
+// not found.
+static art::ArtMethod* FindMethod(art::Handle<art::mirror::Class> klass,
+ const char* name,
+ art::Signature sig) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::ArtMethod* m = klass->FindDeclaredVirtualMethod(name, sig, art::kRuntimePointerSize);
+ if (m == nullptr) {
+ m = klass->FindDeclaredDirectMethod(name, sig, art::kRuntimePointerSize);
+ }
+ return m;
+}
+
+bool Redefiner::ClassRedefinition::CheckSameMethods() {
+ art::StackHandleScope<1> hs(driver_->self_);
+ art::Handle<art::mirror::Class> h_klass(hs.NewHandle(GetMirrorClass()));
+ DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
+
+ art::ClassDataItemIterator new_iter(*dex_file_,
+ dex_file_->GetClassData(dex_file_->GetClassDef(0)));
+
+ // Make sure we have the same number of methods.
+ uint32_t num_new_method = new_iter.NumVirtualMethods() + new_iter.NumDirectMethods();
+ uint32_t num_old_method = h_klass->GetDeclaredMethodsSlice(art::kRuntimePointerSize).size();
+ if (num_new_method != num_old_method) {
+ bool bigger = num_new_method > num_old_method;
+ RecordFailure(bigger ? ERR(UNSUPPORTED_REDEFINITION_METHOD_ADDED)
+ : ERR(UNSUPPORTED_REDEFINITION_METHOD_DELETED),
+ StringPrintf("Total number of declared methods changed from %d to %d",
+ num_old_method, num_new_method));
+ return false;
+ }
+
+ // Skip all of the fields. We should have already checked this.
+ while (new_iter.HasNextStaticField() || new_iter.HasNextInstanceField()) {
+ new_iter.Next();
+ }
+ // Check each of the methods. NB we don't need to specifically check for removals since the 2 dex
+ // files have the same number of methods, which means there must be an equal amount of additions
+ // and removals.
+ for (; new_iter.HasNextVirtualMethod() || new_iter.HasNextDirectMethod(); new_iter.Next()) {
+ // Get the data on the method we are searching for
+ const art::DexFile::MethodId& new_method_id = dex_file_->GetMethodId(new_iter.GetMemberIndex());
+ const char* new_method_name = dex_file_->GetMethodName(new_method_id);
+ art::Signature new_method_signature = dex_file_->GetMethodSignature(new_method_id);
+ art::ArtMethod* old_method = FindMethod(h_klass, new_method_name, new_method_signature);
+ // If we got past the check for the same number of methods above that means there must be at
+ // least one added and one removed method. We will return the ADDED failure message since it is
+ // easier to get a useful error report for it.
+ if (old_method == nullptr) {
+ RecordFailure(ERR(UNSUPPORTED_REDEFINITION_METHOD_ADDED),
+ StringPrintf("Unknown method '%s' (sig: %s) was added!",
+ new_method_name,
+ new_method_signature.ToString().c_str()));
+ return false;
+ }
+ // Since direct methods have different flags than virtual ones (specifically direct methods must
+ // have kAccPrivate or kAccStatic or kAccConstructor flags) we can tell if a method changes from
+ // virtual to direct.
+ uint32_t new_flags = new_iter.GetMethodAccessFlags();
+ if (new_flags != (old_method->GetAccessFlags() & art::kAccValidMethodFlags)) {
+ RecordFailure(ERR(UNSUPPORTED_REDEFINITION_METHOD_MODIFIERS_CHANGED),
+ StringPrintf("method '%s' (sig: %s) had different access flags",
+ new_method_name,
+ new_method_signature.ToString().c_str()));
+ return false;
+ }
+ }
+ return true;
+}
+
+bool Redefiner::ClassRedefinition::CheckSameFields() {
+ art::StackHandleScope<1> hs(driver_->self_);
+ art::Handle<art::mirror::Class> h_klass(hs.NewHandle(GetMirrorClass()));
+ DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
+ art::ClassDataItemIterator new_iter(*dex_file_,
+ dex_file_->GetClassData(dex_file_->GetClassDef(0)));
+ const art::DexFile& old_dex_file = h_klass->GetDexFile();
+ art::ClassDataItemIterator old_iter(old_dex_file,
+ old_dex_file.GetClassData(*h_klass->GetClassDef()));
+ // Instance and static fields can be differentiated by their flags so no need to check them
+ // separately.
+ while (new_iter.HasNextInstanceField() || new_iter.HasNextStaticField()) {
+ // Get the data on the method we are searching for
+ const art::DexFile::FieldId& new_field_id = dex_file_->GetFieldId(new_iter.GetMemberIndex());
+ const char* new_field_name = dex_file_->GetFieldName(new_field_id);
+ const char* new_field_type = dex_file_->GetFieldTypeDescriptor(new_field_id);
+
+ if (!(old_iter.HasNextInstanceField() || old_iter.HasNextStaticField())) {
+ // We are missing the old version of this method!
+ RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
+ StringPrintf("Unknown field '%s' (type: %s) added!",
+ new_field_name,
+ new_field_type));
+ return false;
+ }
+
+ const art::DexFile::FieldId& old_field_id = old_dex_file.GetFieldId(old_iter.GetMemberIndex());
+ const char* old_field_name = old_dex_file.GetFieldName(old_field_id);
+ const char* old_field_type = old_dex_file.GetFieldTypeDescriptor(old_field_id);
+
+ // Check name and type.
+ if (strcmp(old_field_name, new_field_name) != 0 ||
+ strcmp(old_field_type, new_field_type) != 0) {
+ RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
+ StringPrintf("Field changed from '%s' (sig: %s) to '%s' (sig: %s)!",
+ old_field_name,
+ old_field_type,
+ new_field_name,
+ new_field_type));
+ return false;
+ }
+
+ // Since static fields have different flags than instance ones (specifically static fields must
+ // have the kAccStatic flag) we can tell if a field changes from static to instance.
+ if (new_iter.GetFieldAccessFlags() != old_iter.GetFieldAccessFlags()) {
+ RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
+ StringPrintf("Field '%s' (sig: %s) had different access flags",
+ new_field_name,
+ new_field_type));
+ return false;
+ }
+
+ new_iter.Next();
+ old_iter.Next();
+ }
+ if (old_iter.HasNextInstanceField() || old_iter.HasNextStaticField()) {
+ RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
+ StringPrintf("field '%s' (sig: %s) is missing!",
+ old_dex_file.GetFieldName(old_dex_file.GetFieldId(
+ old_iter.GetMemberIndex())),
+ old_dex_file.GetFieldTypeDescriptor(old_dex_file.GetFieldId(
+ old_iter.GetMemberIndex()))));
+ return false;
+ }
+ return true;
}
bool Redefiner::ClassRedefinition::CheckClass() {
@@ -854,31 +839,85 @@
DISALLOW_COPY_AND_ASSIGN(RedefinitionDataHolder);
};
+// Looks through the previously allocated cookies to see if we need to update them with another new
+// dexfile. This is so that even if multiple classes with the same classloader are redefined at
+// once they are all added to the classloader.
+bool Redefiner::ClassRedefinition::AllocateAndRememberNewDexFileCookie(
+ int32_t klass_index,
+ art::Handle<art::mirror::ClassLoader> source_class_loader,
+ art::Handle<art::mirror::Object> dex_file_obj,
+ /*out*/RedefinitionDataHolder* holder) {
+ art::StackHandleScope<2> hs(driver_->self_);
+ art::MutableHandle<art::mirror::LongArray> old_cookie(
+ hs.NewHandle<art::mirror::LongArray>(nullptr));
+ bool has_older_cookie = false;
+ // See if we already have a cookie that a previous redefinition got from the same classloader.
+ for (int32_t i = 0; i < klass_index; i++) {
+ if (holder->GetSourceClassLoader(i) == source_class_loader.Get()) {
+ // Since every instance of this classloader should have the same cookie associated with it we
+ // can stop looking here.
+ has_older_cookie = true;
+ old_cookie.Assign(holder->GetNewDexFileCookie(i));
+ break;
+ }
+ }
+ if (old_cookie.IsNull()) {
+ // No older cookie. Get it directly from the dex_file_obj
+ // We should not have seen this classloader elsewhere.
+ CHECK(!has_older_cookie);
+ old_cookie.Assign(ClassLoaderHelper::GetDexFileCookie(dex_file_obj));
+ }
+ // Use the old cookie to generate the new one with the new DexFile* added in.
+ art::Handle<art::mirror::LongArray>
+ new_cookie(hs.NewHandle(ClassLoaderHelper::AllocateNewDexFileCookie(driver_->self_,
+ old_cookie,
+ dex_file_.get())));
+ // Make sure the allocation worked.
+ if (new_cookie.IsNull()) {
+ return false;
+ }
+
+ // Save the cookie.
+ holder->SetNewDexFileCookie(klass_index, new_cookie.Get());
+ // If there are other copies of this same classloader we need to make sure that we all have the
+ // same cookie.
+ if (has_older_cookie) {
+ for (int32_t i = 0; i < klass_index; i++) {
+ // We will let the GC take care of the cookie we allocated for this one.
+ if (holder->GetSourceClassLoader(i) == source_class_loader.Get()) {
+ holder->SetNewDexFileCookie(i, new_cookie.Get());
+ }
+ }
+ }
+
+ return true;
+}
+
bool Redefiner::ClassRedefinition::FinishRemainingAllocations(
int32_t klass_index, /*out*/RedefinitionDataHolder* holder) {
+ art::ScopedObjectAccessUnchecked soa(driver_->self_);
art::StackHandleScope<2> hs(driver_->self_);
holder->SetMirrorClass(klass_index, GetMirrorClass());
// This shouldn't allocate
art::Handle<art::mirror::ClassLoader> loader(hs.NewHandle(GetClassLoader()));
- holder->SetSourceClassLoader(klass_index, loader.Get());
- if (loader.Get() == nullptr) {
- // TODO Better error msg.
- RecordFailure(ERR(INTERNAL), "Unable to find class loader!");
- return false;
- }
- art::Handle<art::mirror::Object> dex_file_obj(hs.NewHandle(FindSourceDexFileObject(loader)));
- holder->SetJavaDexFile(klass_index, dex_file_obj.Get());
- if (dex_file_obj.Get() == nullptr) {
- // TODO Better error msg.
- RecordFailure(ERR(INTERNAL), "Unable to find class loader!");
- return false;
- }
- holder->SetNewDexFileCookie(klass_index, AllocateDexFileCookie(dex_file_obj));
- if (holder->GetNewDexFileCookie(klass_index) == nullptr) {
- driver_->self_->AssertPendingOOMException();
- driver_->self_->ClearException();
- RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate dex file array for class loader");
- return false;
+ // The bootclasspath is handled specially so it doesn't have a j.l.DexFile.
+ if (!art::ClassLinker::IsBootClassLoader(soa, loader.Get())) {
+ holder->SetSourceClassLoader(klass_index, loader.Get());
+ art::Handle<art::mirror::Object> dex_file_obj(hs.NewHandle(
+ ClassLoaderHelper::FindSourceDexFileObject(driver_->self_, loader)));
+ holder->SetJavaDexFile(klass_index, dex_file_obj.Get());
+ if (dex_file_obj.Get() == nullptr) {
+ // TODO Better error msg.
+ RecordFailure(ERR(INTERNAL), "Unable to find dex file!");
+ return false;
+ }
+ // Allocate the new dex file cookie.
+ if (!AllocateAndRememberNewDexFileCookie(klass_index, loader, dex_file_obj, holder)) {
+ driver_->self_->AssertPendingOOMException();
+ driver_->self_->ClearException();
+ RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate dex file array for class loader");
+ return false;
+ }
}
holder->SetNewDexCache(klass_index, CreateNewDexCache(loader));
if (holder->GetNewDexCache(klass_index) == nullptr) {
@@ -965,6 +1004,13 @@
// cleaned up by the GC eventually.
return result_;
}
+ int32_t counter = 0;
+ for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ if (holder.GetSourceClassLoader(counter) == nullptr) {
+ runtime_->GetClassLinker()->AppendToBootClassPath(self_, redef.GetDexFile());
+ }
+ counter++;
+ }
// Disable GC and wait for it to be done if we are a moving GC. This is fine since we are done
// allocating so no deadlocks.
art::gc::Heap* heap = runtime_->GetHeap();
@@ -983,24 +1029,20 @@
// TODO We need to update all debugger MethodIDs so they note the method they point to is
// obsolete or implement some other well defined semantics.
// TODO We need to decide on & implement semantics for JNI jmethodids when we redefine methods.
- int32_t cnt = 0;
+ counter = 0;
for (Redefiner::ClassRedefinition& redef : redefinitions_) {
- art::mirror::Class* klass = holder.GetMirrorClass(cnt);
- redef.UpdateJavaDexFile(holder.GetJavaDexFile(cnt), holder.GetNewDexFileCookie(cnt));
+ art::ScopedAssertNoThreadSuspension nts("Updating runtime objects for redefinition");
+ if (holder.GetSourceClassLoader(counter) != nullptr) {
+ ClassLoaderHelper::UpdateJavaDexFile(holder.GetJavaDexFile(counter),
+ holder.GetNewDexFileCookie(counter));
+ }
+ art::mirror::Class* klass = holder.GetMirrorClass(counter);
// TODO Rewrite so we don't do a stack walk for each and every class.
redef.FindAndAllocateObsoleteMethods(klass);
- redef.UpdateClass(klass, holder.GetNewDexCache(cnt), holder.GetOriginalDexFileBytes(cnt));
- cnt++;
+ redef.UpdateClass(klass, holder.GetNewDexCache(counter),
+ holder.GetOriginalDexFileBytes(counter));
+ counter++;
}
- // Ensure that obsolete methods are deoptimized. This is needed since optimized methods may have
- // pointers to their ArtMethod's stashed in registers that they then use to attempt to hit the
- // DexCache. (b/33630159)
- // TODO This can fail (leave some methods optimized) near runtime methods (including
- // quick-to-interpreter transition function).
- // TODO We probably don't need this at all once we have a way to ensure that the
- // current_art_method is never stashed in a (physical) register by the JIT and lost to the
- // stack-walker.
- EnsureObsoleteMethodsAreDeoptimized();
// TODO Verify the new Class.
// TODO Shrink the obsolete method maps if possible?
// TODO find appropriate class loader.
@@ -1102,24 +1144,6 @@
ext->SetOriginalDexFileBytes(original_dex_file);
}
-void Redefiner::ClassRedefinition::UpdateJavaDexFile(
- art::ObjPtr<art::mirror::Object> java_dex_file,
- art::ObjPtr<art::mirror::LongArray> new_cookie) {
- art::ArtField* internal_cookie_field = java_dex_file->GetClass()->FindDeclaredInstanceField(
- "mInternalCookie", "Ljava/lang/Object;");
- art::ArtField* cookie_field = java_dex_file->GetClass()->FindDeclaredInstanceField(
- "mCookie", "Ljava/lang/Object;");
- CHECK(internal_cookie_field != nullptr);
- art::ObjPtr<art::mirror::LongArray> orig_internal_cookie(
- internal_cookie_field->GetObject(java_dex_file)->AsLongArray());
- art::ObjPtr<art::mirror::LongArray> orig_cookie(
- cookie_field->GetObject(java_dex_file)->AsLongArray());
- internal_cookie_field->SetObject<false>(java_dex_file, new_cookie);
- if (!orig_cookie.IsNull()) {
- cookie_field->SetObject<false>(java_dex_file, new_cookie);
- }
-}
-
// This function does all (java) allocations we need to do for the Class being redefined.
// TODO Change this name maybe?
bool Redefiner::ClassRedefinition::EnsureClassAllocationsFinished() {
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index 29a7e1f..3209abb 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -96,6 +96,11 @@
static jvmtiError IsModifiableClass(jvmtiEnv* env, jclass klass, jboolean* is_redefinable);
+ static std::unique_ptr<art::MemMap> MoveDataToMemMap(const std::string& original_location,
+ jint data_len,
+ const unsigned char* dex_data,
+ std::string* error_msg);
+
private:
class ClassRedefinition {
public:
@@ -122,18 +127,13 @@
art::mirror::Class* GetMirrorClass() REQUIRES_SHARED(art::Locks::mutator_lock_);
art::mirror::ClassLoader* GetClassLoader() REQUIRES_SHARED(art::Locks::mutator_lock_);
- // This finds the java.lang.DexFile we will add the native DexFile to as part of the classpath.
- // TODO Make sure the DexFile object returned is the one that the klass_ actually comes from.
- art::mirror::Object* FindSourceDexFileObject(art::Handle<art::mirror::ClassLoader> loader)
- REQUIRES_SHARED(art::Locks::mutator_lock_);
+ const art::DexFile& GetDexFile() {
+ return *dex_file_;
+ }
art::mirror::DexCache* CreateNewDexCache(art::Handle<art::mirror::ClassLoader> loader)
REQUIRES_SHARED(art::Locks::mutator_lock_);
- // Allocates and fills the new DexFileCookie
- art::mirror::LongArray* AllocateDexFileCookie(art::Handle<art::mirror::Object> j_dex_file_obj)
- REQUIRES_SHARED(art::Locks::mutator_lock_);
-
// This may return nullptr with a OOME pending if allocation fails.
art::mirror::ByteArray* AllocateOrGetOriginalDexFileBytes()
REQUIRES_SHARED(art::Locks::mutator_lock_);
@@ -145,6 +145,13 @@
bool FinishRemainingAllocations(int32_t klass_index, /*out*/RedefinitionDataHolder* holder)
REQUIRES_SHARED(art::Locks::mutator_lock_);
+ bool AllocateAndRememberNewDexFileCookie(
+ int32_t klass_index,
+ art::Handle<art::mirror::ClassLoader> source_class_loader,
+ art::Handle<art::mirror::Object> dex_file_obj,
+ /*out*/RedefinitionDataHolder* holder)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
void FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass)
REQUIRES(art::Locks::mutator_lock_);
@@ -170,17 +177,11 @@
// Checks that the class can even be redefined.
bool CheckRedefinable() REQUIRES_SHARED(art::Locks::mutator_lock_);
- // Checks that the dex file does not add/remove methods.
- bool CheckSameMethods() REQUIRES_SHARED(art::Locks::mutator_lock_) {
- LOG(WARNING) << "methods are not checked for modification currently";
- return true;
- }
+ // Checks that the dex file does not add/remove methods, or change their modifiers or types.
+ bool CheckSameMethods() REQUIRES_SHARED(art::Locks::mutator_lock_);
- // Checks that the dex file does not modify fields
- bool CheckSameFields() REQUIRES_SHARED(art::Locks::mutator_lock_) {
- LOG(WARNING) << "Fields are not checked for modification currently";
- return true;
- }
+ // Checks that the dex file does not modify fields types or modifiers.
+ bool CheckSameFields() REQUIRES_SHARED(art::Locks::mutator_lock_);
void UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file,
art::ObjPtr<art::mirror::LongArray> new_cookie)
@@ -234,11 +235,6 @@
/*out*/std::string* error_msg)
REQUIRES_SHARED(art::Locks::mutator_lock_);
- static std::unique_ptr<art::MemMap> MoveDataToMemMap(const std::string& original_location,
- jint data_len,
- const unsigned char* dex_data,
- std::string* error_msg);
-
// TODO Put on all the lock qualifiers.
jvmtiError Run() REQUIRES_SHARED(art::Locks::mutator_lock_);
@@ -248,14 +244,6 @@
REQUIRES_SHARED(art::Locks::mutator_lock_);
void ReleaseAllDexFiles() REQUIRES_SHARED(art::Locks::mutator_lock_);
- // Ensure that obsolete methods are deoptimized. This is needed since optimized methods may have
- // pointers to their ArtMethods stashed in registers that they then use to attempt to hit the
- // DexCache.
- void EnsureObsoleteMethodsAreDeoptimized()
- REQUIRES(art::Locks::mutator_lock_)
- REQUIRES(!art::Locks::thread_list_lock_,
- !art::Locks::classlinker_classes_lock_);
-
void RecordFailure(jvmtiError result, const std::string& class_sig, const std::string& error_msg);
void RecordFailure(jvmtiError result, const std::string& error_msg) {
RecordFailure(result, "NO CLASS", error_msg);
diff --git a/runtime/openjdkjvmti/ti_search.cc b/runtime/openjdkjvmti/ti_search.cc
index 913d2b6..df80f85 100644
--- a/runtime/openjdkjvmti/ti_search.cc
+++ b/runtime/openjdkjvmti/ti_search.cc
@@ -34,15 +34,177 @@
#include "jni.h"
#include "art_jvmti.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "class_linker.h"
#include "dex_file.h"
+#include "jni_internal.h"
+#include "mirror/class-inl.h"
+#include "mirror/object.h"
+#include "mirror/string.h"
+#include "obj_ptr-inl.h"
#include "runtime.h"
+#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedLocalRef.h"
+#include "ti_phase.h"
+#include "thread-inl.h"
+#include "thread_list.h"
namespace openjdkjvmti {
+static std::vector<std::string> gSystemOnloadSegments;
+
+static art::ObjPtr<art::mirror::Object> GetSystemProperties(art::Thread* self,
+ art::ClassLinker* class_linker)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::ObjPtr<art::mirror::Class> system_class =
+ class_linker->LookupClass(self, "Ljava/lang/System;", nullptr);
+ DCHECK(system_class != nullptr);
+ DCHECK(system_class->IsInitialized());
+
+ art::ArtField* props_field =
+ system_class->FindDeclaredStaticField("props", "Ljava/util/Properties;");
+ DCHECK(props_field != nullptr);
+
+ art::ObjPtr<art::mirror::Object> props_obj = props_field->GetObject(system_class);
+ DCHECK(props_obj != nullptr);
+
+ return props_obj;
+}
+
+static void Update() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (gSystemOnloadSegments.empty()) {
+ return;
+ }
+
+ // In the on-load phase we have to modify java.class.path to influence the system classloader.
+ // As this is an unmodifiable system property, we have to access the "defaults" field.
+ art::ClassLinker* class_linker = art::Runtime::Current()->GetClassLinker();
+ DCHECK(class_linker != nullptr);
+ art::Thread* self = art::Thread::Current();
+
+ // Prepare: collect classes, fields and methods.
+ art::ObjPtr<art::mirror::Class> properties_class =
+ class_linker->LookupClass(self, "Ljava/util/Properties;", nullptr);
+ DCHECK(properties_class != nullptr);
+
+ ScopedLocalRef<jobject> defaults_jobj(self->GetJniEnv(), nullptr);
+ {
+ art::ObjPtr<art::mirror::Object> props_obj = GetSystemProperties(self, class_linker);
+
+ art::ArtField* defaults_field =
+ properties_class->FindDeclaredInstanceField("defaults", "Ljava/util/Properties;");
+ DCHECK(defaults_field != nullptr);
+
+ art::ObjPtr<art::mirror::Object> defaults_obj = defaults_field->GetObject(props_obj);
+ DCHECK(defaults_obj != nullptr);
+ defaults_jobj.reset(self->GetJniEnv()->AddLocalReference<jobject>(defaults_obj));
+ }
+
+ art::ArtMethod* get_property =
+ properties_class->FindDeclaredVirtualMethod(
+ "getProperty",
+ "(Ljava/lang/String;)Ljava/lang/String;",
+ art::kRuntimePointerSize);
+ DCHECK(get_property != nullptr);
+ art::ArtMethod* set_property =
+ properties_class->FindDeclaredVirtualMethod(
+ "setProperty",
+ "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/Object;",
+ art::kRuntimePointerSize);
+ DCHECK(set_property != nullptr);
+
+ // This is an allocation. Do this late to avoid the need for handles.
+ ScopedLocalRef<jobject> cp_jobj(self->GetJniEnv(), nullptr);
+ {
+ art::ObjPtr<art::mirror::Object> cp_key =
+ art::mirror::String::AllocFromModifiedUtf8(self, "java.class.path");
+ if (cp_key == nullptr) {
+ self->AssertPendingOOMException();
+ self->ClearException();
+ return;
+ }
+ cp_jobj.reset(self->GetJniEnv()->AddLocalReference<jobject>(cp_key));
+ }
+
+ // OK, now get the current value.
+ std::string str_value;
+ {
+ ScopedLocalRef<jobject> old_value(self->GetJniEnv(),
+ self->GetJniEnv()->CallObjectMethod(
+ defaults_jobj.get(),
+ art::jni::EncodeArtMethod(get_property),
+ cp_jobj.get()));
+ DCHECK(old_value.get() != nullptr);
+
+ str_value = self->DecodeJObject(old_value.get())->AsString()->ToModifiedUtf8();
+ self->GetJniEnv()->DeleteLocalRef(old_value.release());
+ }
+
+ // Update the value by appending the new segments.
+ for (const std::string& segment : gSystemOnloadSegments) {
+ if (!str_value.empty()) {
+ str_value += ":";
+ }
+ str_value += segment;
+ }
+ gSystemOnloadSegments.clear();
+
+ // Create the new value object.
+ ScopedLocalRef<jobject> new_val_jobj(self->GetJniEnv(), nullptr);
+ {
+ art::ObjPtr<art::mirror::Object> new_value =
+ art::mirror::String::AllocFromModifiedUtf8(self, str_value.c_str());
+ if (new_value == nullptr) {
+ self->AssertPendingOOMException();
+ self->ClearException();
+ return;
+ }
+
+ new_val_jobj.reset(self->GetJniEnv()->AddLocalReference<jobject>(new_value));
+ }
+
+ // Write to the defaults.
+ ScopedLocalRef<jobject> res_obj(self->GetJniEnv(),
+ self->GetJniEnv()->CallObjectMethod(defaults_jobj.get(),
+ art::jni::EncodeArtMethod(set_property),
+ cp_jobj.get(),
+ new_val_jobj.get()));
+ if (self->IsExceptionPending()) {
+ self->ClearException();
+ return;
+ }
+}
+
+struct SearchCallback : public art::RuntimePhaseCallback {
+ void NextRuntimePhase(RuntimePhase phase) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (phase == RuntimePhase::kStart) {
+ // It's time to update the system properties.
+ Update();
+ }
+ }
+};
+
+static SearchCallback gSearchCallback;
+
+void SearchUtil::Register() {
+ art::Runtime* runtime = art::Runtime::Current();
+
+ art::ScopedThreadStateChange stsc(art::Thread::Current(),
+ art::ThreadState::kWaitingForDebuggerToAttach);
+ art::ScopedSuspendAll ssa("Add search callback");
+ runtime->GetRuntimeCallbacks()->AddRuntimePhaseCallback(&gSearchCallback);
+}
+
+void SearchUtil::Unregister() {
+ art::ScopedThreadStateChange stsc(art::Thread::Current(),
+ art::ThreadState::kWaitingForDebuggerToAttach);
+ art::ScopedSuspendAll ssa("Remove search callback");
+ art::Runtime* runtime = art::Runtime::Current();
+ runtime->GetRuntimeCallbacks()->RemoveRuntimePhaseCallback(&gSearchCallback);
+}
+
jvmtiError SearchUtil::AddToBootstrapClassLoaderSearch(jvmtiEnv* env ATTRIBUTE_UNUSED,
const char* segment) {
art::Runtime* current = art::Runtime::Current();
@@ -78,14 +240,21 @@
return ERR(NULL_POINTER);
}
- art::Runtime* current = art::Runtime::Current();
- if (current == nullptr) {
+ jvmtiPhase phase = PhaseUtil::GetPhaseUnchecked();
+
+ if (phase == jvmtiPhase::JVMTI_PHASE_ONLOAD) {
+ // We could try and see whether it is a valid path. We could also try to allocate Java
+ // objects to avoid later OOME.
+ gSystemOnloadSegments.push_back(segment);
+ return ERR(NONE);
+ } else if (phase != jvmtiPhase::JVMTI_PHASE_LIVE) {
return ERR(WRONG_PHASE);
}
- jobject sys_class_loader = current->GetSystemClassLoader();
+
+ jobject sys_class_loader = art::Runtime::Current()->GetSystemClassLoader();
if (sys_class_loader == nullptr) {
- // TODO: Support classpath change in OnLoad.
- return ERR(WRONG_PHASE);
+ // This is unexpected.
+ return ERR(INTERNAL);
}
// We'll use BaseDexClassLoader.addDexPath, as it takes care of array resizing etc. As a downside,
diff --git a/runtime/openjdkjvmti/ti_search.h b/runtime/openjdkjvmti/ti_search.h
index 6a52e80..cd7b4be 100644
--- a/runtime/openjdkjvmti/ti_search.h
+++ b/runtime/openjdkjvmti/ti_search.h
@@ -32,12 +32,17 @@
#ifndef ART_RUNTIME_OPENJDKJVMTI_TI_SEARCH_H_
#define ART_RUNTIME_OPENJDKJVMTI_TI_SEARCH_H_
+#include <vector>
+
#include "jvmti.h"
namespace openjdkjvmti {
class SearchUtil {
public:
+ static void Register();
+ static void Unregister();
+
static jvmtiError AddToBootstrapClassLoaderSearch(jvmtiEnv* env, const char* segment);
static jvmtiError AddToSystemClassLoaderSearch(jvmtiEnv* env, const char* segment);
diff --git a/runtime/openjdkjvmti/ti_stack.cc b/runtime/openjdkjvmti/ti_stack.cc
index 4cf55a6..b5a6c6e 100644
--- a/runtime/openjdkjvmti/ti_stack.cc
+++ b/runtime/openjdkjvmti/ti_stack.cc
@@ -377,7 +377,8 @@
jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
jvmtiStackInfo& new_stack_info = stack_info[i];
- jthread thread_peer = current->GetJniEnv()->AddLocalReference<jthread>(threads[i]->GetPeer());
+ jthread thread_peer = current->GetJniEnv()->AddLocalReference<jthread>(
+ threads[i]->GetPeerFromOtherThread());
new_stack_info.thread = thread_peer;
if (old_stack_info.frame_count > 0) {
@@ -453,7 +454,7 @@
}
// Get the peer, and check whether we know it.
- art::ObjPtr<art::mirror::Object> peer = thread->GetPeer();
+ art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
for (size_t index = 0; index != handles.size(); ++index) {
if (peer == handles[index].Get()) {
// Found the thread.
diff --git a/runtime/openjdkjvmti/ti_thread.cc b/runtime/openjdkjvmti/ti_thread.cc
index 9f81d6b..00d4144 100644
--- a/runtime/openjdkjvmti/ti_thread.cc
+++ b/runtime/openjdkjvmti/ti_thread.cc
@@ -61,11 +61,14 @@
}
return self->GetJniEnv()->AddLocalReference<jthread>(self->GetPeer());
}
- void Post(art::Thread* self, ArtJvmtiEvent type) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ template <ArtJvmtiEvent kEvent>
+ void Post(art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
DCHECK_EQ(self, art::Thread::Current());
ScopedLocalRef<jthread> thread(self->GetJniEnv(), GetThreadObject(self));
art::ScopedThreadSuspension sts(self, art::ThreadState::kNative);
- event_handler->DispatchEvent(self, type, self->GetJniEnv(), thread.get());
+ event_handler->DispatchEvent<kEvent>(self,
+ reinterpret_cast<JNIEnv*>(self->GetJniEnv()),
+ thread.get());
}
void ThreadStart(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
@@ -81,11 +84,11 @@
}
return;
}
- Post(self, ArtJvmtiEvent::kThreadStart);
+ Post<ArtJvmtiEvent::kThreadStart>(self);
}
void ThreadDeath(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
- Post(self, ArtJvmtiEvent::kThreadEnd);
+ Post<ArtJvmtiEvent::kThreadEnd>(self);
}
void NextRuntimePhase(RuntimePhase phase) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
@@ -93,7 +96,7 @@
// We moved to VMInit. Report the main thread as started (it was attached early, and must
// not be reported until Init.
started = true;
- Post(art::Thread::Current(), ArtJvmtiEvent::kThreadStart);
+ Post<ArtJvmtiEvent::kThreadStart>(art::Thread::Current());
}
}
@@ -197,7 +200,7 @@
info_ptr->is_daemon = self->IsDaemon();
- art::ObjPtr<art::mirror::Object> peer = self->GetPeer();
+ art::ObjPtr<art::mirror::Object> peer = self->GetPeerFromOtherThread();
// ThreadGroup.
if (peer != nullptr) {
@@ -455,7 +458,7 @@
continue;
}
- art::ObjPtr<art::mirror::Object> peer = thread->GetPeer();
+ art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
if (peer != nullptr) {
peers.push_back(peer);
}
diff --git a/runtime/openjdkjvmti/ti_threadgroup.cc b/runtime/openjdkjvmti/ti_threadgroup.cc
index 35b1bfd..e63ce65 100644
--- a/runtime/openjdkjvmti/ti_threadgroup.cc
+++ b/runtime/openjdkjvmti/ti_threadgroup.cc
@@ -174,7 +174,7 @@
if (t->IsStillStarting()) {
continue;
}
- art::ObjPtr<art::mirror::Object> peer = t->GetPeer();
+ art::ObjPtr<art::mirror::Object> peer = t->GetPeerFromOtherThread();
if (peer == nullptr) {
continue;
}
diff --git a/runtime/openjdkjvmti/transform.cc b/runtime/openjdkjvmti/transform.cc
index af4fb71..2fec631 100644
--- a/runtime/openjdkjvmti/transform.cc
+++ b/runtime/openjdkjvmti/transform.cc
@@ -68,19 +68,17 @@
for (ArtClassDefinition& def : *definitions) {
jint new_len = -1;
unsigned char* new_data = nullptr;
- // Static casts are so that we get the right template initialization for the special event
- // handling code required by the ClassFileLoadHooks.
- gEventHandler.DispatchEvent(self,
- ArtJvmtiEvent::kClassFileLoadHookRetransformable,
- GetJniEnv(env),
- static_cast<jclass>(def.klass),
- static_cast<jobject>(def.loader),
- static_cast<const char*>(def.name.c_str()),
- static_cast<jobject>(def.protection_domain),
- static_cast<jint>(def.dex_len),
- static_cast<const unsigned char*>(def.dex_data.get()),
- static_cast<jint*>(&new_len),
- static_cast<unsigned char**>(&new_data));
+ gEventHandler.DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(
+ self,
+ GetJniEnv(env),
+ def.klass,
+ def.loader,
+ def.name.c_str(),
+ def.protection_domain,
+ def.dex_len,
+ static_cast<const unsigned char*>(def.dex_data.get()),
+ &new_len,
+ &new_data);
def.SetNewDexData(env, new_len, new_data);
}
return OK;
@@ -139,20 +137,6 @@
return OK;
}
-static jvmtiError CopyDataIntoJvmtiBuffer(ArtJvmTiEnv* env,
- const unsigned char* source,
- jint len,
- /*out*/unsigned char** dest) {
- jvmtiError res = env->Allocate(len, dest);
- if (res != OK) {
- return res;
- }
- memcpy(reinterpret_cast<void*>(*dest),
- reinterpret_cast<const void*>(source),
- len);
- return OK;
-}
-
jvmtiError Transformer::GetDexDataForRetransformation(ArtJvmTiEnv* env,
art::Handle<art::mirror::Class> klass,
/*out*/jint* dex_data_len,
@@ -195,7 +179,9 @@
}
def->klass = klass;
def->loader = soa.AddLocalReference<jobject>(hs_klass->GetClassLoader());
- def->name = art::mirror::Class::ComputeName(hs_klass)->ToModifiedUtf8();
+ std::string descriptor_store;
+ std::string descriptor(hs_klass->GetDescriptor(&descriptor_store));
+ def->name = descriptor.substr(1, descriptor.size() - 2);
// TODO is this always null?
def->protection_domain = nullptr;
if (def->dex_data.get() == nullptr) {
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index d1ad77c..9113f83 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -300,8 +300,6 @@
.Define("-Xplugin:_")
.WithType<std::vector<Plugin>>().AppendValues()
.IntoKey(M::Plugins)
- .Define("-Xfully-deoptable")
- .IntoKey(M::FullyDeoptable)
.Define("-XX:ThreadSuspendTimeout=_") // in ms
.WithType<MillisecondsToNanoseconds>() // store as ns
.IntoKey(M::ThreadSuspendTimeout)
@@ -599,42 +597,6 @@
args.Set(M::HeapGrowthLimit, args.GetOrDefault(M::MemoryMaximumSize));
}
- if (args.GetOrDefault(M::Experimental) & ExperimentalFlags::kRuntimePlugins) {
- LOG(WARNING) << "Experimental runtime plugin support has been enabled. No guarantees are made "
- << "about stability or usage of this plugin support. Use at your own risk. Do "
- << "not attempt to write shipping code that relies on the implementation of "
- << "runtime plugins.";
- } else if (!args.GetOrDefault(M::Plugins).empty()) {
- LOG(WARNING) << "Experimental runtime plugin support has not been enabled. Ignored options: ";
- for (const auto& op : args.GetOrDefault(M::Plugins)) {
- LOG(WARNING) << " -plugin:" << op.GetLibrary();
- }
- }
-
- if (args.GetOrDefault(M::Experimental) & ExperimentalFlags::kAgents) {
- LOG(WARNING) << "Experimental runtime agent support has been enabled. No guarantees are made "
- << "the completeness, accuracy, reliability, or stability of the agent "
- << "implementation. Use at your own risk. Do not attempt to write shipping code "
- << "that relies on the implementation of any part of this api.";
- } else if (!args.GetOrDefault(M::AgentLib).empty() || !args.GetOrDefault(M::AgentPath).empty()) {
- LOG(WARNING) << "agent support has not been enabled. Enable experimental agent "
- << " support with '-XExperimental:agent'. Ignored options are:";
- for (const auto& op : args.GetOrDefault(M::AgentLib)) {
- if (op.HasArgs()) {
- LOG(WARNING) << " -agentlib:" << op.GetName() << "=" << op.GetArgs();
- } else {
- LOG(WARNING) << " -agentlib:" << op.GetName();
- }
- }
- for (const auto& op : args.GetOrDefault(M::AgentPath)) {
- if (op.HasArgs()) {
- LOG(WARNING) << " -agentpath:" << op.GetName() << "=" << op.GetArgs();
- } else {
- LOG(WARNING) << " -agentpath:" << op.GetName();
- }
- }
- }
-
*runtime_options = std::move(args);
return true;
}
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index b809c3e..bf99509 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -347,9 +347,11 @@
callee_method_ = method;
return true;
} else if (!single_frame_deopt_ &&
- !Runtime::Current()->IsDeoptimizeable(GetCurrentQuickFramePc())) {
+ !Runtime::Current()->IsAsyncDeoptimizeable(GetCurrentQuickFramePc())) {
// We hit some code that's not deoptimizeable. However, Single-frame deoptimization triggered
// from compiled code is always allowed since HDeoptimize always saves the full environment.
+ LOG(WARNING) << "Got request to deoptimize un-deoptimizable method "
+ << method->PrettyMethod();
FinishStackWalk();
return false; // End stack walk.
} else {
@@ -405,7 +407,8 @@
CodeInfoEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
const size_t number_of_vregs = m->GetCodeItem()->registers_size_;
- uint32_t register_mask = stack_map.GetRegisterMask(encoding.stack_map_encoding);
+ uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, stack_map);
+ BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map);
DexRegisterMap vreg_map = IsInInlinedFrame()
? code_info.GetDexRegisterMapAtDepth(GetCurrentInliningDepth() - 1,
code_info.GetInlineInfoOf(stack_map, encoding),
@@ -438,8 +441,7 @@
const uint8_t* addr = reinterpret_cast<const uint8_t*>(GetCurrentQuickFrame()) + offset;
value = *reinterpret_cast<const uint32_t*>(addr);
uint32_t bit = (offset >> 2);
- if (stack_map.GetNumberOfStackMaskBits(encoding.stack_map_encoding) > bit &&
- stack_map.GetStackMaskBit(encoding.stack_map_encoding, bit)) {
+ if (bit < encoding.stack_mask_size_in_bits && stack_mask.LoadBit(bit)) {
is_reference = true;
}
break;
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 06cd7ff..9609bee 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -114,6 +114,7 @@
#include "native/java_lang_Thread.h"
#include "native/java_lang_Throwable.h"
#include "native/java_lang_VMClassLoader.h"
+#include "native/java_lang_invoke_MethodHandleImpl.h"
#include "native/java_lang_ref_FinalizerReference.h"
#include "native/java_lang_ref_Reference.h"
#include "native/java_lang_reflect_Array.h"
@@ -244,7 +245,7 @@
force_native_bridge_(false),
is_native_bridge_loaded_(false),
is_native_debuggable_(false),
- is_fully_deoptable_(false),
+ is_java_debuggable_(false),
zygote_max_failed_boots_(0),
experimental_flags_(ExperimentalFlags::kNone),
oat_file_manager_(nullptr),
@@ -671,24 +672,6 @@
started_ = true;
- // Create the JIT either if we have to use JIT compilation or save profiling info.
- // TODO(calin): We use the JIT class as a proxy for JIT compilation and for
- // recoding profiles. Maybe we should consider changing the name to be more clear it's
- // not only about compiling. b/28295073.
- if (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) {
- std::string error_msg;
- if (!IsZygote()) {
- // If we are the zygote then we need to wait until after forking to create the code cache
- // due to SELinux restrictions on r/w/x memory regions.
- CreateJit();
- } else if (jit_options_->UseJitCompilation()) {
- if (!jit::Jit::LoadCompilerLibrary(&error_msg)) {
- // Try to load compiler pre zygote to reduce PSS. b/27744947
- LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
- }
- }
- }
-
if (!IsImageDex2OatEnabled() || !GetHeap()->HasBootImageSpace()) {
ScopedObjectAccess soa(self);
StackHandleScope<2> hs(soa.Self());
@@ -713,6 +696,27 @@
Thread::FinishStartup();
+ // Create the JIT either if we have to use JIT compilation or save profiling info. This is
+ // done after FinishStartup as the JIT pool needs Java thread peers, which require the main
+ // ThreadGroup to exist.
+ //
+ // TODO(calin): We use the JIT class as a proxy for JIT compilation and for
+ // recoding profiles. Maybe we should consider changing the name to be more clear it's
+ // not only about compiling. b/28295073.
+ if (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) {
+ std::string error_msg;
+ if (!IsZygote()) {
+ // If we are the zygote then we need to wait until after forking to create the code cache
+ // due to SELinux restrictions on r/w/x memory regions.
+ CreateJit();
+ } else if (jit_options_->UseJitCompilation()) {
+ if (!jit::Jit::LoadCompilerLibrary(&error_msg)) {
+ // Try to load compiler pre zygote to reduce PSS. b/27744947
+ LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
+ }
+ }
+ }
+
// Send the start phase event. We have to wait till here as this is when the main thread peer
// has just been generated, important root clinits have been run and JNI is completely functional.
{
@@ -825,14 +829,6 @@
return IsShuttingDownLocked();
}
-bool Runtime::IsDebuggable() const {
- if (IsFullyDeoptable()) {
- return true;
- }
- const OatFile* oat_file = GetOatFileManager().GetPrimaryOatFile();
- return oat_file != nullptr && oat_file->IsDebuggable();
-}
-
void Runtime::StartDaemonThreads() {
ScopedTrace trace(__FUNCTION__);
VLOG(startup) << "Runtime::StartDaemonThreads entering";
@@ -1038,6 +1034,12 @@
compiler_executable_ = runtime_options.ReleaseOrDefault(Opt::Compiler);
compiler_options_ = runtime_options.ReleaseOrDefault(Opt::CompilerOptions);
+ for (StringPiece option : Runtime::Current()->GetCompilerOptions()) {
+ if (option.starts_with("--debuggable")) {
+ SetJavaDebuggable(true);
+ break;
+ }
+ }
image_compiler_options_ = runtime_options.ReleaseOrDefault(Opt::ImageCompilerOptions);
image_location_ = runtime_options.GetOrDefault(Opt::Image);
@@ -1052,8 +1054,6 @@
verify_ = runtime_options.GetOrDefault(Opt::Verify);
allow_dex_file_fallback_ = !runtime_options.Exists(Opt::NoDexFileFallback);
- is_fully_deoptable_ = runtime_options.Exists(Opt::FullyDeoptable);
-
no_sig_chain_ = runtime_options.Exists(Opt::NoSigChain);
force_native_bridge_ = runtime_options.Exists(Opt::ForceNativeBridge);
@@ -1069,16 +1069,13 @@
experimental_flags_ = runtime_options.GetOrDefault(Opt::Experimental);
is_low_memory_mode_ = runtime_options.Exists(Opt::LowMemoryMode);
- if (experimental_flags_ & ExperimentalFlags::kRuntimePlugins) {
- plugins_ = runtime_options.ReleaseOrDefault(Opt::Plugins);
- }
- if (experimental_flags_ & ExperimentalFlags::kAgents) {
- agents_ = runtime_options.ReleaseOrDefault(Opt::AgentPath);
- // TODO Add back in -agentlib
- // for (auto lib : runtime_options.ReleaseOrDefault(Opt::AgentLib)) {
- // agents_.push_back(lib);
- // }
- }
+ plugins_ = runtime_options.ReleaseOrDefault(Opt::Plugins);
+ agents_ = runtime_options.ReleaseOrDefault(Opt::AgentPath);
+ // TODO Add back in -agentlib
+ // for (auto lib : runtime_options.ReleaseOrDefault(Opt::AgentLib)) {
+ // agents_.push_back(lib);
+ // }
+
XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption);
heap_ = new gc::Heap(runtime_options.GetOrDefault(Opt::MemoryInitialSize),
runtime_options.GetOrDefault(Opt::HeapGrowthLimit),
@@ -1261,6 +1258,11 @@
ScopedTrace trace2("AddImageStringsToTable");
GetInternTable()->AddImagesStringsToTable(heap_->GetBootImageSpaces());
}
+ if (IsJavaDebuggable()) {
+ // Now that we have loaded the boot image, deoptimize its methods if we are running
+ // debuggable, as the code may have been compiled non-debuggable.
+ DeoptimizeBootImage();
+ }
} else {
std::vector<std::string> dex_filenames;
Split(boot_class_path_string_, ':', &dex_filenames);
@@ -1407,7 +1409,7 @@
}
// Is the process debuggable? Otherwise, do not attempt to load the plugin.
- if (!runtime->IsDebuggable()) {
+ if (!runtime->IsJavaDebuggable()) {
*error_msg = "Process is not debuggable.";
return false;
}
@@ -1539,6 +1541,7 @@
register_java_lang_Class(env);
register_java_lang_DexCache(env);
register_java_lang_Object(env);
+ register_java_lang_invoke_MethodHandleImpl(env);
register_java_lang_ref_FinalizerReference(env);
register_java_lang_reflect_Array(env);
register_java_lang_reflect_Constructor(env);
@@ -2207,9 +2210,15 @@
return verify_ == verifier::VerifyMode::kSoftFail;
}
-bool Runtime::IsDeoptimizeable(uintptr_t code) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return !heap_->IsInBootImageOatFile(reinterpret_cast<void *>(code));
+bool Runtime::IsAsyncDeoptimizeable(uintptr_t code) const {
+ // We only support async deopt (ie the compiled code is not explicitly asking for
+ // deopt, but something else like the debugger) in debuggable JIT code.
+ // We could look at the oat file where `code` is being defined,
+ // and check whether it's been compiled debuggable, but we decided to
+ // only rely on the JIT for debuggable apps.
+ return IsJavaDebuggable() &&
+ GetJit() != nullptr &&
+ GetJit()->GetCodeCache()->ContainsPc(reinterpret_cast<const void*>(code));
}
LinearAlloc* Runtime::CreateLinearAlloc() {
@@ -2293,4 +2302,43 @@
return callbacks_.get();
}
+// Used to patch boot image method entry point to interpreter bridge.
+class UpdateEntryPointsClassVisitor : public ClassVisitor {
+ public:
+ explicit UpdateEntryPointsClassVisitor(instrumentation::Instrumentation* instrumentation)
+ : instrumentation_(instrumentation) {}
+
+ bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ for (auto& m : klass->GetMethods(pointer_size)) {
+ const void* code = m.GetEntryPointFromQuickCompiledCode();
+ if (Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
+ !m.IsNative() &&
+ !m.IsProxyMethod()) {
+ instrumentation_->UpdateMethodsCodeForJavaDebuggable(&m, GetQuickToInterpreterBridge());
+ }
+ }
+ return true;
+ }
+
+ private:
+ instrumentation::Instrumentation* const instrumentation_;
+};
+
+void Runtime::SetJavaDebuggable(bool value) {
+ is_java_debuggable_ = value;
+ // Do not call DeoptimizeBootImage just yet, the runtime may still be starting up.
+}
+
+void Runtime::DeoptimizeBootImage() {
+ // If we've already started and we are setting this runtime to debuggable,
+ // we patch entry points of methods in boot image to interpreter bridge, as
+ // boot image code may be AOT compiled as not debuggable.
+ if (!GetInstrumentation()->IsForcedInterpretOnly()) {
+ ScopedObjectAccess soa(Thread::Current());
+ UpdateEntryPointsClassVisitor visitor(GetInstrumentation());
+ GetClassLinker()->VisitClasses(&visitor);
+ }
+}
+
} // namespace art
diff --git a/runtime/runtime.h b/runtime/runtime.h
index f7d6810..30b1756 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -434,7 +434,7 @@
kInitialize
};
- jit::Jit* GetJit() {
+ jit::Jit* GetJit() const {
return jit_.get();
}
@@ -569,15 +569,14 @@
return jit_options_.get();
}
- bool IsDebuggable() const;
-
- bool IsFullyDeoptable() const {
- return is_fully_deoptable_;
+ bool IsJavaDebuggable() const {
+ return is_java_debuggable_;
}
- void SetFullyDeoptable(bool value) {
- is_fully_deoptable_ = value;
- }
+ void SetJavaDebuggable(bool value);
+
+ // Deoptimize the boot image, called for Java debuggable apps.
+ void DeoptimizeBootImage();
bool IsNativeDebuggable() const {
return is_native_debuggable_;
@@ -639,9 +638,9 @@
return zygote_no_threads_;
}
- // Returns if the code can be deoptimized. Code may be compiled with some
+ // Returns if the code can be deoptimized asynchronously. Code may be compiled with some
// optimization that makes it impossible to deoptimize.
- bool IsDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsAsyncDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a saved copy of the environment (getenv/setenv values).
// Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc.
@@ -863,8 +862,8 @@
// Whether we are running under native debugger.
bool is_native_debuggable_;
- // Whether we are expected to be deoptable at all points.
- bool is_fully_deoptable_;
+ // Whether Java code needs to be debuggable.
+ bool is_java_debuggable_;
// The maximum number of failed boots we allow before pruning the dalvik cache
// and trying again. This option is only inspected when we're running as a
diff --git a/runtime/runtime_android.cc b/runtime/runtime_android.cc
index 0a996a9..495296c 100644
--- a/runtime/runtime_android.cc
+++ b/runtime/runtime_android.cc
@@ -14,56 +14,33 @@
* limitations under the License.
*/
-#include <signal.h>
-#include <string.h>
-#include <sys/utsname.h>
-#include <inttypes.h>
+#include "runtime.h"
-#include "base/logging.h"
-#include "base/mutex.h"
-#include "thread-inl.h"
-#include "utils.h"
+#include <signal.h>
+
+#include <cstring>
+
+#include "runtime_common.h"
namespace art {
-static constexpr bool kUseSignalHandler = false;
-
struct sigaction old_action;
-void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_context) {
- static bool handling_unexpected_signal = false;
- if (handling_unexpected_signal) {
- LogHelper::LogLineLowStack(__FILE__,
- __LINE__,
- ::android::base::FATAL_WITHOUT_ABORT,
- "HandleUnexpectedSignal reentered\n");
- _exit(1);
- }
- handling_unexpected_signal = true;
- gAborting++; // set before taking any locks
- MutexLock mu(Thread::Current(), *Locks::unexpected_signal_lock_);
- Runtime* runtime = Runtime::Current();
- if (runtime != nullptr) {
- // Print this out first in case DumpObject faults.
- LOG(FATAL_WITHOUT_ABORT) << "Fault message: " << runtime->GetFaultMessage();
- }
+void HandleUnexpectedSignalAndroid(int signal_number, siginfo_t* info, void* raw_context) {
+ HandleUnexpectedSignalCommon(signal_number, info, raw_context, /* running_on_linux */ false);
+
// Run the old signal handler.
old_action.sa_sigaction(signal_number, info, raw_context);
}
void Runtime::InitPlatformSignalHandlers() {
- if (kUseSignalHandler) {
- struct sigaction action;
- memset(&action, 0, sizeof(action));
- sigemptyset(&action.sa_mask);
- action.sa_sigaction = HandleUnexpectedSignal;
- // Use the three-argument sa_sigaction handler.
- action.sa_flags |= SA_SIGINFO;
- // Use the alternate signal stack so we can catch stack overflows.
- action.sa_flags |= SA_ONSTACK;
- int rc = 0;
- rc += sigaction(SIGSEGV, &action, &old_action);
- CHECK_EQ(rc, 0);
+ // Enable the signal handler dumping crash information to the logcat
+ // when the Android root is not "/system".
+ const char* android_root = getenv("ANDROID_ROOT");
+ if (android_root != nullptr && strcmp(android_root, "/system") != 0) {
+ InitPlatformSignalHandlersCommon(HandleUnexpectedSignalAndroid,
+ &old_action,
+ /* handle_timeout_signal */ false);
}
}
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index 66eb2ec..f1e78b4 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -255,7 +255,7 @@
const DexFile& initial_dex_file,
const DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
/*out*/DexFile const** final_dex_file ATTRIBUTE_UNUSED,
- /*out*/DexFile::ClassDef const** final_dex_cache ATTRIBUTE_UNUSED)
+ /*out*/DexFile::ClassDef const** final_class_def ATTRIBUTE_UNUSED)
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
std::string location(initial_dex_file.GetLocation());
std::string event =
diff --git a/runtime/runtime_common.cc b/runtime/runtime_common.cc
new file mode 100644
index 0000000..3690129
--- /dev/null
+++ b/runtime/runtime_common.cc
@@ -0,0 +1,486 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "runtime_common.h"
+
+#include <signal.h>
+
+#include <cinttypes>
+#include <iostream>
+#include <sstream>
+#include <string>
+
+#include "android-base/stringprintf.h"
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "native_stack_dump.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+
+namespace art {
+
+using android::base::StringPrintf;
+
+static constexpr bool kUseSigRTTimeout = true;
+static constexpr bool kDumpNativeStackOnTimeout = true;
+
+const char* GetSignalName(int signal_number) {
+ switch (signal_number) {
+ case SIGABRT: return "SIGABRT";
+ case SIGBUS: return "SIGBUS";
+ case SIGFPE: return "SIGFPE";
+ case SIGILL: return "SIGILL";
+ case SIGPIPE: return "SIGPIPE";
+ case SIGSEGV: return "SIGSEGV";
+#if defined(SIGSTKFLT)
+ case SIGSTKFLT: return "SIGSTKFLT";
+#endif
+ case SIGTRAP: return "SIGTRAP";
+ }
+ return "??";
+}
+
+const char* GetSignalCodeName(int signal_number, int signal_code) {
+ // Try the signal-specific codes...
+ switch (signal_number) {
+ case SIGILL:
+ switch (signal_code) {
+ case ILL_ILLOPC: return "ILL_ILLOPC";
+ case ILL_ILLOPN: return "ILL_ILLOPN";
+ case ILL_ILLADR: return "ILL_ILLADR";
+ case ILL_ILLTRP: return "ILL_ILLTRP";
+ case ILL_PRVOPC: return "ILL_PRVOPC";
+ case ILL_PRVREG: return "ILL_PRVREG";
+ case ILL_COPROC: return "ILL_COPROC";
+ case ILL_BADSTK: return "ILL_BADSTK";
+ }
+ break;
+ case SIGBUS:
+ switch (signal_code) {
+ case BUS_ADRALN: return "BUS_ADRALN";
+ case BUS_ADRERR: return "BUS_ADRERR";
+ case BUS_OBJERR: return "BUS_OBJERR";
+ }
+ break;
+ case SIGFPE:
+ switch (signal_code) {
+ case FPE_INTDIV: return "FPE_INTDIV";
+ case FPE_INTOVF: return "FPE_INTOVF";
+ case FPE_FLTDIV: return "FPE_FLTDIV";
+ case FPE_FLTOVF: return "FPE_FLTOVF";
+ case FPE_FLTUND: return "FPE_FLTUND";
+ case FPE_FLTRES: return "FPE_FLTRES";
+ case FPE_FLTINV: return "FPE_FLTINV";
+ case FPE_FLTSUB: return "FPE_FLTSUB";
+ }
+ break;
+ case SIGSEGV:
+ switch (signal_code) {
+ case SEGV_MAPERR: return "SEGV_MAPERR";
+ case SEGV_ACCERR: return "SEGV_ACCERR";
+#if defined(SEGV_BNDERR)
+ case SEGV_BNDERR: return "SEGV_BNDERR";
+#endif
+ }
+ break;
+ case SIGTRAP:
+ switch (signal_code) {
+ case TRAP_BRKPT: return "TRAP_BRKPT";
+ case TRAP_TRACE: return "TRAP_TRACE";
+ }
+ break;
+ }
+ // Then the other codes...
+ switch (signal_code) {
+ case SI_USER: return "SI_USER";
+#if defined(SI_KERNEL)
+ case SI_KERNEL: return "SI_KERNEL";
+#endif
+ case SI_QUEUE: return "SI_QUEUE";
+ case SI_TIMER: return "SI_TIMER";
+ case SI_MESGQ: return "SI_MESGQ";
+ case SI_ASYNCIO: return "SI_ASYNCIO";
+#if defined(SI_SIGIO)
+ case SI_SIGIO: return "SI_SIGIO";
+#endif
+#if defined(SI_TKILL)
+ case SI_TKILL: return "SI_TKILL";
+#endif
+ }
+ // Then give up...
+ return "?";
+}
+
+struct UContext {
+ explicit UContext(void* raw_context)
+ : context(reinterpret_cast<ucontext_t*>(raw_context)->uc_mcontext) {}
+
+ void Dump(std::ostream& os) const;
+
+ void DumpRegister32(std::ostream& os, const char* name, uint32_t value) const;
+ void DumpRegister64(std::ostream& os, const char* name, uint64_t value) const;
+
+ void DumpX86Flags(std::ostream& os, uint32_t flags) const;
+ // Print some of the information from the status register (CPSR on ARMv7, PSTATE on ARMv8).
+ template <typename RegisterType>
+ void DumpArmStatusRegister(std::ostream& os, RegisterType status_register) const;
+
+ mcontext_t& context;
+};
+
+void UContext::Dump(std::ostream& os) const {
+#if defined(__APPLE__) && defined(__i386__)
+ DumpRegister32(os, "eax", context->__ss.__eax);
+ DumpRegister32(os, "ebx", context->__ss.__ebx);
+ DumpRegister32(os, "ecx", context->__ss.__ecx);
+ DumpRegister32(os, "edx", context->__ss.__edx);
+ os << '\n';
+
+ DumpRegister32(os, "edi", context->__ss.__edi);
+ DumpRegister32(os, "esi", context->__ss.__esi);
+ DumpRegister32(os, "ebp", context->__ss.__ebp);
+ DumpRegister32(os, "esp", context->__ss.__esp);
+ os << '\n';
+
+ DumpRegister32(os, "eip", context->__ss.__eip);
+ os << " ";
+ DumpRegister32(os, "eflags", context->__ss.__eflags);
+ DumpX86Flags(os, context->__ss.__eflags);
+ os << '\n';
+
+ DumpRegister32(os, "cs", context->__ss.__cs);
+ DumpRegister32(os, "ds", context->__ss.__ds);
+ DumpRegister32(os, "es", context->__ss.__es);
+ DumpRegister32(os, "fs", context->__ss.__fs);
+ os << '\n';
+ DumpRegister32(os, "gs", context->__ss.__gs);
+ DumpRegister32(os, "ss", context->__ss.__ss);
+#elif defined(__linux__) && defined(__i386__)
+ DumpRegister32(os, "eax", context.gregs[REG_EAX]);
+ DumpRegister32(os, "ebx", context.gregs[REG_EBX]);
+ DumpRegister32(os, "ecx", context.gregs[REG_ECX]);
+ DumpRegister32(os, "edx", context.gregs[REG_EDX]);
+ os << '\n';
+
+ DumpRegister32(os, "edi", context.gregs[REG_EDI]);
+ DumpRegister32(os, "esi", context.gregs[REG_ESI]);
+ DumpRegister32(os, "ebp", context.gregs[REG_EBP]);
+ DumpRegister32(os, "esp", context.gregs[REG_ESP]);
+ os << '\n';
+
+ DumpRegister32(os, "eip", context.gregs[REG_EIP]);
+ os << " ";
+ DumpRegister32(os, "eflags", context.gregs[REG_EFL]);
+ DumpX86Flags(os, context.gregs[REG_EFL]);
+ os << '\n';
+
+ DumpRegister32(os, "cs", context.gregs[REG_CS]);
+ DumpRegister32(os, "ds", context.gregs[REG_DS]);
+ DumpRegister32(os, "es", context.gregs[REG_ES]);
+ DumpRegister32(os, "fs", context.gregs[REG_FS]);
+ os << '\n';
+ DumpRegister32(os, "gs", context.gregs[REG_GS]);
+ DumpRegister32(os, "ss", context.gregs[REG_SS]);
+#elif defined(__linux__) && defined(__x86_64__)
+ DumpRegister64(os, "rax", context.gregs[REG_RAX]);
+ DumpRegister64(os, "rbx", context.gregs[REG_RBX]);
+ DumpRegister64(os, "rcx", context.gregs[REG_RCX]);
+ DumpRegister64(os, "rdx", context.gregs[REG_RDX]);
+ os << '\n';
+
+ DumpRegister64(os, "rdi", context.gregs[REG_RDI]);
+ DumpRegister64(os, "rsi", context.gregs[REG_RSI]);
+ DumpRegister64(os, "rbp", context.gregs[REG_RBP]);
+ DumpRegister64(os, "rsp", context.gregs[REG_RSP]);
+ os << '\n';
+
+ DumpRegister64(os, "r8 ", context.gregs[REG_R8]);
+ DumpRegister64(os, "r9 ", context.gregs[REG_R9]);
+ DumpRegister64(os, "r10", context.gregs[REG_R10]);
+ DumpRegister64(os, "r11", context.gregs[REG_R11]);
+ os << '\n';
+
+ DumpRegister64(os, "r12", context.gregs[REG_R12]);
+ DumpRegister64(os, "r13", context.gregs[REG_R13]);
+ DumpRegister64(os, "r14", context.gregs[REG_R14]);
+ DumpRegister64(os, "r15", context.gregs[REG_R15]);
+ os << '\n';
+
+ DumpRegister64(os, "rip", context.gregs[REG_RIP]);
+ os << " ";
+ DumpRegister32(os, "eflags", context.gregs[REG_EFL]);
+ DumpX86Flags(os, context.gregs[REG_EFL]);
+ os << '\n';
+
+ DumpRegister32(os, "cs", (context.gregs[REG_CSGSFS]) & 0x0FFFF);
+ DumpRegister32(os, "gs", (context.gregs[REG_CSGSFS] >> 16) & 0x0FFFF);
+ DumpRegister32(os, "fs", (context.gregs[REG_CSGSFS] >> 32) & 0x0FFFF);
+ os << '\n';
+#elif defined(__linux__) && defined(__arm__)
+ DumpRegister32(os, "r0", context.arm_r0);
+ DumpRegister32(os, "r1", context.arm_r1);
+ DumpRegister32(os, "r2", context.arm_r2);
+ DumpRegister32(os, "r3", context.arm_r3);
+ os << '\n';
+
+ DumpRegister32(os, "r4", context.arm_r4);
+ DumpRegister32(os, "r5", context.arm_r5);
+ DumpRegister32(os, "r6", context.arm_r6);
+ DumpRegister32(os, "r7", context.arm_r7);
+ os << '\n';
+
+ DumpRegister32(os, "r8", context.arm_r8);
+ DumpRegister32(os, "r9", context.arm_r9);
+ DumpRegister32(os, "r10", context.arm_r10);
+ DumpRegister32(os, "fp", context.arm_fp);
+ os << '\n';
+
+ DumpRegister32(os, "ip", context.arm_ip);
+ DumpRegister32(os, "sp", context.arm_sp);
+ DumpRegister32(os, "lr", context.arm_lr);
+ DumpRegister32(os, "pc", context.arm_pc);
+ os << '\n';
+
+ DumpRegister32(os, "cpsr", context.arm_cpsr);
+ DumpArmStatusRegister(os, context.arm_cpsr);
+ os << '\n';
+#elif defined(__linux__) && defined(__aarch64__)
+ for (size_t i = 0; i <= 30; ++i) {
+ std::string reg_name = "x" + std::to_string(i);
+ DumpRegister64(os, reg_name.c_str(), context.regs[i]);
+ if (i % 4 == 3) {
+ os << '\n';
+ }
+ }
+ os << '\n';
+
+ DumpRegister64(os, "sp", context.sp);
+ DumpRegister64(os, "pc", context.pc);
+ os << '\n';
+
+ DumpRegister64(os, "pstate", context.pstate);
+ DumpArmStatusRegister(os, context.pstate);
+ os << '\n';
+#else
+ // TODO: Add support for MIPS32 and MIPS64.
+ os << "Unknown architecture/word size/OS in ucontext dump";
+#endif
+}
+
+void UContext::DumpRegister32(std::ostream& os, const char* name, uint32_t value) const {
+ os << StringPrintf(" %6s: 0x%08x", name, value);
+}
+
+void UContext::DumpRegister64(std::ostream& os, const char* name, uint64_t value) const {
+ os << StringPrintf(" %6s: 0x%016" PRIx64, name, value);
+}
+
+void UContext::DumpX86Flags(std::ostream& os, uint32_t flags) const {
+ os << " [";
+ if ((flags & (1 << 0)) != 0) {
+ os << " CF";
+ }
+ if ((flags & (1 << 2)) != 0) {
+ os << " PF";
+ }
+ if ((flags & (1 << 4)) != 0) {
+ os << " AF";
+ }
+ if ((flags & (1 << 6)) != 0) {
+ os << " ZF";
+ }
+ if ((flags & (1 << 7)) != 0) {
+ os << " SF";
+ }
+ if ((flags & (1 << 8)) != 0) {
+ os << " TF";
+ }
+ if ((flags & (1 << 9)) != 0) {
+ os << " IF";
+ }
+ if ((flags & (1 << 10)) != 0) {
+ os << " DF";
+ }
+ if ((flags & (1 << 11)) != 0) {
+ os << " OF";
+ }
+ os << " ]";
+}
+
+template <typename RegisterType>
+void UContext::DumpArmStatusRegister(std::ostream& os, RegisterType status_register) const {
+ // Condition flags.
+ constexpr RegisterType kFlagV = 1U << 28;
+ constexpr RegisterType kFlagC = 1U << 29;
+ constexpr RegisterType kFlagZ = 1U << 30;
+ constexpr RegisterType kFlagN = 1U << 31;
+
+ os << " [";
+ if ((status_register & kFlagN) != 0) {
+ os << " N";
+ }
+ if ((status_register & kFlagZ) != 0) {
+ os << " Z";
+ }
+ if ((status_register & kFlagC) != 0) {
+ os << " C";
+ }
+ if ((status_register & kFlagV) != 0) {
+ os << " V";
+ }
+ os << " ]";
+}
+
+int GetTimeoutSignal() {
+#if defined(__APPLE__)
+ // Mac does not support realtime signals.
+ UNUSED(kUseSigRTTimeout);
+ return -1;
+#else
+ return kUseSigRTTimeout ? (SIGRTMIN + 2) : -1;
+#endif
+}
+
+static bool IsTimeoutSignal(int signal_number) {
+ return signal_number == GetTimeoutSignal();
+}
+
+#if defined(__APPLE__)
+// On macOS, clang complains about art::HandleUnexpectedSignalCommon's
+// stack frame size being too large; disable that warning locally.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wframe-larger-than="
+#endif
+
+void HandleUnexpectedSignalCommon(int signal_number,
+ siginfo_t* info,
+ void* raw_context,
+ bool running_on_linux) {
+ bool handle_timeout_signal = running_on_linux;
+ bool dump_on_stderr = running_on_linux;
+
+ static bool handling_unexpected_signal = false;
+ if (handling_unexpected_signal) {
+ LogHelper::LogLineLowStack(__FILE__,
+ __LINE__,
+ ::android::base::FATAL_WITHOUT_ABORT,
+ "HandleUnexpectedSignal reentered\n");
+ if (handle_timeout_signal) {
+ if (IsTimeoutSignal(signal_number)) {
+ // Ignore a recursive timeout.
+ return;
+ }
+ }
+ _exit(1);
+ }
+ handling_unexpected_signal = true;
+
+ gAborting++; // set before taking any locks
+ MutexLock mu(Thread::Current(), *Locks::unexpected_signal_lock_);
+
+ bool has_address = (signal_number == SIGILL || signal_number == SIGBUS ||
+ signal_number == SIGFPE || signal_number == SIGSEGV);
+
+ OsInfo os_info;
+ const char* cmd_line = GetCmdLine();
+ if (cmd_line == nullptr) {
+ cmd_line = "<unset>"; // Because no-one called InitLogging.
+ }
+ pid_t tid = GetTid();
+ std::string thread_name(GetThreadName(tid));
+ UContext thread_context(raw_context);
+ Backtrace thread_backtrace(raw_context);
+
+ std::ostringstream stream;
+ stream << "*** *** *** *** *** *** *** *** *** *** *** *** *** *** *** ***\n"
+ << StringPrintf("Fatal signal %d (%s), code %d (%s)",
+ signal_number,
+ GetSignalName(signal_number),
+ info->si_code,
+ GetSignalCodeName(signal_number, info->si_code))
+ << (has_address ? StringPrintf(" fault addr %p", info->si_addr) : "") << '\n'
+ << "OS: " << Dumpable<OsInfo>(os_info) << '\n'
+ << "Cmdline: " << cmd_line << '\n'
+ << "Thread: " << tid << " \"" << thread_name << "\"" << '\n'
+ << "Registers:\n" << Dumpable<UContext>(thread_context) << '\n'
+ << "Backtrace:\n" << Dumpable<Backtrace>(thread_backtrace) << '\n';
+ if (dump_on_stderr) {
+ // Note: We are using cerr directly instead of LOG macros to ensure even just partial output
+ // makes it out. That means we lose the "dalvikvm..." prefix, but that is acceptable
+ // considering this is an abort situation.
+ std::cerr << stream.str() << std::flush;
+ } else {
+ LOG(FATAL_WITHOUT_ABORT) << stream.str() << std::flush;
+ }
+ if (kIsDebugBuild && signal_number == SIGSEGV) {
+ PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
+ }
+
+ Runtime* runtime = Runtime::Current();
+ if (runtime != nullptr) {
+ if (handle_timeout_signal && IsTimeoutSignal(signal_number)) {
+ // Special timeout signal. Try to dump all threads.
+ // Note: Do not use DumpForSigQuit, as that might disable native unwind, but the native parts
+ // are of value here.
+ runtime->GetThreadList()->Dump(std::cerr, kDumpNativeStackOnTimeout);
+ std::cerr << std::endl;
+ }
+
+ if (dump_on_stderr) {
+ std::cerr << "Fault message: " << runtime->GetFaultMessage() << std::endl;
+ } else {
+ LOG(FATAL_WITHOUT_ABORT) << "Fault message: " << runtime->GetFaultMessage();
+ }
+ }
+}
+
+#if defined(__APPLE__)
+#pragma GCC diagnostic pop
+#endif
+
+void InitPlatformSignalHandlersCommon(void (*newact)(int, siginfo_t*, void*),
+ struct sigaction* oldact,
+ bool handle_timeout_signal) {
+ struct sigaction action;
+ memset(&action, 0, sizeof(action));
+ sigemptyset(&action.sa_mask);
+ action.sa_sigaction = newact;
+ // Use the three-argument sa_sigaction handler.
+ action.sa_flags |= SA_SIGINFO;
+ // Use the alternate signal stack so we can catch stack overflows.
+ action.sa_flags |= SA_ONSTACK;
+
+ int rc = 0;
+ rc += sigaction(SIGABRT, &action, oldact);
+ rc += sigaction(SIGBUS, &action, oldact);
+ rc += sigaction(SIGFPE, &action, oldact);
+ rc += sigaction(SIGILL, &action, oldact);
+ rc += sigaction(SIGPIPE, &action, oldact);
+ rc += sigaction(SIGSEGV, &action, oldact);
+#if defined(SIGSTKFLT)
+ rc += sigaction(SIGSTKFLT, &action, oldact);
+#endif
+ rc += sigaction(SIGTRAP, &action, oldact);
+ // Special dump-all timeout.
+ if (handle_timeout_signal && GetTimeoutSignal() != -1) {
+ rc += sigaction(GetTimeoutSignal(), &action, oldact);
+ }
+ CHECK_EQ(rc, 0);
+}
+
+} // namespace art
diff --git a/runtime/runtime_common.h b/runtime/runtime_common.h
new file mode 100644
index 0000000..832b6bb
--- /dev/null
+++ b/runtime/runtime_common.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_RUNTIME_COMMON_H_
+#define ART_RUNTIME_RUNTIME_COMMON_H_
+
+// Code shared by runtime/runtime_android.cc and runtime/runtime_linux.cc.
+
+#if defined(__APPLE__)
+// On macOS, _XOPEN_SOURCE must be defined to access ucontext
+// routines, as they are considered deprecated on that platform.
+#define _XOPEN_SOURCE
+#endif
+
+#include <sys/utsname.h>
+#include <ucontext.h>
+
+#include <iomanip>
+
+#include "base/dumpable.h"
+#include "native_stack_dump.h"
+#include "utils.h"
+
+namespace art {
+
+struct Backtrace {
+ public:
+ explicit Backtrace(void* raw_context) : raw_context_(raw_context) {}
+ void Dump(std::ostream& os) const {
+ DumpNativeStack(os, GetTid(), nullptr, "\t", nullptr, raw_context_);
+ }
+ private:
+ // Stores the context of the signal that was unexpected and will terminate the runtime. The
+ // DumpNativeStack code will take care of casting it to the expected type. This is required
+ // as our signal handler runs on an alternate stack.
+ void* raw_context_;
+};
+
+struct OsInfo {
+ void Dump(std::ostream& os) const {
+ utsname info;
+ uname(&info);
+ // Linux 2.6.38.8-gg784 (x86_64)
+ // Darwin 11.4.0 (x86_64)
+ os << info.sysname << " " << info.release << " (" << info.machine << ")";
+ }
+};
+
+const char* GetSignalName(int signal_number);
+const char* GetSignalCodeName(int signal_number, int signal_code);
+
+// Return the signal number we recognize as timeout. -1 means not active/supported.
+int GetTimeoutSignal();
+
+void HandleUnexpectedSignalCommon(int signal_number,
+ siginfo_t* info,
+ void* raw_context,
+ bool running_on_linux);
+
+void InitPlatformSignalHandlersCommon(void (*newact)(int, siginfo_t*, void*),
+ struct sigaction* oldact,
+ bool handle_timeout_signal);
+
+} // namespace art
+
+#endif // ART_RUNTIME_RUNTIME_COMMON_H_
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index b8894d2..ad61cf3 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -17,359 +17,19 @@
#include "runtime.h"
#include <signal.h>
-#include <string.h>
-#include <sys/utsname.h>
-#include <inttypes.h>
#include <iostream>
-#include <sstream>
-#include "android-base/stringprintf.h"
-
-#include "base/dumpable.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "native_stack_dump.h"
-#include "thread-inl.h"
-#include "thread_list.h"
-#include "utils.h"
+#include "runtime_common.h"
namespace art {
-using android::base::StringPrintf;
+void HandleUnexpectedSignalLinux(int signal_number, siginfo_t* info, void* raw_context) {
+ HandleUnexpectedSignalCommon(signal_number, info, raw_context, /* running_on_linux */ true);
-static constexpr bool kUseSigRTTimeout = true;
-static constexpr bool kDumpNativeStackOnTimeout = true;
-
-struct Backtrace {
- public:
- explicit Backtrace(void* raw_context) : raw_context_(raw_context) {}
- void Dump(std::ostream& os) const {
- DumpNativeStack(os, GetTid(), nullptr, "\t", nullptr, raw_context_);
- }
- private:
- // Stores the context of the signal that was unexpected and will terminate the runtime. The
- // DumpNativeStack code will take care of casting it to the expected type. This is required
- // as our signal handler runs on an alternate stack.
- void* raw_context_;
-};
-
-struct OsInfo {
- void Dump(std::ostream& os) const {
- utsname info;
- uname(&info);
- // Linux 2.6.38.8-gg784 (x86_64)
- // Darwin 11.4.0 (x86_64)
- os << info.sysname << " " << info.release << " (" << info.machine << ")";
- }
-};
-
-static const char* GetSignalName(int signal_number) {
- switch (signal_number) {
- case SIGABRT: return "SIGABRT";
- case SIGBUS: return "SIGBUS";
- case SIGFPE: return "SIGFPE";
- case SIGILL: return "SIGILL";
- case SIGPIPE: return "SIGPIPE";
- case SIGSEGV: return "SIGSEGV";
-#if defined(SIGSTKFLT)
- case SIGSTKFLT: return "SIGSTKFLT";
-#endif
- case SIGTRAP: return "SIGTRAP";
- }
- return "??";
-}
-
-static const char* GetSignalCodeName(int signal_number, int signal_code) {
- // Try the signal-specific codes...
- switch (signal_number) {
- case SIGILL:
- switch (signal_code) {
- case ILL_ILLOPC: return "ILL_ILLOPC";
- case ILL_ILLOPN: return "ILL_ILLOPN";
- case ILL_ILLADR: return "ILL_ILLADR";
- case ILL_ILLTRP: return "ILL_ILLTRP";
- case ILL_PRVOPC: return "ILL_PRVOPC";
- case ILL_PRVREG: return "ILL_PRVREG";
- case ILL_COPROC: return "ILL_COPROC";
- case ILL_BADSTK: return "ILL_BADSTK";
- }
- break;
- case SIGBUS:
- switch (signal_code) {
- case BUS_ADRALN: return "BUS_ADRALN";
- case BUS_ADRERR: return "BUS_ADRERR";
- case BUS_OBJERR: return "BUS_OBJERR";
- }
- break;
- case SIGFPE:
- switch (signal_code) {
- case FPE_INTDIV: return "FPE_INTDIV";
- case FPE_INTOVF: return "FPE_INTOVF";
- case FPE_FLTDIV: return "FPE_FLTDIV";
- case FPE_FLTOVF: return "FPE_FLTOVF";
- case FPE_FLTUND: return "FPE_FLTUND";
- case FPE_FLTRES: return "FPE_FLTRES";
- case FPE_FLTINV: return "FPE_FLTINV";
- case FPE_FLTSUB: return "FPE_FLTSUB";
- }
- break;
- case SIGSEGV:
- switch (signal_code) {
- case SEGV_MAPERR: return "SEGV_MAPERR";
- case SEGV_ACCERR: return "SEGV_ACCERR";
-#if defined(SEGV_BNDERR)
- case SEGV_BNDERR: return "SEGV_BNDERR";
-#endif
- }
- break;
- case SIGTRAP:
- switch (signal_code) {
- case TRAP_BRKPT: return "TRAP_BRKPT";
- case TRAP_TRACE: return "TRAP_TRACE";
- }
- break;
- }
- // Then the other codes...
- switch (signal_code) {
- case SI_USER: return "SI_USER";
-#if defined(SI_KERNEL)
- case SI_KERNEL: return "SI_KERNEL";
-#endif
- case SI_QUEUE: return "SI_QUEUE";
- case SI_TIMER: return "SI_TIMER";
- case SI_MESGQ: return "SI_MESGQ";
- case SI_ASYNCIO: return "SI_ASYNCIO";
-#if defined(SI_SIGIO)
- case SI_SIGIO: return "SI_SIGIO";
-#endif
-#if defined(SI_TKILL)
- case SI_TKILL: return "SI_TKILL";
-#endif
- }
- // Then give up...
- return "?";
-}
-
-struct UContext {
- explicit UContext(void* raw_context) :
- context(reinterpret_cast<ucontext_t*>(raw_context)->uc_mcontext) {
- }
-
- void Dump(std::ostream& os) const {
- // TODO: support non-x86 hosts (not urgent because this code doesn't run on targets).
-#if defined(__APPLE__) && defined(__i386__)
- DumpRegister32(os, "eax", context->__ss.__eax);
- DumpRegister32(os, "ebx", context->__ss.__ebx);
- DumpRegister32(os, "ecx", context->__ss.__ecx);
- DumpRegister32(os, "edx", context->__ss.__edx);
- os << '\n';
-
- DumpRegister32(os, "edi", context->__ss.__edi);
- DumpRegister32(os, "esi", context->__ss.__esi);
- DumpRegister32(os, "ebp", context->__ss.__ebp);
- DumpRegister32(os, "esp", context->__ss.__esp);
- os << '\n';
-
- DumpRegister32(os, "eip", context->__ss.__eip);
- os << " ";
- DumpRegister32(os, "eflags", context->__ss.__eflags);
- DumpX86Flags(os, context->__ss.__eflags);
- os << '\n';
-
- DumpRegister32(os, "cs", context->__ss.__cs);
- DumpRegister32(os, "ds", context->__ss.__ds);
- DumpRegister32(os, "es", context->__ss.__es);
- DumpRegister32(os, "fs", context->__ss.__fs);
- os << '\n';
- DumpRegister32(os, "gs", context->__ss.__gs);
- DumpRegister32(os, "ss", context->__ss.__ss);
-#elif defined(__linux__) && defined(__i386__)
- DumpRegister32(os, "eax", context.gregs[REG_EAX]);
- DumpRegister32(os, "ebx", context.gregs[REG_EBX]);
- DumpRegister32(os, "ecx", context.gregs[REG_ECX]);
- DumpRegister32(os, "edx", context.gregs[REG_EDX]);
- os << '\n';
-
- DumpRegister32(os, "edi", context.gregs[REG_EDI]);
- DumpRegister32(os, "esi", context.gregs[REG_ESI]);
- DumpRegister32(os, "ebp", context.gregs[REG_EBP]);
- DumpRegister32(os, "esp", context.gregs[REG_ESP]);
- os << '\n';
-
- DumpRegister32(os, "eip", context.gregs[REG_EIP]);
- os << " ";
- DumpRegister32(os, "eflags", context.gregs[REG_EFL]);
- DumpX86Flags(os, context.gregs[REG_EFL]);
- os << '\n';
-
- DumpRegister32(os, "cs", context.gregs[REG_CS]);
- DumpRegister32(os, "ds", context.gregs[REG_DS]);
- DumpRegister32(os, "es", context.gregs[REG_ES]);
- DumpRegister32(os, "fs", context.gregs[REG_FS]);
- os << '\n';
- DumpRegister32(os, "gs", context.gregs[REG_GS]);
- DumpRegister32(os, "ss", context.gregs[REG_SS]);
-#elif defined(__linux__) && defined(__x86_64__)
- DumpRegister64(os, "rax", context.gregs[REG_RAX]);
- DumpRegister64(os, "rbx", context.gregs[REG_RBX]);
- DumpRegister64(os, "rcx", context.gregs[REG_RCX]);
- DumpRegister64(os, "rdx", context.gregs[REG_RDX]);
- os << '\n';
-
- DumpRegister64(os, "rdi", context.gregs[REG_RDI]);
- DumpRegister64(os, "rsi", context.gregs[REG_RSI]);
- DumpRegister64(os, "rbp", context.gregs[REG_RBP]);
- DumpRegister64(os, "rsp", context.gregs[REG_RSP]);
- os << '\n';
-
- DumpRegister64(os, "r8 ", context.gregs[REG_R8]);
- DumpRegister64(os, "r9 ", context.gregs[REG_R9]);
- DumpRegister64(os, "r10", context.gregs[REG_R10]);
- DumpRegister64(os, "r11", context.gregs[REG_R11]);
- os << '\n';
-
- DumpRegister64(os, "r12", context.gregs[REG_R12]);
- DumpRegister64(os, "r13", context.gregs[REG_R13]);
- DumpRegister64(os, "r14", context.gregs[REG_R14]);
- DumpRegister64(os, "r15", context.gregs[REG_R15]);
- os << '\n';
-
- DumpRegister64(os, "rip", context.gregs[REG_RIP]);
- os << " ";
- DumpRegister32(os, "eflags", context.gregs[REG_EFL]);
- DumpX86Flags(os, context.gregs[REG_EFL]);
- os << '\n';
-
- DumpRegister32(os, "cs", (context.gregs[REG_CSGSFS]) & 0x0FFFF);
- DumpRegister32(os, "gs", (context.gregs[REG_CSGSFS] >> 16) & 0x0FFFF);
- DumpRegister32(os, "fs", (context.gregs[REG_CSGSFS] >> 32) & 0x0FFFF);
- os << '\n';
-#else
- os << "Unknown architecture/word size/OS in ucontext dump";
-#endif
- }
-
- void DumpRegister32(std::ostream& os, const char* name, uint32_t value) const {
- os << StringPrintf(" %6s: 0x%08x", name, value);
- }
-
- void DumpRegister64(std::ostream& os, const char* name, uint64_t value) const {
- os << StringPrintf(" %6s: 0x%016" PRIx64, name, value);
- }
-
- void DumpX86Flags(std::ostream& os, uint32_t flags) const {
- os << " [";
- if ((flags & (1 << 0)) != 0) {
- os << " CF";
- }
- if ((flags & (1 << 2)) != 0) {
- os << " PF";
- }
- if ((flags & (1 << 4)) != 0) {
- os << " AF";
- }
- if ((flags & (1 << 6)) != 0) {
- os << " ZF";
- }
- if ((flags & (1 << 7)) != 0) {
- os << " SF";
- }
- if ((flags & (1 << 8)) != 0) {
- os << " TF";
- }
- if ((flags & (1 << 9)) != 0) {
- os << " IF";
- }
- if ((flags & (1 << 10)) != 0) {
- os << " DF";
- }
- if ((flags & (1 << 11)) != 0) {
- os << " OF";
- }
- os << " ]";
- }
-
- mcontext_t& context;
-};
-
-// Return the signal number we recognize as timeout. -1 means not active/supported.
-static int GetTimeoutSignal() {
-#if defined(__APPLE__)
- // Mac does not support realtime signals.
- UNUSED(kUseSigRTTimeout);
- return -1;
-#else
- return kUseSigRTTimeout ? (SIGRTMIN + 2) : -1;
-#endif
-}
-
-static bool IsTimeoutSignal(int signal_number) {
- return signal_number == GetTimeoutSignal();
-}
-
-void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_context) {
- static bool handlingUnexpectedSignal = false;
- if (handlingUnexpectedSignal) {
- LogHelper::LogLineLowStack(__FILE__,
- __LINE__,
- ::android::base::FATAL_WITHOUT_ABORT,
- "HandleUnexpectedSignal reentered\n");
- if (IsTimeoutSignal(signal_number)) {
- // Ignore a recursive timeout.
- return;
- }
- _exit(1);
- }
- handlingUnexpectedSignal = true;
-
- gAborting++; // set before taking any locks
- MutexLock mu(Thread::Current(), *Locks::unexpected_signal_lock_);
-
- bool has_address = (signal_number == SIGILL || signal_number == SIGBUS ||
- signal_number == SIGFPE || signal_number == SIGSEGV);
-
- OsInfo os_info;
- const char* cmd_line = GetCmdLine();
- if (cmd_line == nullptr) {
- cmd_line = "<unset>"; // Because no-one called InitLogging.
- }
- pid_t tid = GetTid();
- std::string thread_name(GetThreadName(tid));
- UContext thread_context(raw_context);
- Backtrace thread_backtrace(raw_context);
-
- // Note: We are using cerr directly instead of LOG macros to ensure even just partial output
- // makes it out. That means we lose the "dalvikvm..." prefix, but that is acceptable
- // considering this is an abort situation.
-
- std::cerr << "*** *** *** *** *** *** *** *** *** *** *** *** *** *** *** ***\n"
- << StringPrintf("Fatal signal %d (%s), code %d (%s)",
- signal_number, GetSignalName(signal_number),
- info->si_code,
- GetSignalCodeName(signal_number, info->si_code))
- << (has_address ? StringPrintf(" fault addr %p", info->si_addr) : "") << std::endl
- << "OS: " << Dumpable<OsInfo>(os_info) << std::endl
- << "Cmdline: " << cmd_line << std::endl
- << "Thread: " << tid << " \"" << thread_name << "\"" << std::endl
- << "Registers:\n" << Dumpable<UContext>(thread_context) << std::endl
- << "Backtrace:\n" << Dumpable<Backtrace>(thread_backtrace) << std::endl;
- if (kIsDebugBuild && signal_number == SIGSEGV) {
- PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
- }
- Runtime* runtime = Runtime::Current();
- if (runtime != nullptr) {
- if (IsTimeoutSignal(signal_number)) {
- // Special timeout signal. Try to dump all threads.
- // Note: Do not use DumpForSigQuit, as that might disable native unwind, but the native parts
- // are of value here.
- runtime->GetThreadList()->Dump(std::cerr, kDumpNativeStackOnTimeout);
- std::cerr << std::endl;
- }
- std::cerr << "Fault message: " << runtime->GetFaultMessage() << std::endl;
- }
if (getenv("debug_db_uid") != nullptr || getenv("art_wait_for_gdb_on_crash") != nullptr) {
+ pid_t tid = GetTid();
+ std::string thread_name(GetThreadName(tid));
std::cerr << "********************************************************\n"
<< "* Process " << getpid() << " thread " << tid << " \"" << thread_name
<< "\""
@@ -398,31 +58,9 @@
void Runtime::InitPlatformSignalHandlers() {
// On the host, we don't have debuggerd to dump a stack for us when something unexpected happens.
- struct sigaction action;
- memset(&action, 0, sizeof(action));
- sigemptyset(&action.sa_mask);
- action.sa_sigaction = HandleUnexpectedSignal;
- // Use the three-argument sa_sigaction handler.
- action.sa_flags |= SA_SIGINFO;
- // Use the alternate signal stack so we can catch stack overflows.
- action.sa_flags |= SA_ONSTACK;
-
- int rc = 0;
- rc += sigaction(SIGABRT, &action, nullptr);
- rc += sigaction(SIGBUS, &action, nullptr);
- rc += sigaction(SIGFPE, &action, nullptr);
- rc += sigaction(SIGILL, &action, nullptr);
- rc += sigaction(SIGPIPE, &action, nullptr);
- rc += sigaction(SIGSEGV, &action, nullptr);
-#if defined(SIGSTKFLT)
- rc += sigaction(SIGSTKFLT, &action, nullptr);
-#endif
- rc += sigaction(SIGTRAP, &action, nullptr);
- // Special dump-all timeout.
- if (GetTimeoutSignal() != -1) {
- rc += sigaction(GetTimeoutSignal(), &action, nullptr);
- }
- CHECK_EQ(rc, 0);
+ InitPlatformSignalHandlersCommon(HandleUnexpectedSignalLinux,
+ nullptr,
+ /* handle_timeout_signal */ true);
}
} // namespace art
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 749a36e..e68a1b2 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -119,11 +119,10 @@
RUNTIME_OPTIONS_KEY (Unit, NoDexFileFallback)
RUNTIME_OPTIONS_KEY (std::string, CpuAbiList)
RUNTIME_OPTIONS_KEY (std::string, Fingerprint)
-RUNTIME_OPTIONS_KEY (ExperimentalFlags, Experimental, ExperimentalFlags::kNone) // -Xexperimental:{none, agents}
-RUNTIME_OPTIONS_KEY (std::vector<ti::Agent>, AgentLib) // -agentlib:<libname>=<options>, Requires -Xexperimental:agents
-RUNTIME_OPTIONS_KEY (std::vector<ti::Agent>, AgentPath) // -agentpath:<libname>=<options>, Requires -Xexperimental:agents
-RUNTIME_OPTIONS_KEY (std::vector<Plugin>, Plugins) // -Xplugin:<library> Requires -Xexperimental:runtime-plugins
-RUNTIME_OPTIONS_KEY (Unit, FullyDeoptable) // -Xfully-deoptable
+RUNTIME_OPTIONS_KEY (ExperimentalFlags, Experimental, ExperimentalFlags::kNone) // -Xexperimental:{...}
+RUNTIME_OPTIONS_KEY (std::vector<ti::Agent>, AgentLib) // -agentlib:<libname>=<options>
+RUNTIME_OPTIONS_KEY (std::vector<ti::Agent>, AgentPath) // -agentpath:<libname>=<options>
+RUNTIME_OPTIONS_KEY (std::vector<Plugin>, Plugins) // -Xplugin:<library>
// Not parse-able from command line, but can be provided explicitly.
// (Do not add anything here that is defined in ParsedOptions::MakeParser)
diff --git a/runtime/scoped_thread_state_change-inl.h b/runtime/scoped_thread_state_change-inl.h
index d4469f4..000da59 100644
--- a/runtime/scoped_thread_state_change-inl.h
+++ b/runtime/scoped_thread_state_change-inl.h
@@ -110,6 +110,10 @@
Locks::mutator_lock_->AssertSharedHeld(Self());
}
+inline ScopedObjectAccess::ScopedObjectAccess(JNIEnv* env) : ScopedObjectAccessUnchecked(env) {}
+inline ScopedObjectAccess::ScopedObjectAccess(Thread* self) : ScopedObjectAccessUnchecked(self) {}
+inline ScopedObjectAccess::~ScopedObjectAccess() {}
+
inline ScopedThreadSuspension::ScopedThreadSuspension(Thread* self, ThreadState suspended_state)
: self_(self), suspended_state_(suspended_state) {
DCHECK(self_ != nullptr);
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index b499258..24199f7 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -159,16 +159,14 @@
public:
ALWAYS_INLINE explicit ScopedObjectAccess(JNIEnv* env)
REQUIRES(!Locks::thread_suspend_count_lock_)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
- : ScopedObjectAccessUnchecked(env) {}
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
ALWAYS_INLINE explicit ScopedObjectAccess(Thread* self)
REQUIRES(!Locks::thread_suspend_count_lock_)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
- : ScopedObjectAccessUnchecked(self) {}
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
// Base class will release share of lock. Invoked after this destructor.
- ~ScopedObjectAccess() UNLOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE {}
+ ~ScopedObjectAccess() UNLOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE;
private:
// TODO: remove this constructor. It is used by check JNI's ScopedCheck to make it believe that
diff --git a/runtime/stack.cc b/runtime/stack.cc
index f9efc0b..c737fe4 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -37,7 +37,7 @@
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
namespace art {
@@ -96,13 +96,17 @@
return false;
}
-StackVisitor::StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind)
- : StackVisitor(thread, context, walk_kind, 0) {}
+StackVisitor::StackVisitor(Thread* thread,
+ Context* context,
+ StackWalkKind walk_kind,
+ bool check_suspended)
+ : StackVisitor(thread, context, walk_kind, 0, check_suspended) {}
StackVisitor::StackVisitor(Thread* thread,
Context* context,
StackWalkKind walk_kind,
- size_t num_frames)
+ size_t num_frames,
+ bool check_suspended)
: thread_(thread),
walk_kind_(walk_kind),
cur_shadow_frame_(nullptr),
@@ -112,8 +116,11 @@
num_frames_(num_frames),
cur_depth_(0),
current_inlining_depth_(0),
- context_(context) {
- DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
+ context_(context),
+ check_suspended_(check_suspended) {
+ if (check_suspended_) {
+ DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
+ }
}
InlineInfo StackVisitor::GetCurrentInlineInfo() const {
@@ -625,7 +632,7 @@
} else {
DCHECK(cur_quick_frame_ != nullptr);
CHECK(!IsInInlinedFrame()) << "We do not support setting inlined method's ArtMethod!";
- *cur_quick_frame_ = method;
+ *cur_quick_frame_ = method;
}
}
@@ -788,7 +795,9 @@
template <StackVisitor::CountTransitions kCount>
void StackVisitor::WalkStack(bool include_transitions) {
- DCHECK(thread_ == Thread::Current() || thread_->IsSuspended());
+ if (check_suspended_) {
+ DCHECK(thread_ == Thread::Current() || thread_->IsSuspended());
+ }
CHECK_EQ(cur_depth_, 0U);
bool exit_stubs_installed = Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
uint32_t instrumentation_stack_depth = 0;
diff --git a/runtime/stack.h b/runtime/stack.h
index 9dceb29..90a0aee 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -590,7 +590,10 @@
};
protected:
- StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind);
+ StackVisitor(Thread* thread,
+ Context* context,
+ StackWalkKind walk_kind,
+ bool check_suspended = true);
bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -797,7 +800,11 @@
private:
// Private constructor known in the case that num_frames_ has already been computed.
- StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind, size_t num_frames)
+ StackVisitor(Thread* thread,
+ Context* context,
+ StackWalkKind walk_kind,
+ size_t num_frames,
+ bool check_suspended = true)
REQUIRES_SHARED(Locks::mutator_lock_);
bool IsAccessibleRegister(uint32_t reg, bool is_float) const {
@@ -851,6 +858,7 @@
protected:
Context* const context_;
+ const bool check_suspended_;
};
} // namespace art
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index 690b069..4e7c3f4 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -97,8 +97,9 @@
<< ", dex_pc_bit_offset=" << static_cast<uint32_t>(dex_pc_bit_offset_)
<< ", dex_register_map_bit_offset=" << static_cast<uint32_t>(dex_register_map_bit_offset_)
<< ", inline_info_bit_offset=" << static_cast<uint32_t>(inline_info_bit_offset_)
- << ", register_mask_bit_offset=" << static_cast<uint32_t>(register_mask_bit_offset_)
- << ", stack_mask_bit_offset=" << static_cast<uint32_t>(stack_mask_bit_offset_)
+ << ", register_mask_bit_offset=" << static_cast<uint32_t>(register_mask_index_bit_offset_)
+ << ", stack_mask_index_bit_offset=" << static_cast<uint32_t>(stack_mask_index_bit_offset_)
+ << ", total_bit_size=" << static_cast<uint32_t>(total_bit_size_)
<< ")\n";
}
@@ -198,16 +199,17 @@
<< "StackMap" << header_suffix
<< std::hex
<< " [native_pc=0x" << code_offset + pc_offset << "]"
- << " [entry_size=0x" << encoding.stack_map_size_in_bytes << "]"
+ << " [entry_size=0x" << encoding.stack_map_encoding.BitSize() << " bits]"
<< " (dex_pc=0x" << GetDexPc(stack_map_encoding)
<< ", native_pc_offset=0x" << pc_offset
<< ", dex_register_map_offset=0x" << GetDexRegisterMapOffset(stack_map_encoding)
<< ", inline_info_offset=0x" << GetInlineDescriptorOffset(stack_map_encoding)
- << ", register_mask=0x" << GetRegisterMask(stack_map_encoding)
+ << ", register_mask=0x" << code_info.GetRegisterMaskOf(encoding, *this)
<< std::dec
<< ", stack_mask=0b";
- for (size_t i = 0, e = GetNumberOfStackMaskBits(stack_map_encoding); i < e; ++i) {
- vios->Stream() << GetStackMaskBit(stack_map_encoding, e - i - 1);
+ BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, *this);
+ for (size_t i = 0, e = encoding.stack_mask_size_in_bits; i < e; ++i) {
+ vios->Stream() << stack_mask.LoadBit(e - i - 1);
}
vios->Stream() << ")\n";
if (HasDexRegisterMap(stack_map_encoding)) {
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index cd9a3f0..062404d 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -20,6 +20,7 @@
#include "arch/code_offset.h"
#include "base/bit_vector.h"
#include "base/bit_utils.h"
+#include "bit_memory_region.h"
#include "dex_file.h"
#include "memory_region.h"
#include "leb128.h"
@@ -665,37 +666,14 @@
ALWAYS_INLINE size_t BitSize() const { return end_offset_ - start_offset_; }
- ALWAYS_INLINE int32_t Load(const MemoryRegion& region) const {
+ template <typename Region>
+ ALWAYS_INLINE int32_t Load(const Region& region) const {
DCHECK_LE(end_offset_, region.size_in_bits());
- const size_t bit_count = BitSize();
- if (bit_count == 0) {
- // Do not touch any memory if the range is empty.
- return min_value_;
- }
- uint8_t* address = region.start() + start_offset_ / kBitsPerByte;
- const uint32_t shift = start_offset_ & (kBitsPerByte - 1);
- // Load the value (reading only the strictly needed bytes).
- const uint32_t load_bit_count = shift + bit_count;
- uint32_t value = *address++ >> shift;
- if (load_bit_count > 8) {
- value |= static_cast<uint32_t>(*address++) << (8 - shift);
- if (load_bit_count > 16) {
- value |= static_cast<uint32_t>(*address++) << (16 - shift);
- if (load_bit_count > 24) {
- value |= static_cast<uint32_t>(*address++) << (24 - shift);
- if (load_bit_count > 32) {
- value |= static_cast<uint32_t>(*address++) << (32 - shift);
- }
- }
- }
- }
- // Clear unwanted most significant bits.
- uint32_t clear_bit_count = 32 - bit_count;
- value = (value << clear_bit_count) >> clear_bit_count;
- return value + min_value_;
+ return static_cast<int32_t>(region.LoadBits(start_offset_, BitSize())) + min_value_;
}
- ALWAYS_INLINE void Store(MemoryRegion region, int32_t value) const {
+ template <typename Region>
+ ALWAYS_INLINE void Store(Region region, int32_t value) const {
region.StoreBits(start_offset_, value - min_value_, BitSize());
DCHECK_EQ(Load(region), value);
}
@@ -711,40 +689,40 @@
StackMapEncoding() {}
// Set stack map bit layout based on given sizes.
- // Returns the size of stack map in bytes.
+ // Returns the size of stack map in bits.
size_t SetFromSizes(size_t native_pc_max,
size_t dex_pc_max,
size_t dex_register_map_size,
size_t inline_info_size,
- size_t register_mask_max,
- size_t stack_mask_bit_size) {
- size_t bit_offset = 0;
- DCHECK_EQ(kNativePcBitOffset, bit_offset);
- bit_offset += MinimumBitsToStore(native_pc_max);
+ size_t number_of_register_masks,
+ size_t number_of_stack_masks) {
+ total_bit_size_ = 0;
+ DCHECK_EQ(kNativePcBitOffset, total_bit_size_);
+ total_bit_size_ += MinimumBitsToStore(native_pc_max);
- dex_pc_bit_offset_ = dchecked_integral_cast<uint8_t>(bit_offset);
- bit_offset += MinimumBitsToStore(1 /* kNoDexPc */ + dex_pc_max);
+ dex_pc_bit_offset_ = total_bit_size_;
+ total_bit_size_ += MinimumBitsToStore(1 /* kNoDexPc */ + dex_pc_max);
// We also need +1 for kNoDexRegisterMap, but since the size is strictly
// greater than any offset we might try to encode, we already implicitly have it.
- dex_register_map_bit_offset_ = dchecked_integral_cast<uint8_t>(bit_offset);
- bit_offset += MinimumBitsToStore(dex_register_map_size);
+ dex_register_map_bit_offset_ = total_bit_size_;
+ total_bit_size_ += MinimumBitsToStore(dex_register_map_size);
// We also need +1 for kNoInlineInfo, but since the inline_info_size is strictly
// greater than the offset we might try to encode, we already implicitly have it.
// If inline_info_size is zero, we can encode only kNoInlineInfo (in zero bits).
- inline_info_bit_offset_ = dchecked_integral_cast<uint8_t>(bit_offset);
+ inline_info_bit_offset_ = total_bit_size_;
if (inline_info_size != 0) {
- bit_offset += MinimumBitsToStore(dex_register_map_size + inline_info_size);
+ total_bit_size_ += MinimumBitsToStore(dex_register_map_size + inline_info_size);
}
- register_mask_bit_offset_ = dchecked_integral_cast<uint8_t>(bit_offset);
- bit_offset += MinimumBitsToStore(register_mask_max);
+ register_mask_index_bit_offset_ = total_bit_size_;
+ total_bit_size_ += MinimumBitsToStore(number_of_register_masks);
- stack_mask_bit_offset_ = dchecked_integral_cast<uint8_t>(bit_offset);
- bit_offset += stack_mask_bit_size;
+ stack_mask_index_bit_offset_ = total_bit_size_;
+ total_bit_size_ += MinimumBitsToStore(number_of_stack_masks);
- return RoundUp(bit_offset, kBitsPerByte) / kBitsPerByte;
+ return total_bit_size_;
}
ALWAYS_INLINE FieldEncoding GetNativePcEncoding() const {
@@ -757,14 +735,18 @@
return FieldEncoding(dex_register_map_bit_offset_, inline_info_bit_offset_, -1 /* min_value */);
}
ALWAYS_INLINE FieldEncoding GetInlineInfoEncoding() const {
- return FieldEncoding(inline_info_bit_offset_, register_mask_bit_offset_, -1 /* min_value */);
+ return FieldEncoding(inline_info_bit_offset_,
+ register_mask_index_bit_offset_,
+ -1 /* min_value */);
}
- ALWAYS_INLINE FieldEncoding GetRegisterMaskEncoding() const {
- return FieldEncoding(register_mask_bit_offset_, stack_mask_bit_offset_);
+ ALWAYS_INLINE FieldEncoding GetRegisterMaskIndexEncoding() const {
+ return FieldEncoding(register_mask_index_bit_offset_, stack_mask_index_bit_offset_);
}
- ALWAYS_INLINE size_t GetStackMaskBitOffset() const {
- // The end offset is not encoded. It is implicitly the end of stack map entry.
- return stack_mask_bit_offset_;
+ ALWAYS_INLINE FieldEncoding GetStackMaskIndexEncoding() const {
+ return FieldEncoding(stack_mask_index_bit_offset_, total_bit_size_);
+ }
+ ALWAYS_INLINE size_t BitSize() const {
+ return total_bit_size_;
}
void Dump(VariableIndentationOutputStream* vios) const;
@@ -774,8 +756,9 @@
uint8_t dex_pc_bit_offset_;
uint8_t dex_register_map_bit_offset_;
uint8_t inline_info_bit_offset_;
- uint8_t register_mask_bit_offset_;
- uint8_t stack_mask_bit_offset_;
+ uint8_t register_mask_index_bit_offset_;
+ uint8_t stack_mask_index_bit_offset_;
+ uint8_t total_bit_size_;
};
/**
@@ -788,13 +771,13 @@
*
* The information is of the form:
*
- * [native_pc_offset, dex_pc, dex_register_map_offset, inlining_info_offset, register_mask,
- * stack_mask].
+ * [native_pc_offset, dex_pc, dex_register_map_offset, inlining_info_offset, register_mask_index,
+ * stack_mask_index].
*/
class StackMap {
public:
StackMap() {}
- explicit StackMap(MemoryRegion region) : region_(region) {}
+ explicit StackMap(BitMemoryRegion region) : region_(region) {}
ALWAYS_INLINE bool IsValid() const { return region_.pointer() != nullptr; }
@@ -834,24 +817,20 @@
encoding.GetInlineInfoEncoding().Store(region_, offset);
}
- ALWAYS_INLINE uint32_t GetRegisterMask(const StackMapEncoding& encoding) const {
- return encoding.GetRegisterMaskEncoding().Load(region_);
+ ALWAYS_INLINE uint32_t GetRegisterMaskIndex(const StackMapEncoding& encoding) const {
+ return encoding.GetRegisterMaskIndexEncoding().Load(region_);
}
- ALWAYS_INLINE void SetRegisterMask(const StackMapEncoding& encoding, uint32_t mask) {
- encoding.GetRegisterMaskEncoding().Store(region_, mask);
+ ALWAYS_INLINE void SetRegisterMaskIndex(const StackMapEncoding& encoding, uint32_t mask) {
+ encoding.GetRegisterMaskIndexEncoding().Store(region_, mask);
}
- ALWAYS_INLINE size_t GetNumberOfStackMaskBits(const StackMapEncoding& encoding) const {
- return region_.size_in_bits() - encoding.GetStackMaskBitOffset();
+ ALWAYS_INLINE uint32_t GetStackMaskIndex(const StackMapEncoding& encoding) const {
+ return encoding.GetStackMaskIndexEncoding().Load(region_);
}
- ALWAYS_INLINE bool GetStackMaskBit(const StackMapEncoding& encoding, size_t index) const {
- return region_.LoadBit(encoding.GetStackMaskBitOffset() + index);
- }
-
- ALWAYS_INLINE void SetStackMaskBit(const StackMapEncoding& encoding, size_t index, bool value) {
- region_.StoreBit(encoding.GetStackMaskBitOffset() + index, value);
+ ALWAYS_INLINE void SetStackMaskIndex(const StackMapEncoding& encoding, uint32_t mask) {
+ encoding.GetStackMaskIndexEncoding().Store(region_, mask);
}
ALWAYS_INLINE bool HasDexRegisterMap(const StackMapEncoding& encoding) const {
@@ -863,7 +842,9 @@
}
ALWAYS_INLINE bool Equals(const StackMap& other) const {
- return region_.pointer() == other.region_.pointer() && region_.size() == other.region_.size();
+ return region_.pointer() == other.region_.pointer() &&
+ region_.size() == other.region_.size() &&
+ region_.BitOffset() == other.region_.BitOffset();
}
void Dump(VariableIndentationOutputStream* vios,
@@ -885,7 +866,7 @@
private:
static constexpr int kFixedSize = 0;
- MemoryRegion region_;
+ BitMemoryRegion region_;
friend class StackMapStream;
};
@@ -1051,7 +1032,10 @@
struct CodeInfoEncoding {
uint32_t non_header_size;
uint32_t number_of_stack_maps;
- uint32_t stack_map_size_in_bytes;
+ uint32_t number_of_stack_masks;
+ uint32_t number_of_register_masks;
+ uint32_t stack_mask_size_in_bits;
+ uint32_t register_mask_size_in_bits;
uint32_t number_of_location_catalog_entries;
StackMapEncoding stack_map_encoding;
InlineInfoEncoding inline_info_encoding;
@@ -1063,7 +1047,10 @@
const uint8_t* ptr = reinterpret_cast<const uint8_t*>(data);
non_header_size = DecodeUnsignedLeb128(&ptr);
number_of_stack_maps = DecodeUnsignedLeb128(&ptr);
- stack_map_size_in_bytes = DecodeUnsignedLeb128(&ptr);
+ number_of_stack_masks = DecodeUnsignedLeb128(&ptr);
+ number_of_register_masks = DecodeUnsignedLeb128(&ptr);
+ stack_mask_size_in_bits = DecodeUnsignedLeb128(&ptr);
+ register_mask_size_in_bits = DecodeUnsignedLeb128(&ptr);
number_of_location_catalog_entries = DecodeUnsignedLeb128(&ptr);
static_assert(alignof(StackMapEncoding) == 1,
"StackMapEncoding should not require alignment");
@@ -1084,7 +1071,10 @@
void Compress(Vector* dest) const {
EncodeUnsignedLeb128(dest, non_header_size);
EncodeUnsignedLeb128(dest, number_of_stack_maps);
- EncodeUnsignedLeb128(dest, stack_map_size_in_bytes);
+ EncodeUnsignedLeb128(dest, number_of_stack_masks);
+ EncodeUnsignedLeb128(dest, number_of_register_masks);
+ EncodeUnsignedLeb128(dest, stack_mask_size_in_bits);
+ EncodeUnsignedLeb128(dest, register_mask_size_in_bits);
EncodeUnsignedLeb128(dest, number_of_location_catalog_entries);
const uint8_t* stack_map_ptr = reinterpret_cast<const uint8_t*>(&stack_map_encoding);
dest->insert(dest->end(), stack_map_ptr, stack_map_ptr + sizeof(StackMapEncoding));
@@ -1103,7 +1093,7 @@
*
* where CodeInfoEncoding is of the form:
*
- * [non_header_size, number_of_stack_maps, stack_map_size_in_bytes,
+ * [non_header_size, number_of_stack_maps, stack_map_size_in_bits,
* number_of_location_catalog_entries, StackMapEncoding]
*/
class CodeInfo {
@@ -1118,7 +1108,7 @@
}
CodeInfoEncoding ExtractEncoding() const {
- CodeInfoEncoding encoding(region_.start());
+ CodeInfoEncoding encoding(region_.begin());
AssertValidStackMap(encoding);
return encoding;
}
@@ -1133,9 +1123,41 @@
GetDexRegisterLocationCatalogSize(encoding)));
}
+ ALWAYS_INLINE size_t GetNumberOfStackMaskBits(const CodeInfoEncoding& encoding) const {
+ return encoding.stack_mask_size_in_bits;
+ }
+
ALWAYS_INLINE StackMap GetStackMapAt(size_t i, const CodeInfoEncoding& encoding) const {
- size_t stack_map_size = encoding.stack_map_size_in_bytes;
- return StackMap(GetStackMaps(encoding).Subregion(i * stack_map_size, stack_map_size));
+ const size_t map_size = encoding.stack_map_encoding.BitSize();
+ return StackMap(BitMemoryRegion(GetStackMaps(encoding), i * map_size, map_size));
+ }
+
+ BitMemoryRegion GetStackMask(const CodeInfoEncoding& encoding, size_t stack_mask_index) const {
+ // All stack mask data is stored before register map data (which is at the very end).
+ const size_t entry_size = GetNumberOfStackMaskBits(encoding);
+ const size_t register_mask_bits =
+ encoding.register_mask_size_in_bits * encoding.number_of_register_masks;
+ return BitMemoryRegion(region_,
+ region_.size_in_bits() - register_mask_bits -
+ entry_size * (stack_mask_index + 1),
+ entry_size);
+ }
+
+ BitMemoryRegion GetStackMaskOf(const CodeInfoEncoding& encoding,
+ const StackMap& stack_map) const {
+ return GetStackMask(encoding, stack_map.GetStackMaskIndex(encoding.stack_map_encoding));
+ }
+
+ BitMemoryRegion GetRegisterMask(const CodeInfoEncoding& encoding, size_t index) const {
+ const size_t entry_size = encoding.register_mask_size_in_bits;
+ return BitMemoryRegion(region_,
+ region_.size_in_bits() - entry_size * (index + 1),
+ entry_size);
+ }
+
+ uint32_t GetRegisterMaskOf(const CodeInfoEncoding& encoding, const StackMap& stack_map) const {
+ size_t index = stack_map.GetRegisterMaskIndex(encoding.stack_map_encoding);
+ return GetRegisterMask(encoding, index).LoadBits(0u, encoding.register_mask_size_in_bits);
}
uint32_t GetNumberOfLocationCatalogEntries(const CodeInfoEncoding& encoding) const {
@@ -1151,9 +1173,14 @@
return encoding.number_of_stack_maps;
}
+ // Get the size of all the stack maps of this CodeInfo object, in bits. Not byte aligned.
+ ALWAYS_INLINE size_t GetStackMapsSizeInBits(const CodeInfoEncoding& encoding) const {
+ return encoding.stack_map_encoding.BitSize() * GetNumberOfStackMaps(encoding);
+ }
+
// Get the size of all the stack maps of this CodeInfo object, in bytes.
size_t GetStackMapsSize(const CodeInfoEncoding& encoding) const {
- return encoding.stack_map_size_in_bytes * GetNumberOfStackMaps(encoding);
+ return RoundUp(GetStackMapsSizeInBits(encoding), kBitsPerByte) / kBitsPerByte;
}
uint32_t GetDexRegisterLocationCatalogOffset(const CodeInfoEncoding& encoding) const {
@@ -1303,7 +1330,7 @@
<< encoding.non_header_size << "\n"
<< encoding.number_of_location_catalog_entries << "\n"
<< encoding.number_of_stack_maps << "\n"
- << encoding.stack_map_size_in_bytes;
+ << encoding.stack_map_encoding.BitSize();
}
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index d93eab1..7b65404 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -65,6 +65,7 @@
#include "object_lock.h"
#include "quick_exception_handler.h"
#include "quick/quick_method_frame_info.h"
+#include "read_barrier-inl.h"
#include "reflection.h"
#include "runtime.h"
#include "runtime_callbacks.h"
@@ -77,7 +78,7 @@
#include "thread-inl.h"
#include "utils.h"
#include "verifier/method_verifier.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
#include "well_known_classes.h"
#include "interpreter/interpreter.h"
@@ -1047,9 +1048,10 @@
<< "]";
}
-void Thread::Dump(std::ostream& os, bool dump_native_stack, BacktraceMap* backtrace_map) const {
+void Thread::Dump(std::ostream& os, bool dump_native_stack, BacktraceMap* backtrace_map,
+ bool force_dump_stack) const {
DumpState(os);
- DumpStack(os, dump_native_stack, backtrace_map);
+ DumpStack(os, dump_native_stack, backtrace_map, force_dump_stack);
}
mirror::String* Thread::GetThreadName() const {
@@ -1582,15 +1584,24 @@
}
struct StackDumpVisitor : public StackVisitor {
- StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate_in)
+ StackDumpVisitor(std::ostream& os_in,
+ Thread* thread_in,
+ Context* context,
+ bool can_allocate_in,
+ bool check_suspended = true,
+ bool dump_locks_in = true)
REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread_in, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ : StackVisitor(thread_in,
+ context,
+ StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+ check_suspended),
os(os_in),
can_allocate(can_allocate_in),
last_method(nullptr),
last_line_number(0),
repetition_count(0),
- frame_count(0) {}
+ frame_count(0),
+ dump_locks(dump_locks_in) {}
virtual ~StackDumpVisitor() {
if (frame_count == 0) {
@@ -1635,8 +1646,10 @@
if (frame_count == 0) {
Monitor::DescribeWait(os, GetThread());
}
- if (can_allocate) {
+ if (can_allocate && dump_locks) {
// Visit locks, but do not abort on errors. This would trigger a nested abort.
+ // Skip visiting locks if dump_locks is false as it would cause a bad_mutexes_held in
+ // RegTypeCache::RegTypeCache due to thread_list_lock.
Monitor::VisitLocks(this, DumpLockedObject, &os, false);
}
}
@@ -1680,6 +1693,7 @@
int last_line_number;
int repetition_count;
int frame_count;
+ const bool dump_locks;
};
static bool ShouldShowNativeStack(const Thread* thread)
@@ -1711,7 +1725,7 @@
return current_method != nullptr && current_method->IsNative();
}
-void Thread::DumpJavaStack(std::ostream& os) const {
+void Thread::DumpJavaStack(std::ostream& os, bool check_suspended, bool dump_locks) const {
// If flip_function is not null, it means we have run a checkpoint
// before the thread wakes up to execute the flip function and the
// thread roots haven't been forwarded. So the following access to
@@ -1740,7 +1754,7 @@
std::unique_ptr<Context> context(Context::Create());
StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(),
- !tls32_.throwing_OutOfMemoryError);
+ !tls32_.throwing_OutOfMemoryError, check_suspended, dump_locks);
dumper.WalkStack();
if (have_exception) {
@@ -1750,7 +1764,8 @@
void Thread::DumpStack(std::ostream& os,
bool dump_native_stack,
- BacktraceMap* backtrace_map) const {
+ BacktraceMap* backtrace_map,
+ bool force_dump_stack) const {
// TODO: we call this code when dying but may not have suspended the thread ourself. The
// IsSuspended check is therefore racy with the use for dumping (normally we inhibit
// the race with the thread_suspend_count_lock_).
@@ -1761,14 +1776,19 @@
// thread's stack in debug builds where we'll hit the not suspended check in the stack walk.
safe_to_dump = (safe_to_dump || dump_for_abort);
}
- if (safe_to_dump) {
+ if (safe_to_dump || force_dump_stack) {
// If we're currently in native code, dump that stack before dumping the managed stack.
- if (dump_native_stack && (dump_for_abort || ShouldShowNativeStack(this))) {
+ if (dump_native_stack && (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this))) {
DumpKernelStack(os, GetTid(), " kernel: ", false);
- ArtMethod* method = GetCurrentMethod(nullptr, !dump_for_abort);
+ ArtMethod* method =
+ GetCurrentMethod(nullptr,
+ /*check_suspended*/ !force_dump_stack,
+ /*abort_on_error*/ !(dump_for_abort || force_dump_stack));
DumpNativeStack(os, GetTid(), backtrace_map, " native: ", method);
}
- DumpJavaStack(os);
+ DumpJavaStack(os,
+ /*check_suspended*/ !force_dump_stack,
+ /*dump_locks*/ !force_dump_stack);
} else {
os << "Not able to dump stack of thread that isn't suspended";
}
@@ -1843,6 +1863,7 @@
: tls32_(daemon),
wait_monitor_(nullptr),
interrupted_(false),
+ custom_tls_(nullptr),
can_call_into_java_(true) {
wait_mutex_ = new Mutex("a thread wait mutex");
wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
@@ -2188,12 +2209,18 @@
tlsPtr_.class_loader_override = GetJniEnv()->NewGlobalRef(class_loader_override);
}
-class CountStackDepthVisitor : public StackVisitor {
+using ArtMethodDexPcPair = std::pair<ArtMethod*, uint32_t>;
+
+// Counts the stack trace depth and also fetches the first max_saved_frames frames.
+class FetchStackTraceVisitor : public StackVisitor {
public:
- explicit CountStackDepthVisitor(Thread* thread)
+ explicit FetchStackTraceVisitor(Thread* thread,
+ ArtMethodDexPcPair* saved_frames = nullptr,
+ size_t max_saved_frames = 0)
REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- depth_(0), skip_depth_(0), skipping_(true) {}
+ saved_frames_(saved_frames),
+ max_saved_frames_(max_saved_frames) {}
bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
// We want to skip frames up to and including the exception's constructor.
@@ -2206,6 +2233,10 @@
}
if (!skipping_) {
if (!m->IsRuntimeMethod()) { // Ignore runtime frames (in particular callee save).
+ if (depth_ < max_saved_frames_) {
+ saved_frames_[depth_].first = m;
+ saved_frames_[depth_].second = m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc();
+ }
++depth_;
}
} else {
@@ -2214,20 +2245,22 @@
return true;
}
- int GetDepth() const {
+ uint32_t GetDepth() const {
return depth_;
}
- int GetSkipDepth() const {
+ uint32_t GetSkipDepth() const {
return skip_depth_;
}
private:
- uint32_t depth_;
- uint32_t skip_depth_;
- bool skipping_;
+ uint32_t depth_ = 0;
+ uint32_t skip_depth_ = 0;
+ bool skipping_ = true;
+ ArtMethodDexPcPair* saved_frames_;
+ const size_t max_saved_frames_;
- DISALLOW_COPY_AND_ASSIGN(CountStackDepthVisitor);
+ DISALLOW_COPY_AND_ASSIGN(FetchStackTraceVisitor);
};
template<bool kTransactionActive>
@@ -2237,8 +2270,6 @@
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
self_(self),
skip_depth_(skip_depth),
- count_(0),
- trace_(nullptr),
pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {}
bool Init(int depth) REQUIRES_SHARED(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) {
@@ -2290,17 +2321,21 @@
if (m->IsRuntimeMethod()) {
return true; // Ignore runtime frames (in particular callee save).
}
+ AddFrame(m, m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc());
+ return true;
+ }
+
+ void AddFrame(ArtMethod* method, uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::PointerArray> trace_methods_and_pcs = GetTraceMethodsAndPCs();
- trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>(count_, m, pointer_size_);
+ trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>(count_, method, pointer_size_);
trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>(
trace_methods_and_pcs->GetLength() / 2 + count_,
- m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc(),
+ dex_pc,
pointer_size_);
// Save the declaring class of the method to ensure that the declaring classes of the methods
// do not get unloaded while the stack trace is live.
- trace_->Set(count_ + 1, m->GetDeclaringClass());
+ trace_->Set(count_ + 1, method->GetDeclaringClass());
++count_;
- return true;
}
ObjPtr<mirror::PointerArray> GetTraceMethodsAndPCs() const REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -2316,12 +2351,12 @@
// How many more frames to skip.
int32_t skip_depth_;
// Current position down stack trace.
- uint32_t count_;
+ uint32_t count_ = 0;
// An object array where the first element is a pointer array that contains the ArtMethod
// pointers on the stack and dex PCs. The rest of the elements are the declaring
// class of the ArtMethod pointers. trace_[i+1] contains the declaring class of the ArtMethod of
// the i'th frame.
- mirror::ObjectArray<mirror::Object>* trace_;
+ mirror::ObjectArray<mirror::Object>* trace_ = nullptr;
// For cross compilation.
const PointerSize pointer_size_;
@@ -2330,11 +2365,15 @@
template<bool kTransactionActive>
jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const {
- // Compute depth of stack
- CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
+ // Compute depth of stack, save frames if possible to avoid needing to recompute many.
+ constexpr size_t kMaxSavedFrames = 256;
+ std::unique_ptr<ArtMethodDexPcPair[]> saved_frames(new ArtMethodDexPcPair[kMaxSavedFrames]);
+ FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this),
+ &saved_frames[0],
+ kMaxSavedFrames);
count_visitor.WalkStack();
- int32_t depth = count_visitor.GetDepth();
- int32_t skip_depth = count_visitor.GetSkipDepth();
+ const uint32_t depth = count_visitor.GetDepth();
+ const uint32_t skip_depth = count_visitor.GetSkipDepth();
// Build internal stack trace.
BuildInternalStackTraceVisitor<kTransactionActive> build_trace_visitor(soa.Self(),
@@ -2343,7 +2382,16 @@
if (!build_trace_visitor.Init(depth)) {
return nullptr; // Allocation failed.
}
- build_trace_visitor.WalkStack();
+ // If we saved all of the frames we don't even need to do the actual stack walk. This is faster
+ // than doing the stack walk twice.
+ if (depth < kMaxSavedFrames) {
+ for (size_t i = 0; i < depth; ++i) {
+ build_trace_visitor.AddFrame(saved_frames[i].first, saved_frames[i].second);
+ }
+ } else {
+ build_trace_visitor.WalkStack();
+ }
+
mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace();
if (kIsDebugBuild) {
ObjPtr<mirror::PointerArray> trace_methods = build_trace_visitor.GetTraceMethodsAndPCs();
@@ -2362,9 +2410,10 @@
const ScopedObjectAccessAlreadyRunnable& soa) const;
bool Thread::IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const {
- CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
+ // Only count the depth since we do not pass a stack frame array as an argument.
+ FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this));
count_visitor.WalkStack();
- return count_visitor.GetDepth() == exception->GetStackDepth();
+ return count_visitor.GetDepth() == static_cast<uint32_t>(exception->GetStackDepth());
}
jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
@@ -2663,6 +2712,10 @@
return; \
}
QUICK_ENTRY_POINT_INFO(pAllocArrayResolved)
+ QUICK_ENTRY_POINT_INFO(pAllocArrayResolved8)
+ QUICK_ENTRY_POINT_INFO(pAllocArrayResolved16)
+ QUICK_ENTRY_POINT_INFO(pAllocArrayResolved32)
+ QUICK_ENTRY_POINT_INFO(pAllocArrayResolved64)
QUICK_ENTRY_POINT_INFO(pAllocObjectResolved)
QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized)
QUICK_ENTRY_POINT_INFO(pAllocObjectWithChecks)
@@ -2848,13 +2901,16 @@
if (Dbg::IsForcedInterpreterNeededForException(this)) {
NthCallerVisitor visitor(this, 0, false);
visitor.WalkStack();
- if (Runtime::Current()->IsDeoptimizeable(visitor.caller_pc)) {
+ if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.caller_pc)) {
// Save the exception into the deoptimization context so it can be restored
// before entering the interpreter.
PushDeoptimizationContext(
JValue(), /*is_reference */ false, /* from_code */ false, exception);
artDeoptimize(this);
UNREACHABLE();
+ } else {
+ LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
+ << visitor.caller->PrettyMethod();
}
}
@@ -2881,9 +2937,12 @@
// Note: this visitor may return with a method set, but dex_pc_ being DexFile:kDexNoIndex. This is
// so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
struct CurrentMethodVisitor FINAL : public StackVisitor {
- CurrentMethodVisitor(Thread* thread, Context* context, bool abort_on_error)
+ CurrentMethodVisitor(Thread* thread, Context* context, bool check_suspended, bool abort_on_error)
REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ : StackVisitor(thread,
+ context,
+ StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+ check_suspended),
this_object_(nullptr),
method_(nullptr),
dex_pc_(0),
@@ -2907,8 +2966,13 @@
const bool abort_on_error_;
};
-ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error) const {
- CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr, abort_on_error);
+ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc,
+ bool check_suspended,
+ bool abort_on_error) const {
+ CurrentMethodVisitor visitor(const_cast<Thread*>(this),
+ nullptr,
+ check_suspended,
+ abort_on_error);
visitor.WalkStack(false);
if (dex_pc != nullptr) {
*dex_pc = visitor.dex_pc_;
@@ -3031,9 +3095,10 @@
T vreg_info(m, code_info, encoding, map, visitor_);
// Visit stack entries that hold pointers.
- size_t number_of_bits = map.GetNumberOfStackMaskBits(encoding.stack_map_encoding);
+ const size_t number_of_bits = code_info.GetNumberOfStackMaskBits(encoding);
+ BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, map);
for (size_t i = 0; i < number_of_bits; ++i) {
- if (map.GetStackMaskBit(encoding.stack_map_encoding, i)) {
+ if (stack_mask.LoadBit(i)) {
auto* ref_addr = vreg_base + i;
mirror::Object* ref = ref_addr->AsMirrorPtr();
if (ref != nullptr) {
@@ -3041,12 +3106,12 @@
vreg_info.VisitStack(&new_ref, i, this);
if (ref != new_ref) {
ref_addr->Assign(new_ref);
- }
+ }
}
}
}
// Visit callee-save registers that hold pointers.
- uint32_t register_mask = map.GetRegisterMask(encoding.stack_map_encoding);
+ uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, map);
for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
if (register_mask & (1 << i)) {
mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
@@ -3419,4 +3484,15 @@
return Runtime::Current()->IsAotCompiler();
}
+mirror::Object* Thread::GetPeerFromOtherThread() const {
+ mirror::Object* peer = GetPeer();
+ if (kUseReadBarrier && Current()->GetIsGcMarking()) {
+ // We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack
+ // may have not been flipped yet and peer may be a from-space (stale) ref. So explicitly
+ // mark/forward it here.
+ peer = art::ReadBarrier::Mark(peer);
+ }
+ return peer;
+}
+
} // namespace art
diff --git a/runtime/thread.h b/runtime/thread.h
index b609e72..3a1b7da 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -196,11 +196,14 @@
// Dumps the detailed thread state and the thread stack (used for SIGQUIT).
void Dump(std::ostream& os,
bool dump_native_stack = true,
- BacktraceMap* backtrace_map = nullptr) const
+ BacktraceMap* backtrace_map = nullptr,
+ bool force_dump_stack = false) const
REQUIRES(!Locks::thread_suspend_count_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- void DumpJavaStack(std::ostream& os) const
+ void DumpJavaStack(std::ostream& os,
+ bool check_suspended = true,
+ bool dump_locks = true) const
REQUIRES(!Locks::thread_suspend_count_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -358,6 +361,10 @@
CHECK(tlsPtr_.jpeer == nullptr);
return tlsPtr_.opeer;
}
+ // GetPeer is not safe if called on another thread in the middle of the CC thread flip and
+ // the thread's stack may have not been flipped yet and peer may be a from-space (stale) ref.
+ // This function will explicitly mark/forward it.
+ mirror::Object* GetPeerFromOtherThread() const REQUIRES_SHARED(Locks::mutator_lock_);
bool HasPeer() const {
return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
@@ -410,7 +417,9 @@
// Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
// abort the runtime iff abort_on_error is true.
- ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const
+ ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
+ bool check_suspended = true,
+ bool abort_on_error = true) const
REQUIRES_SHARED(Locks::mutator_lock_);
// Returns whether the given exception was thrown by the current Java method being executed
@@ -1204,7 +1213,8 @@
void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
void DumpStack(std::ostream& os,
bool dump_native_stack = true,
- BacktraceMap* backtrace_map = nullptr) const
+ BacktraceMap* backtrace_map = nullptr,
+ bool force_dump_stack = false) const
REQUIRES(!Locks::thread_suspend_count_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 01c940e..df8acc3 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -455,7 +455,6 @@
Closure* flip_callback,
gc::collector::GarbageCollector* collector) {
TimingLogger::ScopedTiming split("ThreadListFlip", collector->GetTimings());
- const uint64_t start_time = NanoTime();
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertNotHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
@@ -464,13 +463,17 @@
collector->GetHeap()->ThreadFlipBegin(self); // Sync with JNI critical calls.
+ // ThreadFlipBegin happens before we suspend all the threads, so it does not count towards the
+ // pause.
+ const uint64_t suspend_start_time = NanoTime();
SuspendAllInternal(self, self, nullptr);
// Run the flip callback for the collector.
Locks::mutator_lock_->ExclusiveLock(self);
+ suspend_all_historam_.AdjustAndAddValue(NanoTime() - suspend_start_time);
flip_callback->Run(self);
Locks::mutator_lock_->ExclusiveUnlock(self);
- collector->RegisterPause(NanoTime() - start_time);
+ collector->RegisterPause(NanoTime() - suspend_start_time);
// Resume runnable threads.
size_t runnable_thread_count = 0;
@@ -629,8 +632,9 @@
MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
// Update global suspend all state for attaching threads.
++suspend_all_count_;
- if (debug_suspend)
+ if (debug_suspend) {
++debug_suspend_all_count_;
+ }
pending_threads.StoreRelaxed(list_.size() - num_ignored);
// Increment everybody's suspend count (except those that should be ignored).
for (const auto& thread : list_) {
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index d9179c3..d24a5e5 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -88,7 +88,10 @@
void* ThreadPoolWorker::Callback(void* arg) {
ThreadPoolWorker* worker = reinterpret_cast<ThreadPoolWorker*>(arg);
Runtime* runtime = Runtime::Current();
- CHECK(runtime->AttachCurrentThread(worker->name_.c_str(), true, nullptr, false));
+ CHECK(runtime->AttachCurrentThread(worker->name_.c_str(),
+ true,
+ nullptr,
+ worker->thread_pool_->create_peers_));
worker->thread_ = Thread::Current();
// Thread pool workers cannot call into java.
worker->thread_->SetCanCallIntoJava(false);
@@ -112,7 +115,7 @@
tasks_.clear();
}
-ThreadPool::ThreadPool(const char* name, size_t num_threads)
+ThreadPool::ThreadPool(const char* name, size_t num_threads, bool create_peers)
: name_(name),
task_queue_lock_("task queue lock"),
task_queue_condition_("task queue condition", task_queue_lock_),
@@ -124,7 +127,8 @@
total_wait_time_(0),
// Add one since the caller of constructor waits on the barrier too.
creation_barier_(num_threads + 1),
- max_active_workers_(num_threads) {
+ max_active_workers_(num_threads),
+ create_peers_(create_peers) {
Thread* self = Thread::Current();
while (GetThreadCount() < num_threads) {
const std::string worker_name = StringPrintf("%s worker thread %zu", name_.c_str(),
@@ -217,6 +221,7 @@
void ThreadPool::Wait(Thread* self, bool do_work, bool may_hold_locks) {
if (do_work) {
+ CHECK(!create_peers_);
Task* task = nullptr;
while ((task = TryGetTask(self)) != nullptr) {
task->Run(self);
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index 7ecfcd1..a465e11 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -105,11 +105,17 @@
// Remove all tasks in the queue.
void RemoveAllTasks(Thread* self) REQUIRES(!task_queue_lock_);
- ThreadPool(const char* name, size_t num_threads);
+ // Create a named thread pool with the given number of threads.
+ //
+ // If create_peers is true, all worker threads will have a Java peer object. Note that if the
+ // pool is asked to do work on the current thread (see Wait), a peer may not be available. Wait
+ // will conservatively abort if create_peers and do_work are true.
+ ThreadPool(const char* name, size_t num_threads, bool create_peers = false);
virtual ~ThreadPool();
// Wait for all tasks currently on queue to get completed. If the pool has been stopped, only
// wait till all already running tasks are done.
+ // When the pool was created with peers for workers, do_work must not be true (see ThreadPool()).
void Wait(Thread* self, bool do_work, bool may_hold_locks) REQUIRES(!task_queue_lock_);
size_t GetTaskCount(Thread* self) REQUIRES(!task_queue_lock_);
@@ -159,6 +165,7 @@
uint64_t total_wait_time_;
Barrier creation_barier_;
size_t max_active_workers_ GUARDED_BY(task_queue_lock_);
+ const bool create_peers_;
private:
friend class ThreadPoolWorker;
diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc
index 14c2c3b..28aa21f 100644
--- a/runtime/thread_pool_test.cc
+++ b/runtime/thread_pool_test.cc
@@ -20,6 +20,7 @@
#include "atomic.h"
#include "common_runtime_test.h"
+#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
namespace art {
@@ -159,4 +160,55 @@
EXPECT_EQ((1 << depth) - 1, count.LoadSequentiallyConsistent());
}
+class PeerTask : public Task {
+ public:
+ PeerTask() {}
+
+ void Run(Thread* self) {
+ ScopedObjectAccess soa(self);
+ CHECK(self->GetPeer() != nullptr);
+ }
+
+ void Finalize() {
+ delete this;
+ }
+};
+
+class NoPeerTask : public Task {
+ public:
+ NoPeerTask() {}
+
+ void Run(Thread* self) {
+ ScopedObjectAccess soa(self);
+ CHECK(self->GetPeer() == nullptr);
+ }
+
+ void Finalize() {
+ delete this;
+ }
+};
+
+// Tests for create_peer functionality.
+TEST_F(ThreadPoolTest, PeerTest) {
+ Thread* self = Thread::Current();
+ {
+ ThreadPool thread_pool("Thread pool test thread pool", 1);
+ thread_pool.AddTask(self, new NoPeerTask());
+ thread_pool.StartWorkers(self);
+ thread_pool.Wait(self, false, false);
+ }
+
+ {
+ // To create peers, the runtime needs to be started.
+ self->TransitionFromSuspendedToRunnable();
+ bool started = runtime_->Start();
+ ASSERT_TRUE(started);
+
+ ThreadPool thread_pool("Thread pool test thread pool", 1, true);
+ thread_pool.AddTask(self, new PeerTask());
+ thread_pool.StartWorkers(self);
+ thread_pool.Wait(self, false, false);
+ }
+}
+
} // namespace art
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 2add955..3a9975a 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -905,6 +905,9 @@
void Trace::LogMethodTraceEvent(Thread* thread, ArtMethod* method,
instrumentation::Instrumentation::InstrumentationEvent event,
uint32_t thread_clock_diff, uint32_t wall_clock_diff) {
+ // Ensure we always use the non-obsolete version of the method so that entry/exit events have the
+ // same pointer value.
+ method = method->GetNonObsoleteMethod();
// Advance cur_offset_ atomically.
int32_t new_offset;
int32_t old_offset = 0;
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index 2536968..56ff0a1 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -41,12 +41,12 @@
MutexLock mu(Thread::Current(), log_lock_);
size_t objects_count = object_logs_.size();
size_t field_values_count = 0;
- for (auto it : object_logs_) {
+ for (const auto& it : object_logs_) {
field_values_count += it.second.Size();
}
size_t array_count = array_logs_.size();
size_t array_values_count = 0;
- for (auto it : array_logs_) {
+ for (const auto& it : array_logs_) {
array_values_count += it.second.Size();
}
size_t intern_string_count = intern_string_logs_.size();
@@ -100,24 +100,30 @@
return abort_message_;
}
-void Transaction::RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset,
- uint8_t value, bool is_volatile) {
+void Transaction::RecordWriteFieldBoolean(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint8_t value,
+ bool is_volatile) {
DCHECK(obj != nullptr);
MutexLock mu(Thread::Current(), log_lock_);
ObjectLog& object_log = object_logs_[obj];
object_log.LogBooleanValue(field_offset, value, is_volatile);
}
-void Transaction::RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset,
- int8_t value, bool is_volatile) {
+void Transaction::RecordWriteFieldByte(mirror::Object* obj,
+ MemberOffset field_offset,
+ int8_t value,
+ bool is_volatile) {
DCHECK(obj != nullptr);
MutexLock mu(Thread::Current(), log_lock_);
ObjectLog& object_log = object_logs_[obj];
object_log.LogByteValue(field_offset, value, is_volatile);
}
-void Transaction::RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset,
- uint16_t value, bool is_volatile) {
+void Transaction::RecordWriteFieldChar(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint16_t value,
+ bool is_volatile) {
DCHECK(obj != nullptr);
MutexLock mu(Thread::Current(), log_lock_);
ObjectLog& object_log = object_logs_[obj];
@@ -125,8 +131,10 @@
}
-void Transaction::RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset,
- int16_t value, bool is_volatile) {
+void Transaction::RecordWriteFieldShort(mirror::Object* obj,
+ MemberOffset field_offset,
+ int16_t value,
+ bool is_volatile) {
DCHECK(obj != nullptr);
MutexLock mu(Thread::Current(), log_lock_);
ObjectLog& object_log = object_logs_[obj];
@@ -134,7 +142,9 @@
}
-void Transaction::RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
+void Transaction::RecordWriteField32(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint32_t value,
bool is_volatile) {
DCHECK(obj != nullptr);
MutexLock mu(Thread::Current(), log_lock_);
@@ -142,7 +152,9 @@
object_log.Log32BitsValue(field_offset, value, is_volatile);
}
-void Transaction::RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
+void Transaction::RecordWriteField64(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint64_t value,
bool is_volatile) {
DCHECK(obj != nullptr);
MutexLock mu(Thread::Current(), log_lock_);
@@ -150,8 +162,10 @@
object_log.Log64BitsValue(field_offset, value, is_volatile);
}
-void Transaction::RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
- mirror::Object* value, bool is_volatile) {
+void Transaction::RecordWriteFieldReference(mirror::Object* obj,
+ MemberOffset field_offset,
+ mirror::Object* value,
+ bool is_volatile) {
DCHECK(obj != nullptr);
MutexLock mu(Thread::Current(), log_lock_);
ObjectLog& object_log = object_logs_[obj];
@@ -163,8 +177,12 @@
DCHECK(array->IsArrayInstance());
DCHECK(!array->IsObjectArray());
MutexLock mu(Thread::Current(), log_lock_);
- ArrayLog& array_log = array_logs_[array];
- array_log.LogValue(index, value);
+ auto it = array_logs_.find(array);
+ if (it == array_logs_.end()) {
+ ArrayLog log;
+ it = array_logs_.emplace(array, std::move(log)).first;
+ }
+ it->second.LogValue(index, value);
}
void Transaction::RecordResolveString(ObjPtr<mirror::DexCache> dex_cache,
@@ -172,33 +190,33 @@
DCHECK(dex_cache != nullptr);
DCHECK_LT(string_idx.index_, dex_cache->GetDexFile()->NumStringIds());
MutexLock mu(Thread::Current(), log_lock_);
- resolve_string_logs_.push_back(ResolveStringLog(dex_cache, string_idx));
+ resolve_string_logs_.emplace_back(dex_cache, string_idx);
}
void Transaction::RecordStrongStringInsertion(ObjPtr<mirror::String> s) {
InternStringLog log(s, InternStringLog::kStrongString, InternStringLog::kInsert);
- LogInternedString(log);
+ LogInternedString(std::move(log));
}
void Transaction::RecordWeakStringInsertion(ObjPtr<mirror::String> s) {
InternStringLog log(s, InternStringLog::kWeakString, InternStringLog::kInsert);
- LogInternedString(log);
+ LogInternedString(std::move(log));
}
void Transaction::RecordStrongStringRemoval(ObjPtr<mirror::String> s) {
InternStringLog log(s, InternStringLog::kStrongString, InternStringLog::kRemove);
- LogInternedString(log);
+ LogInternedString(std::move(log));
}
void Transaction::RecordWeakStringRemoval(ObjPtr<mirror::String> s) {
InternStringLog log(s, InternStringLog::kWeakString, InternStringLog::kRemove);
- LogInternedString(log);
+ LogInternedString(std::move(log));
}
-void Transaction::LogInternedString(const InternStringLog& log) {
+void Transaction::LogInternedString(InternStringLog&& log) {
Locks::intern_table_lock_->AssertExclusiveHeld(Thread::Current());
MutexLock mu(Thread::Current(), log_lock_);
- intern_string_logs_.push_front(log);
+ intern_string_logs_.push_front(std::move(log));
}
void Transaction::Rollback() {
@@ -216,7 +234,7 @@
void Transaction::UndoObjectModifications() {
// TODO we may not need to restore objects allocated during this transaction. Or we could directly
// remove them from the heap.
- for (auto it : object_logs_) {
+ for (const auto& it : object_logs_) {
it.second.Undo(it.first);
}
object_logs_.clear();
@@ -225,7 +243,7 @@
void Transaction::UndoArrayModifications() {
// TODO we may not need to restore array allocated during this transaction. Or we could directly
// remove them from the heap.
- for (auto it : array_logs_) {
+ for (const auto& it : array_logs_) {
it.second.Undo(it.first);
}
array_logs_.clear();
@@ -235,7 +253,7 @@
InternTable* const intern_table = Runtime::Current()->GetInternTable();
// We want to undo each operation from the most recent to the oldest. List has been filled so the
// most recent operation is at list begin so just have to iterate over it.
- for (InternStringLog& string_log : intern_string_logs_) {
+ for (const InternStringLog& string_log : intern_string_logs_) {
string_log.Undo(intern_table);
}
intern_string_logs_.clear();
@@ -262,7 +280,7 @@
std::list<ObjectPair> moving_roots;
// Visit roots.
- for (auto it : object_logs_) {
+ for (auto& it : object_logs_) {
it.second.VisitRoots(visitor);
mirror::Object* old_root = it.first;
mirror::Object* new_root = old_root;
@@ -279,7 +297,7 @@
auto old_root_it = object_logs_.find(old_root);
CHECK(old_root_it != object_logs_.end());
CHECK(object_logs_.find(new_root) == object_logs_.end());
- object_logs_.insert(std::make_pair(new_root, old_root_it->second));
+ object_logs_.emplace(new_root, std::move(old_root_it->second));
object_logs_.erase(old_root_it);
}
}
@@ -289,7 +307,7 @@
typedef std::pair<mirror::Array*, mirror::Array*> ArrayPair;
std::list<ArrayPair> moving_roots;
- for (auto it : array_logs_) {
+ for (auto& it : array_logs_) {
mirror::Array* old_root = it.first;
CHECK(!old_root->IsObjectArray());
mirror::Array* new_root = old_root;
@@ -306,7 +324,7 @@
auto old_root_it = array_logs_.find(old_root);
CHECK(old_root_it != array_logs_.end());
CHECK(array_logs_.find(new_root) == array_logs_.end());
- array_logs_.insert(std::make_pair(new_root, old_root_it->second));
+ array_logs_.emplace(new_root, std::move(old_root_it->second));
array_logs_.erase(old_root_it);
}
}
@@ -347,23 +365,27 @@
LogValue(ObjectLog::k64Bits, offset, value, is_volatile);
}
-void Transaction::ObjectLog::LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile) {
+void Transaction::ObjectLog::LogReferenceValue(MemberOffset offset,
+ mirror::Object* obj,
+ bool is_volatile) {
LogValue(ObjectLog::kReference, offset, reinterpret_cast<uintptr_t>(obj), is_volatile);
}
void Transaction::ObjectLog::LogValue(ObjectLog::FieldValueKind kind,
- MemberOffset offset, uint64_t value, bool is_volatile) {
+ MemberOffset offset,
+ uint64_t value,
+ bool is_volatile) {
auto it = field_values_.find(offset.Uint32Value());
if (it == field_values_.end()) {
ObjectLog::FieldValue field_value;
field_value.value = value;
field_value.is_volatile = is_volatile;
field_value.kind = kind;
- field_values_.insert(std::make_pair(offset.Uint32Value(), field_value));
+ field_values_.emplace(offset.Uint32Value(), std::move(field_value));
}
}
-void Transaction::ObjectLog::Undo(mirror::Object* obj) {
+void Transaction::ObjectLog::Undo(mirror::Object* obj) const {
for (auto& it : field_values_) {
// Garbage collector needs to access object's class and array's length. So we don't rollback
// these values.
@@ -377,60 +399,71 @@
// Skip Array::length field.
continue;
}
- FieldValue& field_value = it.second;
+ const FieldValue& field_value = it.second;
UndoFieldWrite(obj, field_offset, field_value);
}
}
-void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj, MemberOffset field_offset,
- const FieldValue& field_value) {
+void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj,
+ MemberOffset field_offset,
+ const FieldValue& field_value) const {
// TODO We may want to abort a transaction while still being in transaction mode. In this case,
// we'd need to disable the check.
constexpr bool kCheckTransaction = true;
switch (field_value.kind) {
case kBoolean:
if (UNLIKELY(field_value.is_volatile)) {
- obj->SetFieldBooleanVolatile<false, kCheckTransaction>(field_offset,
- static_cast<bool>(field_value.value));
+ obj->SetFieldBooleanVolatile<false, kCheckTransaction>(
+ field_offset,
+ static_cast<bool>(field_value.value));
} else {
- obj->SetFieldBoolean<false, kCheckTransaction>(field_offset,
- static_cast<bool>(field_value.value));
+ obj->SetFieldBoolean<false, kCheckTransaction>(
+ field_offset,
+ static_cast<bool>(field_value.value));
}
break;
case kByte:
if (UNLIKELY(field_value.is_volatile)) {
- obj->SetFieldByteVolatile<false, kCheckTransaction>(field_offset,
- static_cast<int8_t>(field_value.value));
+ obj->SetFieldByteVolatile<false, kCheckTransaction>(
+ field_offset,
+ static_cast<int8_t>(field_value.value));
} else {
- obj->SetFieldByte<false, kCheckTransaction>(field_offset,
- static_cast<int8_t>(field_value.value));
+ obj->SetFieldByte<false, kCheckTransaction>(
+ field_offset,
+ static_cast<int8_t>(field_value.value));
}
break;
case kChar:
if (UNLIKELY(field_value.is_volatile)) {
- obj->SetFieldCharVolatile<false, kCheckTransaction>(field_offset,
- static_cast<uint16_t>(field_value.value));
+ obj->SetFieldCharVolatile<false, kCheckTransaction>(
+ field_offset,
+ static_cast<uint16_t>(field_value.value));
} else {
- obj->SetFieldChar<false, kCheckTransaction>(field_offset,
- static_cast<uint16_t>(field_value.value));
+ obj->SetFieldChar<false, kCheckTransaction>(
+ field_offset,
+ static_cast<uint16_t>(field_value.value));
}
break;
case kShort:
if (UNLIKELY(field_value.is_volatile)) {
- obj->SetFieldShortVolatile<false, kCheckTransaction>(field_offset,
- static_cast<int16_t>(field_value.value));
+ obj->SetFieldShortVolatile<false, kCheckTransaction>(
+ field_offset,
+ static_cast<int16_t>(field_value.value));
} else {
- obj->SetFieldShort<false, kCheckTransaction>(field_offset,
- static_cast<int16_t>(field_value.value));
+ obj->SetFieldShort<false, kCheckTransaction>(
+ field_offset,
+ static_cast<int16_t>(field_value.value));
}
break;
case k32Bits:
if (UNLIKELY(field_value.is_volatile)) {
- obj->SetField32Volatile<false, kCheckTransaction>(field_offset,
- static_cast<uint32_t>(field_value.value));
+ obj->SetField32Volatile<false, kCheckTransaction>(
+ field_offset,
+ static_cast<uint32_t>(field_value.value));
} else {
- obj->SetField32<false, kCheckTransaction>(field_offset,
- static_cast<uint32_t>(field_value.value));
+ obj->SetField32<false, kCheckTransaction>(
+ field_offset,
+ static_cast<uint32_t>(field_value.value));
}
break;
case k64Bits:
@@ -442,11 +475,13 @@
break;
case kReference:
if (UNLIKELY(field_value.is_volatile)) {
- obj->SetFieldObjectVolatile<false, kCheckTransaction>(field_offset,
- reinterpret_cast<mirror::Object*>(field_value.value));
+ obj->SetFieldObjectVolatile<false, kCheckTransaction>(
+ field_offset,
+ reinterpret_cast<mirror::Object*>(field_value.value));
} else {
- obj->SetFieldObject<false, kCheckTransaction>(field_offset,
- reinterpret_cast<mirror::Object*>(field_value.value));
+ obj->SetFieldObject<false, kCheckTransaction>(
+ field_offset,
+ reinterpret_cast<mirror::Object*>(field_value.value));
}
break;
default:
@@ -456,7 +491,7 @@
}
void Transaction::ObjectLog::VisitRoots(RootVisitor* visitor) {
- for (auto it : field_values_) {
+ for (auto& it : field_values_) {
FieldValue& field_value = it.second;
if (field_value.kind == ObjectLog::kReference) {
visitor->VisitRootIfNonNull(reinterpret_cast<mirror::Object**>(&field_value.value),
@@ -465,7 +500,7 @@
}
}
-void Transaction::InternStringLog::Undo(InternTable* intern_table) {
+void Transaction::InternStringLog::Undo(InternTable* intern_table) const {
DCHECK(intern_table != nullptr);
switch (string_op_) {
case InternStringLog::kInsert: {
@@ -506,7 +541,7 @@
str_.VisitRoot(visitor, RootInfo(kRootInternedString));
}
-void Transaction::ResolveStringLog::Undo() {
+void Transaction::ResolveStringLog::Undo() const {
dex_cache_.Read()->ClearString(string_idx_);
}
@@ -538,7 +573,7 @@
}
}
-void Transaction::ArrayLog::Undo(mirror::Array* array) {
+void Transaction::ArrayLog::Undo(mirror::Array* array) const {
DCHECK(array != nullptr);
DCHECK(array->IsArrayInstance());
Primitive::Type type = array->GetClass()->GetComponentType()->GetPrimitiveType();
@@ -547,8 +582,10 @@
}
}
-void Transaction::ArrayLog::UndoArrayWrite(mirror::Array* array, Primitive::Type array_type,
- size_t index, uint64_t value) {
+void Transaction::ArrayLog::UndoArrayWrite(mirror::Array* array,
+ Primitive::Type array_type,
+ size_t index,
+ uint64_t value) const {
// TODO We may want to abort a transaction while still being in transaction mode. In this case,
// we'd need to disable the check.
switch (array_type) {
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 1774657..7aa98cd 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -56,26 +56,40 @@
bool IsAborted() REQUIRES(!log_lock_);
// Record object field changes.
- void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
+ void RecordWriteFieldBoolean(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint8_t value,
bool is_volatile)
REQUIRES(!log_lock_);
- void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
- bool is_volatile)
- REQUIRES(!log_lock_);
- void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
+ void RecordWriteFieldByte(mirror::Object* obj,
+ MemberOffset field_offset,
+ int8_t value,
bool is_volatile)
REQUIRES(!log_lock_);
- void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
+ void RecordWriteFieldChar(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint16_t value,
+ bool is_volatile)
+ REQUIRES(!log_lock_);
+ void RecordWriteFieldShort(mirror::Object* obj,
+ MemberOffset field_offset,
+ int16_t value,
bool is_volatile)
REQUIRES(!log_lock_);
- void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
+ void RecordWriteField32(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint32_t value,
bool is_volatile)
REQUIRES(!log_lock_);
- void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
+ void RecordWriteField64(mirror::Object* obj,
+ MemberOffset field_offset,
+ uint64_t value,
bool is_volatile)
REQUIRES(!log_lock_);
- void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
- mirror::Object* value, bool is_volatile)
+ void RecordWriteFieldReference(mirror::Object* obj,
+ MemberOffset field_offset,
+ mirror::Object* value,
+ bool is_volatile)
REQUIRES(!log_lock_);
// Record array change.
@@ -122,13 +136,16 @@
void Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile);
void LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile);
- void Undo(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
+ void Undo(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
size_t Size() const {
return field_values_.size();
}
+ ObjectLog() = default;
+ ObjectLog(ObjectLog&& log) = default;
+
private:
enum FieldValueKind {
kBoolean,
@@ -144,33 +161,49 @@
uint64_t value;
FieldValueKind kind;
bool is_volatile;
+
+ FieldValue() = default;
+ FieldValue(FieldValue&& log) = default;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldValue);
};
void LogValue(FieldValueKind kind, MemberOffset offset, uint64_t value, bool is_volatile);
- void UndoFieldWrite(mirror::Object* obj, MemberOffset field_offset,
- const FieldValue& field_value) REQUIRES_SHARED(Locks::mutator_lock_);
+ void UndoFieldWrite(mirror::Object* obj,
+ MemberOffset field_offset,
+ const FieldValue& field_value) const REQUIRES_SHARED(Locks::mutator_lock_);
// Maps field's offset to its value.
std::map<uint32_t, FieldValue> field_values_;
+
+ DISALLOW_COPY_AND_ASSIGN(ObjectLog);
};
class ArrayLog : public ValueObject {
public:
void LogValue(size_t index, uint64_t value);
- void Undo(mirror::Array* obj) REQUIRES_SHARED(Locks::mutator_lock_);
+ void Undo(mirror::Array* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
size_t Size() const {
return array_values_.size();
}
+ ArrayLog() = default;
+ ArrayLog(ArrayLog&& log) = default;
+
private:
- void UndoArrayWrite(mirror::Array* array, Primitive::Type array_type, size_t index,
- uint64_t value) REQUIRES_SHARED(Locks::mutator_lock_);
+ void UndoArrayWrite(mirror::Array* array,
+ Primitive::Type array_type,
+ size_t index,
+ uint64_t value) const REQUIRES_SHARED(Locks::mutator_lock_);
// Maps index to value.
// TODO use JValue instead ?
std::map<size_t, uint64_t> array_values_;
+
+ DISALLOW_COPY_AND_ASSIGN(ArrayLog);
};
class InternStringLog : public ValueObject {
@@ -185,31 +218,38 @@
};
InternStringLog(ObjPtr<mirror::String> s, StringKind kind, StringOp op);
- void Undo(InternTable* intern_table)
+ void Undo(InternTable* intern_table) const
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::intern_table_lock_);
void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+ InternStringLog() = default;
+ InternStringLog(InternStringLog&& log) = default;
+
private:
- GcRoot<mirror::String> str_;
+ mutable GcRoot<mirror::String> str_;
const StringKind string_kind_;
const StringOp string_op_;
+
+ DISALLOW_COPY_AND_ASSIGN(InternStringLog);
};
class ResolveStringLog : public ValueObject {
public:
ResolveStringLog(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx);
- void Undo() REQUIRES_SHARED(Locks::mutator_lock_);
+ void Undo() const REQUIRES_SHARED(Locks::mutator_lock_);
void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
private:
GcRoot<mirror::DexCache> dex_cache_;
const dex::StringIndex string_idx_;
+
+ DISALLOW_COPY_AND_ASSIGN(ResolveStringLog);
};
- void LogInternedString(const InternStringLog& log)
+ void LogInternedString(InternStringLog&& log)
REQUIRES(Locks::intern_table_lock_)
REQUIRES(!log_lock_);
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 8867743..6a20eaf 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -415,6 +415,22 @@
return result;
}
+std::string GetJniShortName(const std::string& class_descriptor, const std::string& method) {
+ // Remove the leading 'L' and trailing ';'...
+ std::string class_name(class_descriptor);
+ CHECK_EQ(class_name[0], 'L') << class_name;
+ CHECK_EQ(class_name[class_name.size() - 1], ';') << class_name;
+ class_name.erase(0, 1);
+ class_name.erase(class_name.size() - 1, 1);
+
+ std::string short_name;
+ short_name += "Java_";
+ short_name += MangleForJni(class_name);
+ short_name += "_";
+ short_name += MangleForJni(method);
+ return short_name;
+}
+
// See http://java.sun.com/j2se/1.5.0/docs/guide/jni/spec/design.html#wp615 for the full rules.
std::string MangleForJni(const std::string& s) {
std::string result;
@@ -788,49 +804,58 @@
*task_cpu = strtoull(fields[36].c_str(), nullptr, 10);
}
-const char* GetAndroidRoot() {
- const char* android_root = getenv("ANDROID_ROOT");
- if (android_root == nullptr) {
- if (OS::DirectoryExists("/system")) {
- android_root = "/system";
+static const char* GetAndroidDirSafe(const char* env_var,
+ const char* default_dir,
+ std::string* error_msg) {
+ const char* android_dir = getenv(env_var);
+ if (android_dir == nullptr) {
+ if (OS::DirectoryExists(default_dir)) {
+ android_dir = default_dir;
} else {
- LOG(FATAL) << "ANDROID_ROOT not set and /system does not exist";
- return "";
+ *error_msg = StringPrintf("%s not set and %s does not exist", env_var, default_dir);
+ return nullptr;
}
}
- if (!OS::DirectoryExists(android_root)) {
- LOG(FATAL) << "Failed to find ANDROID_ROOT directory " << android_root;
- return "";
+ if (!OS::DirectoryExists(android_dir)) {
+ *error_msg = StringPrintf("Failed to find %s directory %s", env_var, android_dir);
+ return nullptr;
}
- return android_root;
+ return android_dir;
}
-const char* GetAndroidData() {
+const char* GetAndroidDir(const char* env_var, const char* default_dir) {
std::string error_msg;
- const char* dir = GetAndroidDataSafe(&error_msg);
+ const char* dir = GetAndroidDirSafe(env_var, default_dir, &error_msg);
if (dir != nullptr) {
return dir;
} else {
LOG(FATAL) << error_msg;
- return "";
+ return nullptr;
}
}
+const char* GetAndroidRoot() {
+ return GetAndroidDir("ANDROID_ROOT", "/system");
+}
+
+const char* GetAndroidRootSafe(std::string* error_msg) {
+ return GetAndroidDirSafe("ANDROID_ROOT", "/system", error_msg);
+}
+
+const char* GetAndroidData() {
+ return GetAndroidDir("ANDROID_DATA", "/data");
+}
+
const char* GetAndroidDataSafe(std::string* error_msg) {
- const char* android_data = getenv("ANDROID_DATA");
- if (android_data == nullptr) {
- if (OS::DirectoryExists("/data")) {
- android_data = "/data";
- } else {
- *error_msg = "ANDROID_DATA not set and /data does not exist";
- return nullptr;
- }
+ return GetAndroidDirSafe("ANDROID_DATA", "/data", error_msg);
+}
+
+std::string GetDefaultBootImageLocation(std::string* error_msg) {
+ const char* android_root = GetAndroidRootSafe(error_msg);
+ if (android_root == nullptr) {
+ return "";
}
- if (!OS::DirectoryExists(android_data)) {
- *error_msg = StringPrintf("Failed to find ANDROID_DATA directory %s", android_data);
- return nullptr;
- }
- return android_data;
+ return StringPrintf("%s/framework/boot.art", android_root);
}
void GetDalvikCache(const char* subdir, const bool create_if_absent, std::string* dalvik_cache,
@@ -904,74 +929,6 @@
return filename;
}
-int ExecAndReturnCode(std::vector<std::string>& arg_vector, std::string* error_msg) {
- const std::string command_line(android::base::Join(arg_vector, ' '));
- CHECK_GE(arg_vector.size(), 1U) << command_line;
-
- // Convert the args to char pointers.
- const char* program = arg_vector[0].c_str();
- std::vector<char*> args;
- for (size_t i = 0; i < arg_vector.size(); ++i) {
- const std::string& arg = arg_vector[i];
- char* arg_str = const_cast<char*>(arg.c_str());
- CHECK(arg_str != nullptr) << i;
- args.push_back(arg_str);
- }
- args.push_back(nullptr);
-
- // fork and exec
- pid_t pid = fork();
- if (pid == 0) {
- // no allocation allowed between fork and exec
-
- // change process groups, so we don't get reaped by ProcessManager
- setpgid(0, 0);
-
- // (b/30160149): protect subprocesses from modifications to LD_LIBRARY_PATH, etc.
- // Use the snapshot of the environment from the time the runtime was created.
- char** envp = (Runtime::Current() == nullptr) ? nullptr : Runtime::Current()->GetEnvSnapshot();
- if (envp == nullptr) {
- execv(program, &args[0]);
- } else {
- execve(program, &args[0], envp);
- }
- PLOG(ERROR) << "Failed to execve(" << command_line << ")";
- // _exit to avoid atexit handlers in child.
- _exit(1);
- } else {
- if (pid == -1) {
- *error_msg = StringPrintf("Failed to execv(%s) because fork failed: %s",
- command_line.c_str(), strerror(errno));
- return -1;
- }
-
- // wait for subprocess to finish
- int status = -1;
- pid_t got_pid = TEMP_FAILURE_RETRY(waitpid(pid, &status, 0));
- if (got_pid != pid) {
- *error_msg = StringPrintf("Failed after fork for execv(%s) because waitpid failed: "
- "wanted %d, got %d: %s",
- command_line.c_str(), pid, got_pid, strerror(errno));
- return -1;
- }
- if (WIFEXITED(status)) {
- return WEXITSTATUS(status);
- }
- return -1;
- }
-}
-
-bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg) {
- int status = ExecAndReturnCode(arg_vector, error_msg);
- if (status != 0) {
- const std::string command_line(android::base::Join(arg_vector, ' '));
- *error_msg = StringPrintf("Failed execv(%s) because non-0 exit status",
- command_line.c_str());
- return false;
- }
- return true;
-}
-
bool FileExists(const std::string& filename) {
struct stat buffer;
return stat(filename.c_str(), &buffer) == 0;
diff --git a/runtime/utils.h b/runtime/utils.h
index 16ef706..67438b5 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -101,6 +101,8 @@
// of the JNI spec.
std::string MangleForJni(const std::string& s);
+std::string GetJniShortName(const std::string& class_name, const std::string& method_name);
+
// Turn "java.lang.String" into "Ljava/lang/String;".
std::string DotToDescriptor(const char* class_name);
@@ -143,12 +145,18 @@
// Find $ANDROID_ROOT, /system, or abort.
const char* GetAndroidRoot();
+// Find $ANDROID_ROOT, /system, or return null.
+const char* GetAndroidRootSafe(std::string* error_msg);
// Find $ANDROID_DATA, /data, or abort.
const char* GetAndroidData();
// Find $ANDROID_DATA, /data, or return null.
const char* GetAndroidDataSafe(std::string* error_msg);
+// Returns the default boot image location (ANDROID_ROOT/framework/boot.art).
+// Returns an empty string if ANDROID_ROOT is not set.
+std::string GetDefaultBootImageLocation(std::string* error_msg);
+
// Returns the dalvik-cache location, with subdir appended. Returns the empty string if the cache
// could not be found.
std::string GetDalvikCache(const char* subdir);
@@ -167,13 +175,6 @@
// Returns the system location for an image
std::string GetSystemImageFilename(const char* location, InstructionSet isa);
-// Wrapper on fork/execv to run a command in a subprocess.
-// Both of these spawn child processes using the environment as it was set when the single instance
-// of the runtime (Runtime::Current()) was started. If no instance of the runtime was started, it
-// will use the current environment settings.
-bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg);
-int ExecAndReturnCode(std::vector<std::string>& arg_vector, std::string* error_msg);
-
// Returns true if the file exists.
bool FileExists(const std::string& filename);
bool FileExistsAndNotEmpty(const std::string& filename);
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index 82d92fc..02f1e1b 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -21,6 +21,7 @@
#include "base/enums.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
+#include "exec_utils.h"
#include "mirror/array.h"
#include "mirror/array-inl.h"
#include "mirror/object-inl.h"
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index dabf8c8..2481c8b 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -49,10 +49,10 @@
DCHECK(IsVersionValid());
}
-VdexFile* VdexFile::Open(const std::string& vdex_filename,
- bool writable,
- bool low_4gb,
- std::string* error_msg) {
+std::unique_ptr<VdexFile> VdexFile::Open(const std::string& vdex_filename,
+ bool writable,
+ bool low_4gb,
+ std::string* error_msg) {
if (!OS::FileExists(vdex_filename.c_str())) {
*error_msg = "File " + vdex_filename + " does not exist.";
return nullptr;
@@ -79,12 +79,12 @@
return Open(vdex_file->Fd(), vdex_length, vdex_filename, writable, low_4gb, error_msg);
}
-VdexFile* VdexFile::Open(int file_fd,
- size_t vdex_length,
- const std::string& vdex_filename,
- bool writable,
- bool low_4gb,
- std::string* error_msg) {
+std::unique_ptr<VdexFile> VdexFile::Open(int file_fd,
+ size_t vdex_length,
+ const std::string& vdex_filename,
+ bool writable,
+ bool low_4gb,
+ std::string* error_msg) {
std::unique_ptr<MemMap> mmap(MemMap::MapFile(vdex_length,
writable ? PROT_READ | PROT_WRITE : PROT_READ,
MAP_SHARED,
@@ -98,8 +98,14 @@
return nullptr;
}
+ std::unique_ptr<VdexFile> vdex(new VdexFile(mmap.release()));
+ if (!vdex->IsValid()) {
+ *error_msg = "Vdex file is not valid";
+ return nullptr;
+ }
+
*error_msg = "Success";
- return new VdexFile(mmap.release());
+ return vdex;
}
const uint8_t* VdexFile::GetNextDexFileData(const uint8_t* cursor) const {
diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h
index bb9844a..7daf2f8 100644
--- a/runtime/vdex_file.h
+++ b/runtime/vdex_file.h
@@ -61,7 +61,7 @@
private:
static constexpr uint8_t kVdexMagic[] = { 'v', 'd', 'e', 'x' };
- static constexpr uint8_t kVdexVersion[] = { '0', '0', '2', '\0' }; // Handle verify-profile
+ static constexpr uint8_t kVdexVersion[] = { '0', '0', '3', '\0' }; // Remove verify-profile
uint8_t magic_[4];
uint8_t version_[4];
@@ -73,17 +73,19 @@
typedef uint32_t VdexChecksum;
- static VdexFile* Open(const std::string& vdex_filename,
- bool writable,
- bool low_4gb,
- std::string* error_msg);
+ // Returns nullptr if the vdex file cannot be opened or is not valid.
+ static std::unique_ptr<VdexFile> Open(const std::string& vdex_filename,
+ bool writable,
+ bool low_4gb,
+ std::string* error_msg);
- static VdexFile* Open(int file_fd,
- size_t vdex_length,
- const std::string& vdex_filename,
- bool writable,
- bool low_4gb,
- std::string* error_msg);
+ // Returns nullptr if the vdex file cannot be opened or is not valid.
+ static std::unique_ptr<VdexFile> Open(int file_fd,
+ size_t vdex_length,
+ const std::string& vdex_filename,
+ bool writable,
+ bool low_4gb,
+ std::string* error_msg);
const uint8_t* Begin() const { return mmap_->Begin(); }
const uint8_t* End() const { return mmap_->End(); }
diff --git a/runtime/vdex_file_test.cc b/runtime/vdex_file_test.cc
new file mode 100644
index 0000000..909e117
--- /dev/null
+++ b/runtime/vdex_file_test.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "vdex_file.h"
+
+#include <string>
+
+#include <gtest/gtest.h>
+
+#include "common_runtime_test.h"
+
+namespace art {
+
+class VdexFileTest : public CommonRuntimeTest {
+};
+
+TEST_F(VdexFileTest, OpenEmptyVdex) {
+ // Verify we fail to open an empty vdex file.
+ ScratchFile tmp;
+ std::string error_msg;
+ std::unique_ptr<VdexFile> vdex = VdexFile::Open(tmp.GetFd(),
+ 0,
+ tmp.GetFilename(),
+ /*writable*/false,
+ /*low_4gb*/false,
+ &error_msg);
+ EXPECT_TRUE(vdex == nullptr);
+
+ vdex = VdexFile::Open(tmp.GetFilename(), /*writable*/false, /*low_4gb*/false, &error_msg);
+ EXPECT_TRUE(vdex == nullptr);
+}
+
+} // namespace art
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index b915457..5f55f3f 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -415,12 +415,12 @@
result.kind = kSoftFailure;
if (method != nullptr &&
!CanCompilerHandleVerificationFailure(verifier.encountered_failure_types_)) {
- method->AddAccessFlags(kAccCompileDontBother);
+ method->SetDontCompile();
}
}
if (method != nullptr) {
if (verifier.HasInstructionThatWillThrow()) {
- method->AddAccessFlags(kAccCompileDontBother);
+ method->SetDontCompile();
if (Runtime::Current()->IsAotCompiler() &&
(callbacks != nullptr) && !callbacks->IsBootImage()) {
// When compiling apps, make HasInstructionThatWillThrow a soft error to trigger
diff --git a/runtime/verify_object-inl.h b/runtime/verify_object-inl.h
index 43151dd..363fde2 100644
--- a/runtime/verify_object-inl.h
+++ b/runtime/verify_object-inl.h
@@ -19,33 +19,11 @@
#include "verify_object.h"
-#include "gc/heap.h"
#include "mirror/object-inl.h"
#include "obj_ptr-inl.h"
namespace art {
-inline void VerifyObject(ObjPtr<mirror::Object> obj) {
- if (kVerifyObjectSupport > kVerifyObjectModeDisabled && obj != nullptr) {
- if (kVerifyObjectSupport > kVerifyObjectModeFast) {
- // Slow object verification, try the heap right away.
- Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
- } else {
- // Fast object verification, only call the heap if our quick sanity tests fail. The heap will
- // print the diagnostic message.
- bool failed = !IsAligned<kObjectAlignment>(obj.Ptr());
- if (!failed) {
- mirror::Class* c = obj->GetClass<kVerifyNone>();
- failed = failed || !IsAligned<kObjectAlignment>(c);
- failed = failed || !VerifyClassClass(c);
- }
- if (UNLIKELY(failed)) {
- Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
- }
- }
- }
-}
-
inline bool VerifyClassClass(ObjPtr<mirror::Class> c) {
if (UNLIKELY(c == nullptr)) {
return false;
diff --git a/runtime/verify_object.cc b/runtime/verify_object.cc
new file mode 100644
index 0000000..a031a07
--- /dev/null
+++ b/runtime/verify_object.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "verify_object-inl.h"
+
+#include "base/bit_utils.h"
+#include "gc/heap.h"
+#include "globals.h"
+#include "mirror/object-inl.h"
+#include "obj_ptr-inl.h"
+#include "runtime.h"
+
+namespace art {
+
+void VerifyObjectImpl(ObjPtr<mirror::Object> obj) {
+ if (kVerifyObjectSupport > kVerifyObjectModeFast) {
+ // Slow object verification, try the heap right away.
+ Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
+ } else {
+ // Fast object verification, only call the heap if our quick sanity tests fail. The heap will
+ // print the diagnostic message.
+ bool failed = !IsAligned<kObjectAlignment>(obj.Ptr());
+ if (!failed) {
+ mirror::Class* c = obj->GetClass<kVerifyNone>();
+ failed = failed || !IsAligned<kObjectAlignment>(c);
+ failed = failed || !VerifyClassClass(c);
+ }
+ if (UNLIKELY(failed)) {
+ Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
+ }
+ }
+}
+
+} // namespace art
diff --git a/runtime/verify_object.h b/runtime/verify_object.h
index 384e56f..519f7f5 100644
--- a/runtime/verify_object.h
+++ b/runtime/verify_object.h
@@ -53,7 +53,16 @@
static constexpr VerifyObjectMode kVerifyObjectSupport =
kDefaultVerifyFlags != 0 ? kVerifyObjectModeFast : kVerifyObjectModeDisabled;
-ALWAYS_INLINE void VerifyObject(ObjPtr<mirror::Object> obj) NO_THREAD_SAFETY_ANALYSIS;
+// Implements the actual object checks.
+void VerifyObjectImpl(ObjPtr<mirror::Object> obj) NO_THREAD_SAFETY_ANALYSIS;
+
+// Is a front to optimize out any calls if no verification is enabled.
+ALWAYS_INLINE
+static inline void VerifyObject(ObjPtr<mirror::Object> obj) NO_THREAD_SAFETY_ANALYSIS {
+ if (kVerifyObjectSupport > kVerifyObjectModeDisabled && obj != nullptr) {
+ VerifyObjectImpl(obj);
+ }
+}
// Check that c.getClass() == c.getClass().getClass().
ALWAYS_INLINE bool VerifyClassClass(ObjPtr<mirror::Class> c) NO_THREAD_SAFETY_ANALYSIS;
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 507ea16..2610252 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -51,7 +51,6 @@
jclass WellKnownClasses::java_lang_ClassNotFoundException;
jclass WellKnownClasses::java_lang_Daemons;
jclass WellKnownClasses::java_lang_Error;
-jclass WellKnownClasses::java_lang_ExceptionInInitializerError;
jclass WellKnownClasses::java_lang_invoke_MethodHandle;
jclass WellKnownClasses::java_lang_IllegalAccessError;
jclass WellKnownClasses::java_lang_NoClassDefFoundError;
@@ -290,7 +289,6 @@
java_lang_Object = CacheClass(env, "java/lang/Object");
java_lang_OutOfMemoryError = CacheClass(env, "java/lang/OutOfMemoryError");
java_lang_Error = CacheClass(env, "java/lang/Error");
- java_lang_ExceptionInInitializerError = CacheClass(env, "java/lang/ExceptionInInitializerError");
java_lang_IllegalAccessError = CacheClass(env, "java/lang/IllegalAccessError");
java_lang_invoke_MethodHandle = CacheClass(env, "java/lang/invoke/MethodHandle");
java_lang_NoClassDefFoundError = CacheClass(env, "java/lang/NoClassDefFoundError");
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index b3ce3d1..db8a53c 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -61,7 +61,6 @@
static jclass java_lang_ClassNotFoundException;
static jclass java_lang_Daemons;
static jclass java_lang_Error;
- static jclass java_lang_ExceptionInInitializerError;
static jclass java_lang_IllegalAccessError;
static jclass java_lang_invoke_MethodHandle;
static jclass java_lang_NoClassDefFoundError;
diff --git a/test/004-NativeAllocations/src/Main.java b/test/004-NativeAllocations/src/Main.java
index 92f4e21..8712755 100644
--- a/test/004-NativeAllocations/src/Main.java
+++ b/test/004-NativeAllocations/src/Main.java
@@ -16,6 +16,7 @@
import java.lang.reflect.*;
import java.lang.Runtime;
+import dalvik.system.VMRuntime;
public class Main {
static Object nativeLock = new Object();
@@ -33,10 +34,19 @@
NativeAllocation(int bytes, boolean testingDeadlock) throws Exception {
this.bytes = bytes;
register_native_allocation.invoke(runtime, bytes);
+
+ // Register native allocation can only provide guarantees bounding
+ // the maximum outstanding allocations if finalizers don't time
+ // out. In case finalizers have timed out, wait longer for them
+ // now to complete so we can test the guarantees.
+ if (!testingDeadlock) {
+ VMRuntime.runFinalization(0);
+ }
+
synchronized (nativeLock) {
if (!testingDeadlock) {
nativeBytes += bytes;
- if (nativeBytes > maxMem) {
+ if (nativeBytes > 2 * maxMem) {
throw new OutOfMemoryError();
}
}
diff --git a/test/008-exceptions/expected.txt b/test/008-exceptions/expected.txt
index 083ecf7..fcf2ef4 100644
--- a/test/008-exceptions/expected.txt
+++ b/test/008-exceptions/expected.txt
@@ -1,11 +1,11 @@
Got an NPE: second throw
java.lang.NullPointerException: second throw
- at Main.catchAndRethrow(Main.java:77)
- at Main.exceptions_007(Main.java:59)
- at Main.main(Main.java:67)
+ at Main.catchAndRethrow(Main.java:94)
+ at Main.exceptions_007(Main.java:74)
+ at Main.main(Main.java:82)
Caused by: java.lang.NullPointerException: first throw
- at Main.throwNullPointerException(Main.java:84)
- at Main.catchAndRethrow(Main.java:74)
+ at Main.throwNullPointerException(Main.java:101)
+ at Main.catchAndRethrow(Main.java:91)
... 2 more
Static Init
BadError: This is bad by convention: BadInit
@@ -15,3 +15,11 @@
BadErrorNoStringInit: This is bad by convention
java.lang.NoClassDefFoundError: BadInitNoStringInit
BadErrorNoStringInit: This is bad by convention
+BadSuperClass Static Init
+BadError: This is bad by convention: BadInit
+MultiDexBadInit Static Init
+java.lang.Error: MultiDexBadInit
+java.lang.NoClassDefFoundError: MultiDexBadInit
+ cause: java.lang.Error: MultiDexBadInit
+java.lang.NoClassDefFoundError: MultiDexBadInit
+ cause: java.lang.Error: MultiDexBadInit
diff --git a/test/008-exceptions/multidex.jpp b/test/008-exceptions/multidex.jpp
new file mode 100644
index 0000000..a3746f5
--- /dev/null
+++ b/test/008-exceptions/multidex.jpp
@@ -0,0 +1,27 @@
+BadError:
+ @@com.android.jack.annotations.ForceInMainDex
+ class BadError
+BadInit:
+ @@com.android.jack.annotations.ForceInMainDex
+ class BadInit
+BadErrorNoStringInit:
+ @@com.android.jack.annotations.ForceInMainDex
+ class BadErrorNoStringInit
+BadInitNoStringInit:
+ @@com.android.jack.annotations.ForceInMainDex
+ class BadInitNoStringInit
+BadSuperClass:
+ @@com.android.jack.annotations.ForceInMainDex
+ class BadSuperClass
+DerivedFromBadSuperClass:
+ @@com.android.jack.annotations.ForceInMainDex
+ class DerivedFromBadSuperClass
+Main:
+ @@com.android.jack.annotations.ForceInMainDex
+ class Main
+MultiDexBadInit:
+ @@com.android.jack.annotations.ForceInMainDex
+ class MultiDexBadInit
+MultiDexBadInitWrapper1:
+ @@com.android.jack.annotations.ForceInMainDex
+ class MultiDexBadInitWrapper1
diff --git a/test/008-exceptions/src-multidex/MultiDexBadInitWrapper2.java b/test/008-exceptions/src-multidex/MultiDexBadInitWrapper2.java
new file mode 100644
index 0000000..f3953bd
--- /dev/null
+++ b/test/008-exceptions/src-multidex/MultiDexBadInitWrapper2.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class MultiDexBadInitWrapper2 {
+ public static void setDummy(int value) {
+ if (doThrow) { throw new Error(); }
+ MultiDexBadInit.dummy = value;
+ }
+
+ public static boolean doThrow = false;
+}
diff --git a/test/008-exceptions/src/Main.java b/test/008-exceptions/src/Main.java
index b8231f1..74af00c 100644
--- a/test/008-exceptions/src/Main.java
+++ b/test/008-exceptions/src/Main.java
@@ -50,6 +50,21 @@
}
}
+// A class that throws BadError during static initialization, serving as a super class.
+class BadSuperClass {
+ static int dummy;
+ static {
+ System.out.println("BadSuperClass Static Init");
+ if (true) {
+ throw new BadError("BadInit");
+ }
+ }
+}
+
+// A class that derives from BadSuperClass.
+class DerivedFromBadSuperClass extends BadSuperClass {
+}
+
/**
* Exceptions across method calls
*/
@@ -63,10 +78,12 @@
npe.printStackTrace(System.out);
}
}
- public static void main (String args[]) {
+ public static void main(String args[]) {
exceptions_007();
exceptionsRethrowClassInitFailure();
exceptionsRethrowClassInitFailureNoStringInit();
+ exceptionsForSuperClassInitFailure();
+ exceptionsInMultiDex();
}
private static void catchAndRethrow() {
@@ -129,4 +146,70 @@
error.printStackTrace(System.out);
}
}
+
+ private static void exceptionsForSuperClassInitFailure() {
+ try {
+ // Resolve DerivedFromBadSuperClass.
+ BadSuperClass.dummy = 1;
+ throw new IllegalStateException("Should not reach here.");
+ } catch (BadError e) {
+ System.out.println(e);
+ } catch (Throwable t) {
+ t.printStackTrace();
+ }
+ try {
+ // Before splitting mirror::Class::kStatusError into
+ // kStatusErrorUnresolved and kStatusErrorResolved,
+ // this would trigger a
+ // CHECK(super_class->IsResolved())
+ // failure in
+ // ClassLinker::LoadSuperAndInterfaces().
+ // After the change we're getting either VerifyError
+ // (for Optimizing) or NoClassDefFoundError wrapping
+ // BadError (for interpreter or JIT).
+ new DerivedFromBadSuperClass();
+ throw new IllegalStateException("Should not reach here.");
+ } catch (NoClassDefFoundError ncdfe) {
+ if (!(ncdfe.getCause() instanceof BadError)) {
+ ncdfe.getCause().printStackTrace();
+ }
+ } catch (VerifyError e) {
+ } catch (Throwable t) {
+ t.printStackTrace();
+ }
+ }
+
+ private static void exceptionsInMultiDex() {
+ try {
+ MultiDexBadInit.dummy = 1;
+ throw new IllegalStateException("Should not reach here.");
+ } catch (Error e) {
+ System.out.println(e);
+ } catch (Throwable t) {
+ t.printStackTrace();
+ }
+ // Before splitting mirror::Class::kStatusError into
+ // kStatusErrorUnresolved and kStatusErrorResolved,
+ // the exception from wrapper 1 would have been
+ // wrapped in NoClassDefFoundError but the exception
+ // from wrapper 2 would have been unwrapped.
+ try {
+ MultiDexBadInitWrapper1.setDummy(1);
+ throw new IllegalStateException("Should not reach here.");
+ } catch (NoClassDefFoundError ncdfe) {
+ System.out.println(ncdfe);
+ System.out.println(" cause: " + ncdfe.getCause());
+ } catch (Throwable t) {
+ t.printStackTrace();
+ }
+ try {
+ MultiDexBadInitWrapper2.setDummy(1);
+ throw new IllegalStateException("Should not reach here.");
+ } catch (NoClassDefFoundError ncdfe) {
+ System.out.println(ncdfe);
+ System.out.println(" cause: " + ncdfe.getCause());
+ } catch (Throwable t) {
+ t.printStackTrace();
+ }
+ }
}
diff --git a/test/008-exceptions/src/MultiDexBadInit.java b/test/008-exceptions/src/MultiDexBadInit.java
new file mode 100644
index 0000000..e3ebb9c
--- /dev/null
+++ b/test/008-exceptions/src/MultiDexBadInit.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class MultiDexBadInit {
+ static int dummy;
+ static {
+ System.out.println("MultiDexBadInit Static Init");
+ if (true) {
+ throw new Error("MultiDexBadInit");
+ }
+ }
+}
diff --git a/test/008-exceptions/src/MultiDexBadInitWrapper1.java b/test/008-exceptions/src/MultiDexBadInitWrapper1.java
new file mode 100644
index 0000000..059e6a3
--- /dev/null
+++ b/test/008-exceptions/src/MultiDexBadInitWrapper1.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class MultiDexBadInitWrapper1 {
+ public static void setDummy(int value) {
+ if (doThrow) { throw new Error(); }
+ MultiDexBadInit.dummy = value;
+ }
+
+ public static boolean doThrow = false;
+}
diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java
index 06f193a..072f0e6 100644
--- a/test/082-inline-execute/src/Main.java
+++ b/test/082-inline-execute/src/Main.java
@@ -535,6 +535,8 @@
Assert.assertEquals(Math.min(0.0f, Float.MAX_VALUE), 0.0f);
Assert.assertEquals(Math.min(Float.MIN_VALUE, 0.0f), 0.0f);
Assert.assertEquals(Math.min(Float.MIN_VALUE, Float.MAX_VALUE), Float.MIN_VALUE);
+ // Should not have flush-to-zero behavior.
+ Assert.assertEquals(Math.min(Float.MIN_VALUE, Float.MIN_VALUE), Float.MIN_VALUE);
}
public static void test_Math_max_F() {
@@ -548,8 +550,10 @@
Assert.assertEquals(Math.max(1.0f, 0.0f), 1.0f);
Assert.assertEquals(Math.max(0.0f, 1.0f), 1.0f);
Assert.assertEquals(Math.max(0.0f, Float.MAX_VALUE), Float.MAX_VALUE);
- Assert.assertEquals(Math.max(Float.MIN_VALUE, 0.0f), Float.MIN_VALUE);
Assert.assertEquals(Math.max(Float.MIN_VALUE, Float.MAX_VALUE), Float.MAX_VALUE);
+ // Should not have flush-to-zero behavior.
+ Assert.assertEquals(Math.max(Float.MIN_VALUE, 0.0f), Float.MIN_VALUE);
+ Assert.assertEquals(Math.max(Float.MIN_VALUE, Float.MIN_VALUE), Float.MIN_VALUE);
}
public static void test_Math_min_D() {
@@ -565,6 +569,8 @@
Assert.assertEquals(Math.min(0.0d, Double.MAX_VALUE), 0.0d);
Assert.assertEquals(Math.min(Double.MIN_VALUE, 0.0d), 0.0d);
Assert.assertEquals(Math.min(Double.MIN_VALUE, Double.MAX_VALUE), Double.MIN_VALUE);
+ // Should not have flush-to-zero behavior.
+ Assert.assertEquals(Math.min(Double.MIN_VALUE, Double.MIN_VALUE), Double.MIN_VALUE);
}
public static void test_Math_max_D() {
@@ -580,6 +586,9 @@
Assert.assertEquals(Math.max(0.0d, Double.MAX_VALUE), Double.MAX_VALUE);
Assert.assertEquals(Math.max(Double.MIN_VALUE, 0.0d), Double.MIN_VALUE);
Assert.assertEquals(Math.max(Double.MIN_VALUE, Double.MAX_VALUE), Double.MAX_VALUE);
+ // Should not have flush-to-zero behavior.
+ Assert.assertEquals(Math.max(Double.MIN_VALUE, 0.0d), Double.MIN_VALUE);
+ Assert.assertEquals(Math.max(Double.MIN_VALUE, Double.MIN_VALUE), Double.MIN_VALUE);
}
public static void test_Math_sqrt() {
@@ -730,16 +739,19 @@
Math.rint(+2.1);
Assert.assertEquals(Math.rint(+0.0), +0.0d, 0.0);
Assert.assertEquals(Math.rint(-0.0), -0.0d, 0.0);
+ Assert.assertEquals(Math.rint(+0.5), +0.0d, 0.0); // expects tie-to-even
Assert.assertEquals(Math.rint(+2.0), +2.0d, 0.0);
Assert.assertEquals(Math.rint(+2.1), +2.0d, 0.0);
- Assert.assertEquals(Math.rint(+2.5), +2.0d, 0.0);
+ Assert.assertEquals(Math.rint(+2.5), +2.0d, 0.0); // expects tie-to-even
Assert.assertEquals(Math.rint(+2.9), +3.0d, 0.0);
Assert.assertEquals(Math.rint(+3.0), +3.0d, 0.0);
+ Assert.assertEquals(Math.rint(+3.5), +4.0d, 0.0); // expects tie-to-even
Assert.assertEquals(Math.rint(-2.0), -2.0d, 0.0);
Assert.assertEquals(Math.rint(-2.1), -2.0d, 0.0);
- Assert.assertEquals(Math.rint(-2.5), -2.0d, 0.0);
+ Assert.assertEquals(Math.rint(-2.5), -2.0d, 0.0); // expects tie-to-even
Assert.assertEquals(Math.rint(-2.9), -3.0d, 0.0);
Assert.assertEquals(Math.rint(-3.0), -3.0d, 0.0);
+ Assert.assertEquals(Math.rint(-3.5), -4.0d, 0.0); // expects tie-to-even
// 2^52 - 1.5
Assert.assertEquals(Math.rint(Double.longBitsToDouble(0x432FFFFFFFFFFFFDl)),
Double.longBitsToDouble(0x432FFFFFFFFFFFFCl), 0.0);
diff --git a/test/142-classloader2/expected.txt b/test/142-classloader2/expected.txt
index 86f5e22..056d978 100644
--- a/test/142-classloader2/expected.txt
+++ b/test/142-classloader2/expected.txt
@@ -1 +1,5 @@
+Loaded class B.
+Caught VerifyError.
+Loaded class B.
+Caught wrapped VerifyError.
Everything OK.
diff --git a/test/142-classloader2/src/Main.java b/test/142-classloader2/src/Main.java
index 80b00e7..a0c7764 100644
--- a/test/142-classloader2/src/Main.java
+++ b/test/142-classloader2/src/Main.java
@@ -74,16 +74,25 @@
// Try to load a dex file with bad dex code. Use new instance to force verification.
try {
Class<?> badClass = Main.class.getClassLoader().loadClass("B");
+ System.out.println("Loaded class B.");
badClass.newInstance();
- System.out.println("Should not be able to load class from bad dex file.");
+ System.out.println("Should not be able to instantiate B with bad dex bytecode.");
} catch (VerifyError e) {
+ System.out.println("Caught VerifyError.");
}
// Make sure the same error is rethrown when reloading the bad class.
try {
Class<?> badClass = Main.class.getClassLoader().loadClass("B");
- System.out.println("Should not be able to load class from bad dex file.");
- } catch (VerifyError e) {
+ System.out.println("Loaded class B.");
+ badClass.newInstance();
+ System.out.println("Should not be able to instantiate B with bad dex bytecode.");
+ } catch (NoClassDefFoundError e) {
+ if (e.getCause() instanceof VerifyError) {
+ System.out.println("Caught wrapped VerifyError.");
+ } else {
+ e.printStackTrace();
+ }
}
System.out.println("Everything OK.");
diff --git a/test/154-gc-loop/expected.txt b/test/154-gc-loop/expected.txt
new file mode 100644
index 0000000..6106818
--- /dev/null
+++ b/test/154-gc-loop/expected.txt
@@ -0,0 +1,2 @@
+JNI_OnLoad called
+Finalize count too large: false
diff --git a/test/154-gc-loop/heap_interface.cc b/test/154-gc-loop/heap_interface.cc
new file mode 100644
index 0000000..8d610a8
--- /dev/null
+++ b/test/154-gc-loop/heap_interface.cc
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gc/heap.h"
+#include "runtime.h"
+
+namespace art {
+namespace {
+
+extern "C" JNIEXPORT void JNICALL Java_Main_backgroundProcessState(JNIEnv*, jclass) {
+ Runtime::Current()->UpdateProcessState(kProcessStateJankImperceptible);
+}
+
+} // namespace
+} // namespace art
diff --git a/test/154-gc-loop/info.txt b/test/154-gc-loop/info.txt
new file mode 100644
index 0000000..f599db1
--- /dev/null
+++ b/test/154-gc-loop/info.txt
@@ -0,0 +1 @@
+Test that GC doesn't happen too often for a few small allocations.
diff --git a/test/154-gc-loop/src/Main.java b/test/154-gc-loop/src/Main.java
new file mode 100644
index 0000000..3a256c1
--- /dev/null
+++ b/test/154-gc-loop/src/Main.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.ref.WeakReference;
+
+public class Main {
+ static final class GcWatcher {
+ protected void finalize() throws Throwable {
+ watcher = new WeakReference<GcWatcher>(new GcWatcher());
+ ++finalizeCounter;
+ }
+ }
+ static WeakReference<GcWatcher> watcher = new WeakReference<GcWatcher>(new GcWatcher());
+ static Object o = new Object();
+ static int finalizeCounter = 0;
+
+ public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+ backgroundProcessState();
+ try {
+ Runtime.getRuntime().gc();
+ for (int i = 0; i < 10; ++i) {
+ o = new Object();
+ Thread.sleep(1000);
+ }
+ } catch (Exception e) {}
+ System.out.println("Finalize count too large: " +
+ ((finalizeCounter >= 10) ? Integer.toString(finalizeCounter) : "false"));
+ }
+
+ private static native void backgroundProcessState();
+}
diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc
index d3a033b..6cea673 100644
--- a/test/466-get-live-vreg/get_live_vreg_jni.cc
+++ b/test/466-get-live-vreg/get_live_vreg_jni.cc
@@ -47,7 +47,7 @@
uint32_t value = 0;
if (GetCurrentQuickFrame() != nullptr &&
GetCurrentOatQuickMethodHeader()->IsOptimized() &&
- !Runtime::Current()->IsDebuggable()) {
+ !Runtime::Current()->IsJavaDebuggable()) {
CHECK_EQ(GetVReg(m, dex_register_of_first_parameter, kIntVReg, &value), false);
} else {
CHECK(GetVReg(m, dex_register_of_first_parameter, kIntVReg, &value));
diff --git a/test/482-checker-loop-back-edge-use/src/Main.java b/test/482-checker-loop-back-edge-use/src/Main.java
index 65dfd41..86977d1 100644
--- a/test/482-checker-loop-back-edge-use/src/Main.java
+++ b/test/482-checker-loop-back-edge-use/src/Main.java
@@ -164,6 +164,12 @@
}
}
+
+ static boolean $opt$noinline$ensureSideEffects() {
+ if (doThrow) throw new Error("");
+ return true;
+ }
+
/// CHECK-START: void Main.loop9() liveness (after)
/// CHECK: <<Arg:z\d+>> StaticFieldGet liveness:<<ArgLiv:\d+>> ranges:{[<<ArgLiv>>,<<ArgLoopUse:\d+>>)} uses:[<<ArgUse:\d+>>,<<ArgLoopUse>>]
/// CHECK: If [<<Arg>>] liveness:<<IfLiv:\d+>>
@@ -178,7 +184,7 @@
// Add some code at entry to avoid having the entry block be a pre header.
// This avoids having to create a synthesized block.
System.out.println("Enter");
- while (Runtime.getRuntime() != null) {
+ while ($opt$noinline$ensureSideEffects()) {
// 'incoming' must only have a use in the inner loop.
boolean incoming = field;
while (incoming) {}
@@ -189,4 +195,5 @@
}
static boolean field;
+ static boolean doThrow = false;
}
diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java
index db43768..dd77423 100644
--- a/test/552-checker-sharpening/src/Main.java
+++ b/test/552-checker-sharpening/src/Main.java
@@ -52,7 +52,6 @@
/// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
/// CHECK-START-MIPS: int Main.testSimple(int) sharpening (after)
- /// CHECK-NOT: MipsDexCacheArraysBase
/// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
/// CHECK-START-MIPS64: int Main.testSimple(int) sharpening (after)
@@ -69,10 +68,6 @@
/// CHECK: ArmDexCacheArraysBase
/// CHECK-NOT: ArmDexCacheArraysBase
- /// CHECK-START-MIPS: int Main.testSimple(int) dex_cache_array_fixups_mips (after)
- /// CHECK: MipsDexCacheArraysBase
- /// CHECK-NOT: MipsDexCacheArraysBase
-
/// CHECK-START-X86: int Main.testSimple(int) pc_relative_fixups_x86 (after)
/// CHECK: X86ComputeBaseMethodAddress
/// CHECK-NOT: X86ComputeBaseMethodAddress
@@ -95,7 +90,6 @@
/// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
/// CHECK-START-MIPS: int Main.testDiamond(boolean, int) sharpening (after)
- /// CHECK-NOT: MipsDexCacheArraysBase
/// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
/// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
@@ -120,14 +114,6 @@
/// CHECK: ArmDexCacheArraysBase
/// CHECK-NEXT: If
- /// CHECK-START-MIPS: int Main.testDiamond(boolean, int) dex_cache_array_fixups_mips (after)
- /// CHECK: MipsDexCacheArraysBase
- /// CHECK-NOT: MipsDexCacheArraysBase
-
- /// CHECK-START-MIPS: int Main.testDiamond(boolean, int) dex_cache_array_fixups_mips (after)
- /// CHECK: MipsDexCacheArraysBase
- /// CHECK-NEXT: If
-
/// CHECK-START-X86: int Main.testDiamond(boolean, int) pc_relative_fixups_x86 (after)
/// CHECK: X86ComputeBaseMethodAddress
/// CHECK-NOT: X86ComputeBaseMethodAddress
@@ -182,24 +168,6 @@
/// CHECK: begin_block
/// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
- /// CHECK-START-MIPS: int Main.testLoop(int[], int) dex_cache_array_fixups_mips (before)
- /// CHECK-NOT: MipsDexCacheArraysBase
-
- /// CHECK-START-MIPS: int Main.testLoop(int[], int) dex_cache_array_fixups_mips (after)
- /// CHECK: MipsDexCacheArraysBase
- /// CHECK-NOT: MipsDexCacheArraysBase
-
- /// CHECK-START-MIPS: int Main.testLoop(int[], int) dex_cache_array_fixups_mips (after)
- /// CHECK: InvokeStaticOrDirect
- /// CHECK-NOT: InvokeStaticOrDirect
-
- /// CHECK-START-MIPS: int Main.testLoop(int[], int) dex_cache_array_fixups_mips (after)
- /// CHECK: ArrayLength
- /// CHECK-NEXT: MipsDexCacheArraysBase
- /// CHECK-NEXT: Goto
- /// CHECK: begin_block
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
-
public static int testLoop(int[] array, int x) {
// PC-relative bases used by ARM, MIPS and X86 should be pulled before the loop.
for (int i : array) {
@@ -228,16 +196,6 @@
/// CHECK-NEXT: ArmDexCacheArraysBase
/// CHECK-NEXT: Goto
- /// CHECK-START-MIPS: int Main.testLoopWithDiamond(int[], boolean, int) dex_cache_array_fixups_mips (before)
- /// CHECK-NOT: MipsDexCacheArraysBase
-
- /// CHECK-START-MIPS: int Main.testLoopWithDiamond(int[], boolean, int) dex_cache_array_fixups_mips (after)
- /// CHECK: If
- /// CHECK: begin_block
- /// CHECK: ArrayLength
- /// CHECK-NEXT: MipsDexCacheArraysBase
- /// CHECK-NEXT: Goto
-
public static int testLoopWithDiamond(int[] array, boolean negate, int x) {
// PC-relative bases used by ARM, MIPS and X86 should be pulled before the loop
// but not outside the if.
@@ -325,9 +283,6 @@
return "non-boot-image-string";
}
- /// CHECK-START: java.lang.Class Main.$noinline$getStringClass() sharpening (before)
- /// CHECK: LoadClass load_kind:DexCacheViaMethod class_name:java.lang.String
-
/// CHECK-START-X86: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
// Note: load kind depends on PIC/non-PIC
// TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
@@ -365,9 +320,6 @@
return String.class;
}
- /// CHECK-START: java.lang.Class Main.$noinline$getOtherClass() sharpening (before)
- /// CHECK: LoadClass load_kind:DexCacheViaMethod class_name:Other
-
/// CHECK-START-X86: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
/// CHECK: LoadClass load_kind:BssEntry class_name:Other
@@ -381,20 +333,12 @@
/// CHECK-START-ARM: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
/// CHECK: LoadClass load_kind:BssEntry class_name:Other
- /// CHECK-START-ARM: java.lang.Class Main.$noinline$getOtherClass() dex_cache_array_fixups_arm (after)
- /// CHECK-DAG: ArmDexCacheArraysBase
- /// CHECK-DAG: LoadClass load_kind:BssEntry class_name:Other
-
/// CHECK-START-ARM64: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
/// CHECK: LoadClass load_kind:BssEntry class_name:Other
/// CHECK-START-MIPS: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
/// CHECK: LoadClass load_kind:BssEntry class_name:Other
- /// CHECK-START-MIPS: java.lang.Class Main.$noinline$getOtherClass() dex_cache_array_fixups_mips (after)
- /// CHECK-DAG: MipsDexCacheArraysBase
- /// CHECK-DAG: LoadClass load_kind:BssEntry class_name:Other
-
/// CHECK-START-MIPS64: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
/// CHECK: LoadClass load_kind:BssEntry class_name:Other
diff --git a/test/616-cha-abstract/expected.txt b/test/616-cha-abstract/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/616-cha-abstract/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/616-cha-abstract/info.txt b/test/616-cha-abstract/info.txt
new file mode 100644
index 0000000..4f7e013
--- /dev/null
+++ b/test/616-cha-abstract/info.txt
@@ -0,0 +1 @@
+Test for Class Hierarchy Analysis (CHA) on abstract method.
diff --git a/test/616-cha-abstract/run b/test/616-cha-abstract/run
new file mode 100644
index 0000000..d8b4f0d
--- /dev/null
+++ b/test/616-cha-abstract/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 +# Run without an app image to prevent the classes from being loaded at startup.
+exec ${RUN} "${@}" --no-app-image
diff --git a/test/616-cha-abstract/src/Main.java b/test/616-cha-abstract/src/Main.java
new file mode 100644
index 0000000..e1d7db1
--- /dev/null
+++ b/test/616-cha-abstract/src/Main.java
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+abstract class Base {
+ abstract void foo(int i);
+
+ void printError(String msg) {
+ System.out.println(msg);
+ }
+}
+
+class Main1 extends Base {
+ void foo(int i) {
+ if (i != 1) {
+ printError("error1");
+ }
+ }
+}
+
+class Main2 extends Main1 {
+ void foo(int i) {
+ if (i != 2) {
+ printError("error2");
+ }
+ }
+}
+
+public class Main {
+ static Main1 sMain1;
+ static Main1 sMain2;
+
+ static boolean sIsOptimizing = true;
+ static boolean sHasJIT = true;
+ static volatile boolean sOtherThreadStarted;
+
+ private static void assertSingleImplementation(Class<?> clazz, String method_name, boolean b) {
+ if (hasSingleImplementation(clazz, method_name) != b) {
+ System.out.println(clazz + "." + method_name +
+ " doesn't have single implementation value of " + b);
+ }
+ }
+
 + // sMain1.foo() will always be Main1.foo() before Main2 is loaded/linked.
+ // So sMain1.foo() can be devirtualized to Main1.foo() and be inlined.
+ // After Dummy.createMain2() which links in Main2, live testOverride() on stack
+ // should be deoptimized.
+ static void testOverride(boolean createMain2, boolean wait, boolean setHasJIT) {
+ if (setHasJIT) {
+ if (isInterpreted()) {
+ sHasJIT = false;
+ }
+ return;
+ }
+
+ if (createMain2 && (sIsOptimizing || sHasJIT)) {
+ assertIsManaged();
+ }
+
+ sMain1.foo(sMain1.getClass() == Main1.class ? 1 : 2);
+
+ if (createMain2) {
+ // Wait for the other thread to start.
+ while (!sOtherThreadStarted);
+ // Create an Main2 instance and assign it to sMain2.
+ // sMain1 is kept the same.
+ sMain2 = Dummy.createMain2();
+ // Wake up the other thread.
+ synchronized(Main.class) {
+ Main.class.notify();
+ }
+ } else if (wait) {
+ // This is the other thread.
+ synchronized(Main.class) {
+ sOtherThreadStarted = true;
+ // Wait for Main2 to be linked and deoptimization is triggered.
+ try {
+ Main.class.wait();
+ } catch (Exception e) {
+ }
+ }
+ }
+
+ // There should be a deoptimization here right after Main2 is linked by
+ // calling Dummy.createMain2(), even though sMain1 didn't change.
+ // The behavior here would be different if inline-cache is used, which
+ // doesn't deoptimize since sMain1 still hits the type cache.
+ sMain1.foo(sMain1.getClass() == Main1.class ? 1 : 2);
+ if ((createMain2 || wait) && sHasJIT && !sIsOptimizing) {
+ // This method should be deoptimized right after Main2 is created.
+ assertIsInterpreted();
+ }
+
+ if (sMain2 != null) {
+ sMain2.foo(sMain2.getClass() == Main1.class ? 1 : 2);
+ }
+ }
+
+ // Test scenarios under which CHA-based devirtualization happens,
+ // and class loading that overrides a method can invalidate compiled code.
+ public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+
+ if (isInterpreted()) {
+ sIsOptimizing = false;
+ }
+
 + // sMain1 is an instance of Main1. Main2 hasn't been loaded yet.
+ sMain1 = new Main1();
+
+ ensureJitCompiled(Main.class, "testOverride");
+ testOverride(false, false, true);
+
+ if (sHasJIT && !sIsOptimizing) {
+ assertSingleImplementation(Base.class, "foo", true);
+ assertSingleImplementation(Main1.class, "foo", true);
+ } else {
+ // Main2 is verified ahead-of-time so it's linked in already.
+ }
+
+ // Create another thread that also calls sMain1.foo().
+ // Try to test suspend and deopt another thread.
+ new Thread() {
+ public void run() {
+ testOverride(false, true, false);
+ }
+ }.start();
+
+ // This will create Main2 instance in the middle of testOverride().
+ testOverride(true, false, false);
+ assertSingleImplementation(Base.class, "foo", false);
+ assertSingleImplementation(Main1.class, "foo", false);
+ }
+
+ private static native void ensureJitCompiled(Class<?> itf, String method_name);
+ private static native void assertIsInterpreted();
+ private static native void assertIsManaged();
+ private static native boolean isInterpreted();
+ private static native boolean hasSingleImplementation(Class<?> clazz, String method_name);
+}
+
+// Put createMain2() in another class to avoid class loading due to verifier.
+class Dummy {
+ static Main1 createMain2() {
+ return new Main2();
+ }
+}
diff --git a/test/623-checker-loop-regressions/src/Main.java b/test/623-checker-loop-regressions/src/Main.java
index 7cc0b8b..7509d9b 100644
--- a/test/623-checker-loop-regressions/src/Main.java
+++ b/test/623-checker-loop-regressions/src/Main.java
@@ -154,8 +154,8 @@
/// CHECK-NOT: Phi
//
/// CHECK-START: int Main.polynomialInt() instruction_simplifier$after_bce (after)
- /// CHECK-DAG: <<Int:i\d+>> IntConstant -45 loop:none
- /// CHECK-DAG: Return [<<Int>>] loop:none
+ /// CHECK-DAG: <<Int:i\d+>> IntConstant -45 loop:none
+ /// CHECK-DAG: Return [<<Int>>] loop:none
static int polynomialInt() {
int x = 0;
for (int i = 0; i < 10; i++) {
@@ -164,6 +164,81 @@
return x;
}
+ // Regression test for b/34779592 (found with fuzz testing): overflow for last value
+ // of division truncates to zero, for multiplication it simply truncates.
+ //
+ /// CHECK-START: int Main.geoIntDivLastValue(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: int Main.geoIntDivLastValue(int) loop_optimization (after)
+ /// CHECK-NOT: Phi
+ //
+ /// CHECK-START: int Main.geoIntDivLastValue(int) instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Int:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: Return [<<Int>>] loop:none
+ static int geoIntDivLastValue(int x) {
+ for (int i = 0; i < 2; i++) {
+ x /= 1081788608;
+ }
+ return x;
+ }
+
+ /// CHECK-START: int Main.geoIntMulLastValue(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: int Main.geoIntMulLastValue(int) loop_optimization (after)
+ /// CHECK-NOT: Phi
+ //
+ /// CHECK-START: int Main.geoIntMulLastValue(int) instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Par:i\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Int:i\d+>> IntConstant -194211840 loop:none
+ /// CHECK-DAG: <<Mul:i\d+>> Mul [<<Par>>,<<Int>>] loop:none
+ /// CHECK-DAG: Return [<<Mul>>] loop:none
+ static int geoIntMulLastValue(int x) {
+ for (int i = 0; i < 2; i++) {
+ x *= 1081788608;
+ }
+ return x;
+ }
+
+ /// CHECK-START: long Main.geoLongDivLastValue(long) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: long Main.geoLongDivLastValue(long) loop_optimization (after)
+ /// CHECK-NOT: Phi
+ //
+ /// CHECK-START: long Main.geoLongDivLastValue(long) instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Long:j\d+>> LongConstant 0 loop:none
+ /// CHECK-DAG: Return [<<Long>>] loop:none
+ static long geoLongDivLastValue(long x) {
+ for (int i = 0; i < 10; i++) {
+ x /= 1081788608;
+ }
+ return x;
+ }
+
+ /// CHECK-START: long Main.geoLongMulLastValue(long) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: long Main.geoLongMulLastValue(long) loop_optimization (after)
+ /// CHECK-NOT: Phi
+ //
+ /// CHECK-START: long Main.geoLongMulLastValue(long) instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Par:j\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Long:j\d+>> LongConstant -8070450532247928832 loop:none
+ /// CHECK-DAG: <<Mul:j\d+>> Mul [<<Par>>,<<Long>>] loop:none
+ /// CHECK-DAG: Return [<<Mul>>] loop:none
+ static long geoLongMulLastValue(long x) {
+ for (int i = 0; i < 10; i++) {
+ x *= 1081788608;
+ }
+ return x;
+ }
+
public static void main(String[] args) {
expectEquals(10, earlyExitFirst(-1));
for (int i = 0; i <= 10; i++) {
@@ -185,6 +260,42 @@
expectEquals(-45, polynomialIntFromLong());
expectEquals(-45, polynomialInt());
+ expectEquals(0, geoIntDivLastValue(0));
+ expectEquals(0, geoIntDivLastValue(1));
+ expectEquals(0, geoIntDivLastValue(2));
+ expectEquals(0, geoIntDivLastValue(1081788608));
+ expectEquals(0, geoIntDivLastValue(-1081788608));
+ expectEquals(0, geoIntDivLastValue(2147483647));
+ expectEquals(0, geoIntDivLastValue(-2147483648));
+
+ expectEquals( 0, geoIntMulLastValue(0));
+ expectEquals( -194211840, geoIntMulLastValue(1));
+ expectEquals( -388423680, geoIntMulLastValue(2));
+ expectEquals(-1041498112, geoIntMulLastValue(1081788608));
+ expectEquals( 1041498112, geoIntMulLastValue(-1081788608));
+ expectEquals( 194211840, geoIntMulLastValue(2147483647));
+ expectEquals( 0, geoIntMulLastValue(-2147483648));
+
+ expectEquals(0L, geoLongDivLastValue(0L));
+ expectEquals(0L, geoLongDivLastValue(1L));
+ expectEquals(0L, geoLongDivLastValue(2L));
+ expectEquals(0L, geoLongDivLastValue(1081788608L));
+ expectEquals(0L, geoLongDivLastValue(-1081788608L));
+ expectEquals(0L, geoLongDivLastValue(2147483647L));
+ expectEquals(0L, geoLongDivLastValue(-2147483648L));
+ expectEquals(0L, geoLongDivLastValue(9223372036854775807L));
+ expectEquals(0L, geoLongDivLastValue(-9223372036854775808L));
+
+ expectEquals( 0L, geoLongMulLastValue(0L));
+ expectEquals(-8070450532247928832L, geoLongMulLastValue(1L));
+ expectEquals( 2305843009213693952L, geoLongMulLastValue(2L));
+ expectEquals( 0L, geoLongMulLastValue(1081788608L));
+ expectEquals( 0L, geoLongMulLastValue(-1081788608L));
+ expectEquals( 8070450532247928832L, geoLongMulLastValue(2147483647L));
+ expectEquals( 0L, geoLongMulLastValue(-2147483648L));
+ expectEquals( 8070450532247928832L, geoLongMulLastValue(9223372036854775807L));
+ expectEquals( 0L, geoLongMulLastValue(-9223372036854775808L));
+
System.out.println("passed");
}
@@ -193,4 +304,10 @@
throw new Error("Expected: " + expected + ", found: " + result);
}
}
+
+ private static void expectEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
}
diff --git a/test/626-checker-arm64-scratch-register/src/Main.java b/test/626-checker-arm64-scratch-register/src/Main.java
index aa211be..6dd4374 100644
--- a/test/626-checker-arm64-scratch-register/src/Main.java
+++ b/test/626-checker-arm64-scratch-register/src/Main.java
@@ -95,8 +95,8 @@
/// CHECK: str s1, [sp, #28]
/// CHECK: ldr s1, [sp, #32]
/// CHECK: str s31, [sp, #32]
- /// CHECK: ldr w16, [sp, #20]
- /// CHECK: str w16, [sp, #40]
+ /// CHECK: ldr s31, [sp, #20]
+ /// CHECK: str s31, [sp, #40]
/// CHECK: str s12, [sp, #20]
/// CHECK: fmov d12, d11
/// CHECK: fmov d11, d10
diff --git a/test/626-const-class-linking/clear_dex_cache_types.cc b/test/626-const-class-linking/clear_dex_cache_types.cc
index b035896..c0aedc1 100644
--- a/test/626-const-class-linking/clear_dex_cache_types.cc
+++ b/test/626-const-class-linking/clear_dex_cache_types.cc
@@ -15,6 +15,9 @@
*/
#include "jni.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache-inl.h"
#include "object_lock.h"
#include "scoped_thread_state_change-inl.h"
diff --git a/test/635-checker-arm64-volatile-load-cc/expected.txt b/test/635-checker-arm64-volatile-load-cc/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/635-checker-arm64-volatile-load-cc/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/635-checker-arm64-volatile-load-cc/info.txt b/test/635-checker-arm64-volatile-load-cc/info.txt
new file mode 100644
index 0000000..5d67df4
--- /dev/null
+++ b/test/635-checker-arm64-volatile-load-cc/info.txt
@@ -0,0 +1,3 @@
+Regression test checking that the VIXL ARM64 scratch register pool is
+not exhausted when generating a volatile field load with a large
+offset with (Baker) read barriers (b/34726333).
diff --git a/test/635-checker-arm64-volatile-load-cc/src/Main.java b/test/635-checker-arm64-volatile-load-cc/src/Main.java
new file mode 100644
index 0000000..6a26e94
--- /dev/null
+++ b/test/635-checker-arm64-volatile-load-cc/src/Main.java
@@ -0,0 +1,284 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ static volatile Object s000, s001, s002, s003, s004, s005, s006, s007, s008, s009;
+ static volatile Object s010, s011, s012, s013, s014, s015, s016, s017, s018, s019;
+ static volatile Object s020, s021, s022, s023, s024, s025, s026, s027, s028, s029;
+ static volatile Object s030, s031, s032, s033, s034, s035, s036, s037, s038, s039;
+ static volatile Object s040, s041, s042, s043, s044, s045, s046, s047, s048, s049;
+ static volatile Object s050, s051, s052, s053, s054, s055, s056, s057, s058, s059;
+ static volatile Object s060, s061, s062, s063, s064, s065, s066, s067, s068, s069;
+ static volatile Object s070, s071, s072, s073, s074, s075, s076, s077, s078, s079;
+ static volatile Object s080, s081, s082, s083, s084, s085, s086, s087, s088, s089;
+ static volatile Object s090, s091, s092, s093, s094, s095, s096, s097, s098, s099;
+
+ static volatile Object s100, s101, s102, s103, s104, s105, s106, s107, s108, s109;
+ static volatile Object s110, s111, s112, s113, s114, s115, s116, s117, s118, s119;
+ static volatile Object s120, s121, s122, s123, s124, s125, s126, s127, s128, s129;
+ static volatile Object s130, s131, s132, s133, s134, s135, s136, s137, s138, s139;
+ static volatile Object s140, s141, s142, s143, s144, s145, s146, s147, s148, s149;
+ static volatile Object s150, s151, s152, s153, s154, s155, s156, s157, s158, s159;
+ static volatile Object s160, s161, s162, s163, s164, s165, s166, s167, s168, s169;
+ static volatile Object s170, s171, s172, s173, s174, s175, s176, s177, s178, s179;
+ static volatile Object s180, s181, s182, s183, s184, s185, s186, s187, s188, s189;
+ static volatile Object s190, s191, s192, s193, s194, s195, s196, s197, s198, s199;
+
+ static volatile Object s200, s201, s202, s203, s204, s205, s206, s207, s208, s209;
+ static volatile Object s210, s211, s212, s213, s214, s215, s216, s217, s218, s219;
+ static volatile Object s220, s221, s222, s223, s224, s225, s226, s227, s228, s229;
+ static volatile Object s230, s231, s232, s233, s234, s235, s236, s237, s238, s239;
+ static volatile Object s240, s241, s242, s243, s244, s245, s246, s247, s248, s249;
+ static volatile Object s250, s251, s252, s253, s254, s255, s256, s257, s258, s259;
+ static volatile Object s260, s261, s262, s263, s264, s265, s266, s267, s268, s269;
+ static volatile Object s270, s271, s272, s273, s274, s275, s276, s277, s278, s279;
+ static volatile Object s280, s281, s282, s283, s284, s285, s286, s287, s288, s289;
+ static volatile Object s290, s291, s292, s293, s294, s295, s296, s297, s298, s299;
+
+ static volatile Object s300, s301, s302, s303, s304, s305, s306, s307, s308, s309;
+ static volatile Object s310, s311, s312, s313, s314, s315, s316, s317, s318, s319;
+ static volatile Object s320, s321, s322, s323, s324, s325, s326, s327, s328, s329;
+ static volatile Object s330, s331, s332, s333, s334, s335, s336, s337, s338, s339;
+ static volatile Object s340, s341, s342, s343, s344, s345, s346, s347, s348, s349;
+ static volatile Object s350, s351, s352, s353, s354, s355, s356, s357, s358, s359;
+ static volatile Object s360, s361, s362, s363, s364, s365, s366, s367, s368, s369;
+ static volatile Object s370, s371, s372, s373, s374, s375, s376, s377, s378, s379;
+ static volatile Object s380, s381, s382, s383, s384, s385, s386, s387, s388, s389;
+ static volatile Object s390, s391, s392, s393, s394, s395, s396, s397, s398, s399;
+
+ static volatile Object s400, s401, s402, s403, s404, s405, s406, s407, s408, s409;
+ static volatile Object s410, s411, s412, s413, s414, s415, s416, s417, s418, s419;
+ static volatile Object s420, s421, s422, s423, s424, s425, s426, s427, s428, s429;
+ static volatile Object s430, s431, s432, s433, s434, s435, s436, s437, s438, s439;
+ static volatile Object s440, s441, s442, s443, s444, s445, s446, s447, s448, s449;
+ static volatile Object s450, s451, s452, s453, s454, s455, s456, s457, s458, s459;
+ static volatile Object s460, s461, s462, s463, s464, s465, s466, s467, s468, s469;
+ static volatile Object s470, s471, s472, s473, s474, s475, s476, s477, s478, s479;
+ static volatile Object s480, s481, s482, s483, s484, s485, s486, s487, s488, s489;
+ static volatile Object s490, s491, s492, s493, s494, s495, s496, s497, s498, s499;
+
+ static volatile Object s500, s501, s502, s503, s504, s505, s506, s507, s508, s509;
+ static volatile Object s510, s511, s512, s513, s514, s515, s516, s517, s518, s519;
+ static volatile Object s520, s521, s522, s523, s524, s525, s526, s527, s528, s529;
+ static volatile Object s530, s531, s532, s533, s534, s535, s536, s537, s538, s539;
+ static volatile Object s540, s541, s542, s543, s544, s545, s546, s547, s548, s549;
+ static volatile Object s550, s551, s552, s553, s554, s555, s556, s557, s558, s559;
+ static volatile Object s560, s561, s562, s563, s564, s565, s566, s567, s568, s569;
+ static volatile Object s570, s571, s572, s573, s574, s575, s576, s577, s578, s579;
+ static volatile Object s580, s581, s582, s583, s584, s585, s586, s587, s588, s589;
+ static volatile Object s590, s591, s592, s593, s594, s595, s596, s597, s598, s599;
+
+ static volatile Object s600, s601, s602, s603, s604, s605, s606, s607, s608, s609;
+ static volatile Object s610, s611, s612, s613, s614, s615, s616, s617, s618, s619;
+ static volatile Object s620, s621, s622, s623, s624, s625, s626, s627, s628, s629;
+ static volatile Object s630, s631, s632, s633, s634, s635, s636, s637, s638, s639;
+ static volatile Object s640, s641, s642, s643, s644, s645, s646, s647, s648, s649;
+ static volatile Object s650, s651, s652, s653, s654, s655, s656, s657, s658, s659;
+ static volatile Object s660, s661, s662, s663, s664, s665, s666, s667, s668, s669;
+ static volatile Object s670, s671, s672, s673, s674, s675, s676, s677, s678, s679;
+ static volatile Object s680, s681, s682, s683, s684, s685, s686, s687, s688, s689;
+ static volatile Object s690, s691, s692, s693, s694, s695, s696, s697, s698, s699;
+
+ static volatile Object s700, s701, s702, s703, s704, s705, s706, s707, s708, s709;
+ static volatile Object s710, s711, s712, s713, s714, s715, s716, s717, s718, s719;
+ static volatile Object s720, s721, s722, s723, s724, s725, s726, s727, s728, s729;
+ static volatile Object s730, s731, s732, s733, s734, s735, s736, s737, s738, s739;
+ static volatile Object s740, s741, s742, s743, s744, s745, s746, s747, s748, s749;
+ static volatile Object s750, s751, s752, s753, s754, s755, s756, s757, s758, s759;
+ static volatile Object s760, s761, s762, s763, s764, s765, s766, s767, s768, s769;
+ static volatile Object s770, s771, s772, s773, s774, s775, s776, s777, s778, s779;
+ static volatile Object s780, s781, s782, s783, s784, s785, s786, s787, s788, s789;
+ static volatile Object s790, s791, s792, s793, s794, s795, s796, s797, s798, s799;
+
+ static volatile Object s800, s801, s802, s803, s804, s805, s806, s807, s808, s809;
+ static volatile Object s810, s811, s812, s813, s814, s815, s816, s817, s818, s819;
+ static volatile Object s820, s821, s822, s823, s824, s825, s826, s827, s828, s829;
+ static volatile Object s830, s831, s832, s833, s834, s835, s836, s837, s838, s839;
+ static volatile Object s840, s841, s842, s843, s844, s845, s846, s847, s848, s849;
+ static volatile Object s850, s851, s852, s853, s854, s855, s856, s857, s858, s859;
+ static volatile Object s860, s861, s862, s863, s864, s865, s866, s867, s868, s869;
+ static volatile Object s870, s871, s872, s873, s874, s875, s876, s877, s878, s879;
+ static volatile Object s880, s881, s882, s883, s884, s885, s886, s887, s888, s889;
+ static volatile Object s890, s891, s892, s893, s894, s895, s896, s897, s898, s899;
+
+ static volatile Object s900, s901, s902, s903, s904, s905, s906, s907, s908, s909;
+ static volatile Object s910, s911, s912, s913, s914, s915, s916, s917, s918, s919;
+ static volatile Object s920, s921, s922, s923, s924, s925, s926, s927, s928, s929;
+ static volatile Object s930, s931, s932, s933, s934, s935, s936, s937, s938, s939;
+ static volatile Object s940, s941, s942, s943, s944, s945, s946, s947, s948, s949;
+ static volatile Object s950, s951, s952, s953, s954, s955, s956, s957, s958, s959;
+ static volatile Object s960, s961, s962, s963, s964, s965, s966, s967, s968, s969;
+ static volatile Object s970, s971, s972, s973, s974, s975, s976, s977, s978, s979;
+ static volatile Object s980, s981, s982, s983, s984, s985, s986, s987, s988, s989;
+ static volatile Object s990, s991, s992, s993, s994, s995, s996, s997, s998, s999;
+
+
+ volatile Object i0000, i0001, i0002, i0003, i0004, i0005, i0006, i0007, i0008, i0009;
+ volatile Object i0010, i0011, i0012, i0013, i0014, i0015, i0016, i0017, i0018, i0019;
+ volatile Object i0020, i0021, i0022, i0023, i0024, i0025, i0026, i0027, i0028, i0029;
+ volatile Object i0030, i0031, i0032, i0033, i0034, i0035, i0036, i0037, i0038, i0039;
+ volatile Object i0040, i0041, i0042, i0043, i0044, i0045, i0046, i0047, i0048, i0049;
+ volatile Object i0050, i0051, i0052, i0053, i0054, i0055, i0056, i0057, i0058, i0059;
+ volatile Object i0060, i0061, i0062, i0063, i0064, i0065, i0066, i0067, i0068, i0069;
+ volatile Object i0070, i0071, i0072, i0073, i0074, i0075, i0076, i0077, i0078, i0079;
+ volatile Object i0080, i0081, i0082, i0083, i0084, i0085, i0086, i0087, i0088, i0089;
+ volatile Object i0090, i0091, i0092, i0093, i0094, i0095, i0096, i0097, i0098, i0099;
+
+ volatile Object i0100, i0101, i0102, i0103, i0104, i0105, i0106, i0107, i0108, i0109;
+ volatile Object i0110, i0111, i0112, i0113, i0114, i0115, i0116, i0117, i0118, i0119;
+ volatile Object i0120, i0121, i0122, i0123, i0124, i0125, i0126, i0127, i0128, i0129;
+ volatile Object i0130, i0131, i0132, i0133, i0134, i0135, i0136, i0137, i0138, i0139;
+ volatile Object i0140, i0141, i0142, i0143, i0144, i0145, i0146, i0147, i0148, i0149;
+ volatile Object i0150, i0151, i0152, i0153, i0154, i0155, i0156, i0157, i0158, i0159;
+ volatile Object i0160, i0161, i0162, i0163, i0164, i0165, i0166, i0167, i0168, i0169;
+ volatile Object i0170, i0171, i0172, i0173, i0174, i0175, i0176, i0177, i0178, i0179;
+ volatile Object i0180, i0181, i0182, i0183, i0184, i0185, i0186, i0187, i0188, i0189;
+ volatile Object i0190, i0191, i0192, i0193, i0194, i0195, i0196, i0197, i0198, i0199;
+
+ volatile Object i0200, i0201, i0202, i0203, i0204, i0205, i0206, i0207, i0208, i0209;
+ volatile Object i0210, i0211, i0212, i0213, i0214, i0215, i0216, i0217, i0218, i0219;
+ volatile Object i0220, i0221, i0222, i0223, i0224, i0225, i0226, i0227, i0228, i0229;
+ volatile Object i0230, i0231, i0232, i0233, i0234, i0235, i0236, i0237, i0238, i0239;
+ volatile Object i0240, i0241, i0242, i0243, i0244, i0245, i0246, i0247, i0248, i0249;
+ volatile Object i0250, i0251, i0252, i0253, i0254, i0255, i0256, i0257, i0258, i0259;
+ volatile Object i0260, i0261, i0262, i0263, i0264, i0265, i0266, i0267, i0268, i0269;
+ volatile Object i0270, i0271, i0272, i0273, i0274, i0275, i0276, i0277, i0278, i0279;
+ volatile Object i0280, i0281, i0282, i0283, i0284, i0285, i0286, i0287, i0288, i0289;
+ volatile Object i0290, i0291, i0292, i0293, i0294, i0295, i0296, i0297, i0298, i0299;
+
+ volatile Object i0300, i0301, i0302, i0303, i0304, i0305, i0306, i0307, i0308, i0309;
+ volatile Object i0310, i0311, i0312, i0313, i0314, i0315, i0316, i0317, i0318, i0319;
+ volatile Object i0320, i0321, i0322, i0323, i0324, i0325, i0326, i0327, i0328, i0329;
+ volatile Object i0330, i0331, i0332, i0333, i0334, i0335, i0336, i0337, i0338, i0339;
+ volatile Object i0340, i0341, i0342, i0343, i0344, i0345, i0346, i0347, i0348, i0349;
+ volatile Object i0350, i0351, i0352, i0353, i0354, i0355, i0356, i0357, i0358, i0359;
+ volatile Object i0360, i0361, i0362, i0363, i0364, i0365, i0366, i0367, i0368, i0369;
+ volatile Object i0370, i0371, i0372, i0373, i0374, i0375, i0376, i0377, i0378, i0379;
+ volatile Object i0380, i0381, i0382, i0383, i0384, i0385, i0386, i0387, i0388, i0389;
+ volatile Object i0390, i0391, i0392, i0393, i0394, i0395, i0396, i0397, i0398, i0399;
+
+ volatile Object i0400, i0401, i0402, i0403, i0404, i0405, i0406, i0407, i0408, i0409;
+ volatile Object i0410, i0411, i0412, i0413, i0414, i0415, i0416, i0417, i0418, i0419;
+ volatile Object i0420, i0421, i0422, i0423, i0424, i0425, i0426, i0427, i0428, i0429;
+ volatile Object i0430, i0431, i0432, i0433, i0434, i0435, i0436, i0437, i0438, i0439;
+ volatile Object i0440, i0441, i0442, i0443, i0444, i0445, i0446, i0447, i0448, i0449;
+ volatile Object i0450, i0451, i0452, i0453, i0454, i0455, i0456, i0457, i0458, i0459;
+ volatile Object i0460, i0461, i0462, i0463, i0464, i0465, i0466, i0467, i0468, i0469;
+ volatile Object i0470, i0471, i0472, i0473, i0474, i0475, i0476, i0477, i0478, i0479;
+ volatile Object i0480, i0481, i0482, i0483, i0484, i0485, i0486, i0487, i0488, i0489;
+ volatile Object i0490, i0491, i0492, i0493, i0494, i0495, i0496, i0497, i0498, i0499;
+
+ volatile Object i0500, i0501, i0502, i0503, i0504, i0505, i0506, i0507, i0508, i0509;
+ volatile Object i0510, i0511, i0512, i0513, i0514, i0515, i0516, i0517, i0518, i0519;
+ volatile Object i0520, i0521, i0522, i0523, i0524, i0525, i0526, i0527, i0528, i0529;
+ volatile Object i0530, i0531, i0532, i0533, i0534, i0535, i0536, i0537, i0538, i0539;
+ volatile Object i0540, i0541, i0542, i0543, i0544, i0545, i0546, i0547, i0548, i0549;
+ volatile Object i0550, i0551, i0552, i0553, i0554, i0555, i0556, i0557, i0558, i0559;
+ volatile Object i0560, i0561, i0562, i0563, i0564, i0565, i0566, i0567, i0568, i0569;
+ volatile Object i0570, i0571, i0572, i0573, i0574, i0575, i0576, i0577, i0578, i0579;
+ volatile Object i0580, i0581, i0582, i0583, i0584, i0585, i0586, i0587, i0588, i0589;
+ volatile Object i0590, i0591, i0592, i0593, i0594, i0595, i0596, i0597, i0598, i0599;
+
+ volatile Object i0600, i0601, i0602, i0603, i0604, i0605, i0606, i0607, i0608, i0609;
+ volatile Object i0610, i0611, i0612, i0613, i0614, i0615, i0616, i0617, i0618, i0619;
+ volatile Object i0620, i0621, i0622, i0623, i0624, i0625, i0626, i0627, i0628, i0629;
+ volatile Object i0630, i0631, i0632, i0633, i0634, i0635, i0636, i0637, i0638, i0639;
+ volatile Object i0640, i0641, i0642, i0643, i0644, i0645, i0646, i0647, i0648, i0649;
+ volatile Object i0650, i0651, i0652, i0653, i0654, i0655, i0656, i0657, i0658, i0659;
+ volatile Object i0660, i0661, i0662, i0663, i0664, i0665, i0666, i0667, i0668, i0669;
+ volatile Object i0670, i0671, i0672, i0673, i0674, i0675, i0676, i0677, i0678, i0679;
+ volatile Object i0680, i0681, i0682, i0683, i0684, i0685, i0686, i0687, i0688, i0689;
+ volatile Object i0690, i0691, i0692, i0693, i0694, i0695, i0696, i0697, i0698, i0699;
+
+ volatile Object i0700, i0701, i0702, i0703, i0704, i0705, i0706, i0707, i0708, i0709;
+ volatile Object i0710, i0711, i0712, i0713, i0714, i0715, i0716, i0717, i0718, i0719;
+ volatile Object i0720, i0721, i0722, i0723, i0724, i0725, i0726, i0727, i0728, i0729;
+ volatile Object i0730, i0731, i0732, i0733, i0734, i0735, i0736, i0737, i0738, i0739;
+ volatile Object i0740, i0741, i0742, i0743, i0744, i0745, i0746, i0747, i0748, i0749;
+ volatile Object i0750, i0751, i0752, i0753, i0754, i0755, i0756, i0757, i0758, i0759;
+ volatile Object i0760, i0761, i0762, i0763, i0764, i0765, i0766, i0767, i0768, i0769;
+ volatile Object i0770, i0771, i0772, i0773, i0774, i0775, i0776, i0777, i0778, i0779;
+ volatile Object i0780, i0781, i0782, i0783, i0784, i0785, i0786, i0787, i0788, i0789;
+ volatile Object i0790, i0791, i0792, i0793, i0794, i0795, i0796, i0797, i0798, i0799;
+
+ volatile Object i0800, i0801, i0802, i0803, i0804, i0805, i0806, i0807, i0808, i0809;
+ volatile Object i0810, i0811, i0812, i0813, i0814, i0815, i0816, i0817, i0818, i0819;
+ volatile Object i0820, i0821, i0822, i0823, i0824, i0825, i0826, i0827, i0828, i0829;
+ volatile Object i0830, i0831, i0832, i0833, i0834, i0835, i0836, i0837, i0838, i0839;
+ volatile Object i0840, i0841, i0842, i0843, i0844, i0845, i0846, i0847, i0848, i0849;
+ volatile Object i0850, i0851, i0852, i0853, i0854, i0855, i0856, i0857, i0858, i0859;
+ volatile Object i0860, i0861, i0862, i0863, i0864, i0865, i0866, i0867, i0868, i0869;
+ volatile Object i0870, i0871, i0872, i0873, i0874, i0875, i0876, i0877, i0878, i0879;
+ volatile Object i0880, i0881, i0882, i0883, i0884, i0885, i0886, i0887, i0888, i0889;
+ volatile Object i0890, i0891, i0892, i0893, i0894, i0895, i0896, i0897, i0898, i0899;
+
+ volatile Object i0900, i0901, i0902, i0903, i0904, i0905, i0906, i0907, i0908, i0909;
+ volatile Object i0910, i0911, i0912, i0913, i0914, i0915, i0916, i0917, i0918, i0919;
+ volatile Object i0920, i0921, i0922, i0923, i0924, i0925, i0926, i0927, i0928, i0929;
+ volatile Object i0930, i0931, i0932, i0933, i0934, i0935, i0936, i0937, i0938, i0939;
+ volatile Object i0940, i0941, i0942, i0943, i0944, i0945, i0946, i0947, i0948, i0949;
+ volatile Object i0950, i0951, i0952, i0953, i0954, i0955, i0956, i0957, i0958, i0959;
+ volatile Object i0960, i0961, i0962, i0963, i0964, i0965, i0966, i0967, i0968, i0969;
+ volatile Object i0970, i0971, i0972, i0973, i0974, i0975, i0976, i0977, i0978, i0979;
+ volatile Object i0980, i0981, i0982, i0983, i0984, i0985, i0986, i0987, i0988, i0989;
+ volatile Object i0990, i0991, i0992, i0993, i0994, i0995, i0996, i0997, i0998, i0999;
+
+ volatile Object i1000, i1001, i1002, i1003, i1004, i1005, i1006, i1007, i1008, i1009;
+ volatile Object i1010, i1011, i1012, i1013, i1014, i1015, i1016, i1017, i1018, i1019;
+ volatile Object i1020, i1021, i1022, i1023, i1024, i1025, i1026, i1027, i1028, i1029;
+ volatile Object i1030, i1031, i1032, i1033, i1034, i1035, i1036, i1037, i1038, i1039;
+ volatile Object i1040, i1041, i1042, i1043, i1044, i1045, i1046, i1047, i1048, i1049;
+ volatile Object i1050, i1051, i1052, i1053, i1054, i1055, i1056, i1057, i1058, i1059;
+ volatile Object i1060, i1061, i1062, i1063, i1064, i1065, i1066, i1067, i1068, i1069;
+ volatile Object i1070, i1071, i1072, i1073, i1074, i1075, i1076, i1077, i1078, i1079;
+ volatile Object i1080, i1081, i1082, i1083, i1084, i1085, i1086, i1087, i1088, i1089;
+ volatile Object i1090, i1091, i1092, i1093, i1094, i1095, i1096, i1097, i1098, i1099;
+
+
+ // Note: ARM64, registers X16 and X17 are respectively IP0 and IP1,
+ // the scratch registers used by the VIXL AArch64 assembler (and to
+ // some extent, by ART's ARM64 code generator).
+
+ /// CHECK-START-ARM64: void Main.testStaticVolatileFieldGetWithLargeOffset() disassembly (after)
+ /// CHECK: StaticFieldGet
+ /// CHECK: mov x17, #<<Offset:0x[0-9a-f]{4}>>
+ /// CHECK: add x16, {{x\d+}}, x17
+ /// CHECK: ldar {{w\d+}}, [x16]
+ static void testStaticVolatileFieldGetWithLargeOffset() {
+ // The offset of this static field cannot be encoded as an immediate on ARM64.
+ Object s = s999;
+ }
+
+ /// CHECK-START-ARM64: void Main.testInstanceVolatileFieldGetWithLargeOffset() disassembly (after)
+ /// CHECK: InstanceFieldGet
+ /// CHECK: mov x17, #<<Offset:0x[0-9a-f]{4}>>
+ /// CHECK: add x16, {{x\d+}}, x17
+ /// CHECK: ldar {{w\d+}}, [x16]
+ void testInstanceVolatileFieldGetWithLargeOffset() {
+ // The offset of this instance field cannot be encoded as an immediate on ARM64.
+ Object i = i1029;
+ }
+
+
+ public static void main(String[] args) {
+ testStaticVolatileFieldGetWithLargeOffset();
+ Main m = new Main();
+ m.testInstanceVolatileFieldGetWithLargeOffset();
+ System.out.println("passed");
+ }
+
+}
diff --git a/test/636-wrong-static-access/expected.txt b/test/636-wrong-static-access/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/636-wrong-static-access/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/636-wrong-static-access/info.txt b/test/636-wrong-static-access/info.txt
new file mode 100644
index 0000000..184d858
--- /dev/null
+++ b/test/636-wrong-static-access/info.txt
@@ -0,0 +1,2 @@
+Test that the compiler checks if a resolved field is
+of the expected static/instance kind.
diff --git a/test/636-wrong-static-access/run b/test/636-wrong-static-access/run
new file mode 100755
index 0000000..5e99920
--- /dev/null
+++ b/test/636-wrong-static-access/run
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Make verification soft fail, to ensure the verifier does not flag
+# the method we want to compile as "non-compilable" because it sees
+# the method will throw IncompatibleClassChangeError.
+exec ${RUN} $@ --verify-soft-fail
diff --git a/test/636-wrong-static-access/src-ex/Foo.java b/test/636-wrong-static-access/src-ex/Foo.java
new file mode 100644
index 0000000..9e3b7a7
--- /dev/null
+++ b/test/636-wrong-static-access/src-ex/Foo.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Foo {
+ public static void doTest() {
+ // Execute foo once to make sure the dex cache will be updated.
+ try {
+ foo();
+ throw new Error("Expected IncompatibleClassChangeError");
+ } catch (IncompatibleClassChangeError e) {
+ // Expected.
+ }
+ Main.ensureJitCompiled(Foo.class, "foo");
+ try {
+ foo();
+ throw new Error("Expected IncompatibleClassChangeError");
+ } catch (IncompatibleClassChangeError e) {
+ // Expected.
+ }
+ }
+
+ public static void foo() {
+ System.out.println(Holder.field);
+ }
+}
diff --git a/test/636-wrong-static-access/src/Holder.java b/test/636-wrong-static-access/src/Holder.java
new file mode 100644
index 0000000..f3b1c57
--- /dev/null
+++ b/test/636-wrong-static-access/src/Holder.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Holder {
+ public static int field = 42;
+}
diff --git a/test/636-wrong-static-access/src/Main.java b/test/636-wrong-static-access/src/Main.java
new file mode 100644
index 0000000..bd8548e
--- /dev/null
+++ b/test/636-wrong-static-access/src/Main.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+
+public class Main {
+ static final String DEX_FILE = System.getenv("DEX_LOCATION") + "/636-wrong-static-access-ex.jar";
+
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+ Class<?> pathClassLoader = Class.forName("dalvik.system.PathClassLoader");
+ if (pathClassLoader == null) {
+ throw new AssertionError("Couldn't find path class loader class");
+ }
+ Constructor<?> constructor =
+ pathClassLoader.getDeclaredConstructor(String.class, ClassLoader.class);
+ ClassLoader loader = (ClassLoader) constructor.newInstance(
+ DEX_FILE, ClassLoader.getSystemClassLoader());
+ Class<?> foo = loader.loadClass("Foo");
+ Method doTest = foo.getDeclaredMethod("doTest");
+ doTest.invoke(null);
+ }
+
+ public static native void ensureJitCompiled(Class<?> cls, String methodName);
+}
diff --git a/test/636-wrong-static-access/src2/Holder.java b/test/636-wrong-static-access/src2/Holder.java
new file mode 100644
index 0000000..a26da24
--- /dev/null
+++ b/test/636-wrong-static-access/src2/Holder.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Holder {
+ public int field = 42;
+}
diff --git a/test/706-checker-scheduler/expected.txt b/test/706-checker-scheduler/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/706-checker-scheduler/expected.txt
diff --git a/test/706-checker-scheduler/info.txt b/test/706-checker-scheduler/info.txt
new file mode 100644
index 0000000..b4ad9b4
--- /dev/null
+++ b/test/706-checker-scheduler/info.txt
@@ -0,0 +1 @@
+Tests for HInstruction scheduler.
diff --git a/test/706-checker-scheduler/src/Main.java b/test/706-checker-scheduler/src/Main.java
new file mode 100644
index 0000000..1721e42
--- /dev/null
+++ b/test/706-checker-scheduler/src/Main.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ static int static_variable = 0;
+
+ /// CHECK-START-ARM64: int Main.arrayAccess() scheduler (before)
+ /// CHECK: <<Const1:i\d+>> IntConstant 1
+ /// CHECK: <<i0:i\d+>> Phi
+ /// CHECK: <<res0:i\d+>> Phi
+ /// CHECK: <<Array:i\d+>> IntermediateAddress
+ /// CHECK: <<ArrayGet1:i\d+>> ArrayGet [<<Array>>,<<i0>>]
+ /// CHECK: <<res1:i\d+>> Add [<<res0>>,<<ArrayGet1>>]
+ /// CHECK: <<i1:i\d+>> Add [<<i0>>,<<Const1>>]
+ /// CHECK: <<ArrayGet2:i\d+>> ArrayGet [<<Array>>,<<i1>>]
+ /// CHECK: Add [<<res1>>,<<ArrayGet2>>]
+
+ /// CHECK-START-ARM64: int Main.arrayAccess() scheduler (after)
+ /// CHECK: <<Const1:i\d+>> IntConstant 1
+ /// CHECK: <<i0:i\d+>> Phi
+ /// CHECK: <<res0:i\d+>> Phi
+ /// CHECK: <<Array:i\d+>> IntermediateAddress
+ /// CHECK: <<ArrayGet1:i\d+>> ArrayGet [<<Array>>,<<i0>>]
+ /// CHECK: <<i1:i\d+>> Add [<<i0>>,<<Const1>>]
+ /// CHECK: <<ArrayGet2:i\d+>> ArrayGet [<<Array>>,<<i1>>]
+ /// CHECK: <<res1:i\d+>> Add [<<res0>>,<<ArrayGet1>>]
+ /// CHECK: Add [<<res1>>,<<ArrayGet2>>]
+
+ public static int arrayAccess() {
+ int res = 0;
+ int [] array = new int[10];
+ for (int i = 0; i < 9; i++) {
+ res += array[i];
+ res += array[i + 1];
+ }
+ return res;
+ }
+
+ /// CHECK-START-ARM64: int Main.intDiv(int) scheduler (before)
+ /// CHECK: Sub
+ /// CHECK: DivZeroCheck
+ /// CHECK: Div
+ /// CHECK: StaticFieldSet
+
+ /// CHECK-START-ARM64: int Main.intDiv(int) scheduler (after)
+ /// CHECK: Sub
+ /// CHECK-NOT: StaticFieldSet
+ /// CHECK: DivZeroCheck
+ /// CHECK-NOT: Sub
+ /// CHECK: Div
+ public static int intDiv(int arg) {
+ int res = 0;
+ int tmp = arg;
+ for (int i = 1; i < arg; i++) {
+ tmp -= i;
+ res = res / i; // div-zero check barrier.
+ static_variable++;
+ }
+ res += tmp;
+ return res;
+ }
+
+ public static void main(String[] args) {
+ if ((arrayAccess() + intDiv(10)) != -35) {
+ System.out.println("FAIL");
+ }
+ }
+}
diff --git a/test/900-hello-plugin/run b/test/900-hello-plugin/run
index 35b0871..50835f8 100755
--- a/test/900-hello-plugin/run
+++ b/test/900-hello-plugin/run
@@ -18,7 +18,5 @@
if [[ "$@" == *"-O"* ]]; then
plugin=libartagent.so
fi
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --runtime-option -agentpath:${plugin}=test_900 \
+./default-run "$@" --runtime-option -agentpath:${plugin}=test_900 \
--android-runtime-option -Xplugin:${plugin}
diff --git a/test/901-hello-ti-agent/run b/test/901-hello-ti-agent/run
index 4379349..c6e62ae 100755
--- a/test/901-hello-ti-agent/run
+++ b/test/901-hello-ti-agent/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/902-hello-transformation/run b/test/902-hello-transformation/run
index 4379349..c6e62ae 100755
--- a/test/902-hello-transformation/run
+++ b/test/902-hello-transformation/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/903-hello-tagging/run b/test/903-hello-tagging/run
index 4379349..c6e62ae 100755
--- a/test/903-hello-tagging/run
+++ b/test/903-hello-tagging/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/904-object-allocation/run b/test/904-object-allocation/run
index 4379349..c6e62ae 100755
--- a/test/904-object-allocation/run
+++ b/test/904-object-allocation/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/905-object-free/run b/test/905-object-free/run
index 4379349..c6e62ae 100755
--- a/test/905-object-free/run
+++ b/test/905-object-free/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/906-iterate-heap/run b/test/906-iterate-heap/run
index 4379349..c6e62ae 100755
--- a/test/906-iterate-heap/run
+++ b/test/906-iterate-heap/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/907-get-loaded-classes/run b/test/907-get-loaded-classes/run
index 4379349..c6e62ae 100755
--- a/test/907-get-loaded-classes/run
+++ b/test/907-get-loaded-classes/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/908-gc-start-finish/gc_callbacks.cc b/test/908-gc-start-finish/gc_callbacks.cc
index 59801ff..8f96ee6 100644
--- a/test/908-gc-start-finish/gc_callbacks.cc
+++ b/test/908-gc-start-finish/gc_callbacks.cc
@@ -38,43 +38,32 @@
}
extern "C" JNIEXPORT void JNICALL Java_Main_setupGcCallback(
- JNIEnv* env ATTRIBUTE_UNUSED, jclass klass ATTRIBUTE_UNUSED) {
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
jvmtiEventCallbacks callbacks;
memset(&callbacks, 0, sizeof(jvmtiEventCallbacks));
callbacks.GarbageCollectionFinish = GarbageCollectionFinish;
callbacks.GarbageCollectionStart = GarbageCollectionStart;
jvmtiError ret = jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
- if (ret != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(ret, &err);
- printf("Error setting callbacks: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
- }
+ JvmtiErrorToException(env, ret);
}
-extern "C" JNIEXPORT void JNICALL Java_Main_enableGcTracking(JNIEnv* env ATTRIBUTE_UNUSED,
+extern "C" JNIEXPORT void JNICALL Java_Main_enableGcTracking(JNIEnv* env,
jclass klass ATTRIBUTE_UNUSED,
jboolean enable) {
jvmtiError ret = jvmti_env->SetEventNotificationMode(
enable ? JVMTI_ENABLE : JVMTI_DISABLE,
JVMTI_EVENT_GARBAGE_COLLECTION_START,
nullptr);
- if (ret != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(ret, &err);
- printf("Error enabling/disabling gc callbacks: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
+ if (JvmtiErrorToException(env, ret)) {
+ return;
}
ret = jvmti_env->SetEventNotificationMode(
enable ? JVMTI_ENABLE : JVMTI_DISABLE,
JVMTI_EVENT_GARBAGE_COLLECTION_FINISH,
nullptr);
- if (ret != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(ret, &err);
- printf("Error enabling/disabling gc callbacks: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
+ if (JvmtiErrorToException(env, ret)) {
+ return;
}
}
diff --git a/test/908-gc-start-finish/run b/test/908-gc-start-finish/run
index 4379349..c6e62ae 100755
--- a/test/908-gc-start-finish/run
+++ b/test/908-gc-start-finish/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/909-attach-agent/run b/test/909-attach-agent/run
index 985341b..4a2eb34 100755
--- a/test/909-attach-agent/run
+++ b/test/909-attach-agent/run
@@ -21,17 +21,13 @@
plugin=libopenjdkjvmti.so
fi
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --android-runtime-option -Xplugin:${plugin} \
- --android-runtime-option -Xfully-deoptable \
+./default-run "$@" --android-runtime-option -Xplugin:${plugin} \
+ --android-runtime-option -Xcompiler-option \
+ --android-runtime-option --debuggable \
--args agent:${agent}=909-attach-agent
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --android-runtime-option -Xfully-deoptable \
+./default-run "$@" --android-runtime-option -Xcompiler-option \
+ --android-runtime-option --debuggable \
--args agent:${agent}=909-attach-agent
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --args agent:${agent}=909-attach-agent
+./default-run "$@" --args agent:${agent}=909-attach-agent
diff --git a/test/909-attach-agent/src/Main.java b/test/909-attach-agent/src/Main.java
index 8a8a087..569b89a 100644
--- a/test/909-attach-agent/src/Main.java
+++ b/test/909-attach-agent/src/Main.java
@@ -19,7 +19,7 @@
public class Main {
public static void main(String[] args) {
- System.out.println("Hello, world!");
+ System.err.println("Hello, world!");
for(String a : args) {
if(a.startsWith("agent:")) {
String agent = a.substring(6);
@@ -30,6 +30,6 @@
}
}
}
- System.out.println("Goodbye!");
+ System.err.println("Goodbye!");
}
}
diff --git a/test/910-methods/run b/test/910-methods/run
index 4379349..c6e62ae 100755
--- a/test/910-methods/run
+++ b/test/910-methods/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/911-get-stack-trace/run b/test/911-get-stack-trace/run
index 4379349..c6e62ae 100755
--- a/test/911-get-stack-trace/run
+++ b/test/911-get-stack-trace/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/911-get-stack-trace/src/PrintThread.java b/test/911-get-stack-trace/src/PrintThread.java
index 97815cc..136fd80 100644
--- a/test/911-get-stack-trace/src/PrintThread.java
+++ b/test/911-get-stack-trace/src/PrintThread.java
@@ -44,6 +44,9 @@
if (name.contains("Daemon")) {
// Do not print daemon stacks, as they're non-deterministic.
stackSerialization = "<not printed>";
+ } else if (name.startsWith("Jit thread pool worker")) {
+ // Skip JIT thread pool. It may or may not be there depending on configuration.
+ continue;
} else {
StringBuilder sb = new StringBuilder();
for (String[] stackElement : (String[][])stackInfo[1]) {
diff --git a/test/912-classes/expected.txt b/test/912-classes/expected.txt
index d0b77a4..328216b 100644
--- a/test/912-classes/expected.txt
+++ b/test/912-classes/expected.txt
@@ -29,7 +29,7 @@
class [Ljava.lang.String; 10000
class java.lang.Object 111
class Main$TestForNonInit 11
-class Main$TestForInitFail 1001
+class Main$TestForInitFail 1011
int []
class [Ljava.lang.String; []
class java.lang.Object []
diff --git a/test/912-classes/run b/test/912-classes/run
index 20dfc4b..f24db40 100755
--- a/test/912-classes/run
+++ b/test/912-classes/run
@@ -18,7 +18,5 @@
# In certain configurations, the app images may be valid even in a new classloader. Turn off
# app images to avoid the issue.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti \
+./default-run "$@" --jvmti \
--no-app-image
diff --git a/test/913-heaps/run b/test/913-heaps/run
index 4379349..c6e62ae 100755
--- a/test/913-heaps/run
+++ b/test/913-heaps/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/914-hello-obsolescence/run b/test/914-hello-obsolescence/run
index 4379349..c6e62ae 100755
--- a/test/914-hello-obsolescence/run
+++ b/test/914-hello-obsolescence/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/915-obsolete-2/run b/test/915-obsolete-2/run
index 4379349..c6e62ae 100755
--- a/test/915-obsolete-2/run
+++ b/test/915-obsolete-2/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/916-obsolete-jit/run b/test/916-obsolete-jit/run
index 9056211..b6d406f 100755
--- a/test/916-obsolete-jit/run
+++ b/test/916-obsolete-jit/run
@@ -21,7 +21,5 @@
else
other_args="--jit"
fi
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- ${other_args} \
+./default-run "$@" ${other_args} \
--jvmti
diff --git a/test/916-obsolete-jit/src/Main.java b/test/916-obsolete-jit/src/Main.java
index 1b03200..2b3296f 100644
--- a/test/916-obsolete-jit/src/Main.java
+++ b/test/916-obsolete-jit/src/Main.java
@@ -116,37 +116,27 @@
doTest(new Transform(), new TestWatcher());
}
- // TODO Workaround to (1) inability to ensure that current_method is not put into a register by
- // the JIT and/or (2) inability to deoptimize frames near runtime functions.
- // TODO Fix one/both of these issues.
- public static void doCall(Runnable r) {
- r.run();
- }
-
private static boolean interpreting = true;
private static boolean retry = false;
public static void doTest(Transform t, TestWatcher w) {
// Get the methods that need to be optimized.
Method say_hi_method;
- Method do_call_method;
// Figure out if we can even JIT at all.
final boolean has_jit = hasJit();
try {
say_hi_method = Transform.class.getDeclaredMethod(
"sayHi", Runnable.class, Consumer.class);
- do_call_method = Main.class.getDeclaredMethod("doCall", Runnable.class);
} catch (Exception e) {
System.out.println("Unable to find methods!");
e.printStackTrace();
return;
}
// Makes sure the stack is the way we want it for the test and does the redefinition. It will
- // set the retry boolean to true if we need to go around again due to a bad stack.
+ // set the retry boolean to true if the stack does not have a JIT-compiled sayHi entry. This can
+ // only happen if the method gets GC'd.
Runnable do_redefinition = () -> {
- if (has_jit &&
- (Main.isInterpretedFunction(say_hi_method, true) ||
- Main.isInterpretedFunction(do_call_method, false))) {
+ if (has_jit && Main.isInterpretedFunction(say_hi_method, true)) {
// Try again. We are not running the right jitted methods/cannot redefine them now.
retry = true;
} else {
@@ -161,7 +151,6 @@
do {
// Run ensureJitCompiled here since it might get GCd
ensureJitCompiled(Transform.class, "sayHi");
- ensureJitCompiled(Main.class, "doCall");
// Clear output.
w.clear();
// Try and redefine.
diff --git a/test/916-obsolete-jit/src/Transform.java b/test/916-obsolete-jit/src/Transform.java
index f4dcf09..9c9adbc 100644
--- a/test/916-obsolete-jit/src/Transform.java
+++ b/test/916-obsolete-jit/src/Transform.java
@@ -29,13 +29,7 @@
reporter.accept("Pre Start private method call");
Start(reporter);
reporter.accept("Post Start private method call");
- // TODO Revisit with b/33616143
- // TODO Uncomment this once either b/33630159 or b/33616143 are resolved.
- // r.run();
- // TODO This doCall function is a very temporary fix until we get either deoptimization near
- // runtime frames working, forcing current method to be always read from the stack or both
- // working.
- Main.doCall(r);
+ r.run();
reporter.accept("Pre Finish private method call");
Finish(reporter);
reporter.accept("Post Finish private method call");
diff --git a/test/917-fields-transformation/run b/test/917-fields-transformation/run
index 4379349..c6e62ae 100755
--- a/test/917-fields-transformation/run
+++ b/test/917-fields-transformation/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/918-fields/run b/test/918-fields/run
index 4379349..c6e62ae 100755
--- a/test/918-fields/run
+++ b/test/918-fields/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/919-obsolete-fields/run b/test/919-obsolete-fields/run
index 4379349..c6e62ae 100755
--- a/test/919-obsolete-fields/run
+++ b/test/919-obsolete-fields/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/919-obsolete-fields/src/Main.java b/test/919-obsolete-fields/src/Main.java
index 1d893f1..ffb9897 100644
--- a/test/919-obsolete-fields/src/Main.java
+++ b/test/919-obsolete-fields/src/Main.java
@@ -120,13 +120,6 @@
doTest(new Transform(w), w);
}
- // TODO Workaround to (1) inability to ensure that current_method is not put into a register by
- // the JIT and/or (2) inability to deoptimize frames near runtime functions.
- // TODO Fix one/both of these issues.
- public static void doCall(Runnable r) {
- r.run();
- }
-
private static boolean interpreting = true;
private static boolean retry = false;
diff --git a/test/919-obsolete-fields/src/Transform.java b/test/919-obsolete-fields/src/Transform.java
index abd1d19..c8e3cbd 100644
--- a/test/919-obsolete-fields/src/Transform.java
+++ b/test/919-obsolete-fields/src/Transform.java
@@ -34,12 +34,7 @@
reporter.accept("Pre Start private method call");
Start();
reporter.accept("Post Start private method call");
- // TODO Revist with b/33616143
- // TODO Uncomment this
- // r.run();
- // TODO This is a very temporary fix until we get either deoptimization near runtime frames
- // working, forcing current method to be always read from the stack or both working.
- Main.doCall(r);
+ r.run();
reporter.accept("Pre Finish private method call");
Finish();
reporter.accept("Post Finish private method call");
diff --git a/test/920-objects/run b/test/920-objects/run
index 4379349..c6e62ae 100755
--- a/test/920-objects/run
+++ b/test/920-objects/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/921-hello-failure/expected.txt b/test/921-hello-failure/expected.txt
index 9615e6b..e9b6a20 100644
--- a/test/921-hello-failure/expected.txt
+++ b/test/921-hello-failure/expected.txt
@@ -29,3 +29,21 @@
Transformation error : java.lang.Exception(Failed to retransform classes <LTransform;, LTransform2;> due to JVMTI_ERROR_NAMES_DONT_MATCH)
hello - MultiRetrans
hello2 - MultiRetrans
+hello - NewMethod
+Transformation error : java.lang.Exception(Failed to redefine class <LTransform;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_ADDED)
+hello - NewMethod
+hello2 - MissingMethod
+Transformation error : java.lang.Exception(Failed to redefine class <LTransform3;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_DELETED)
+hello2 - MissingMethod
+hello - MethodChange
+Transformation error : java.lang.Exception(Failed to redefine class <LTransform;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_MODIFIERS_CHANGED)
+hello - MethodChange
+hello - NewField
+Transformation error : java.lang.Exception(Failed to redefine class <LTransform;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED)
+hello - NewField
+hello there - MissingField
+Transformation error : java.lang.Exception(Failed to redefine class <LTransform4;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED)
+hello there - MissingField
+hello there again - FieldChange
+Transformation error : java.lang.Exception(Failed to redefine class <LTransform4;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED)
+hello there again - FieldChange
diff --git a/test/921-hello-failure/run b/test/921-hello-failure/run
index 3ef4832..8be0ed4 100755
--- a/test/921-hello-failure/run
+++ b/test/921-hello-failure/run
@@ -15,6 +15,4 @@
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/921-hello-failure/src/FieldChange.java b/test/921-hello-failure/src/FieldChange.java
new file mode 100644
index 0000000..cc2ea28
--- /dev/null
+++ b/test/921-hello-failure/src/FieldChange.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class FieldChange {
+ // The following is a base64 encoding of the following class.
+ // class Transform4 {
+ // private Object greeting;
+ // public Transform4(String hi) { }
+ // public void sayHi(String name) {
+ // throw new Error("Should not be called!");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAFwoABgAQBwARCAASCgACABMHABQHABUBAAhncmVldGluZwEAEkxqYXZhL2xhbmcv" +
+ "T2JqZWN0OwEABjxpbml0PgEAFShMamF2YS9sYW5nL1N0cmluZzspVgEABENvZGUBAA9MaW5lTnVt" +
+ "YmVyVGFibGUBAAVzYXlIaQEAClNvdXJjZUZpbGUBAA9UcmFuc2Zvcm00LmphdmEMAAkAFgEAD2ph" +
+ "dmEvbGFuZy9FcnJvcgEAFVNob3VsZCBub3QgYmUgY2FsbGVkIQwACQAKAQAKVHJhbnNmb3JtNAEA" +
+ "EGphdmEvbGFuZy9PYmplY3QBAAMoKVYAIAAFAAYAAAABAAIABwAIAAAAAgABAAkACgABAAsAAAAd" +
+ "AAEAAgAAAAUqtwABsQAAAAEADAAAAAYAAQAAAAMAAQANAAoAAQALAAAAIgADAAIAAAAKuwACWRID" +
+ "twAEvwAAAAEADAAAAAYAAQAAAAUAAQAOAAAAAgAP");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQASXs5yszuhud+/w4q07495k9eO7Yb+l8u4AgAAcAAAAHhWNBIAAAAAAAAAABgCAAAM" +
+ "AAAAcAAAAAUAAACgAAAAAgAAALQAAAABAAAAzAAAAAQAAADUAAAAAQAAAPQAAACkAQAAFAEAAFYB" +
+ "AABeAQAAbAEAAH8BAACTAQAApwEAAL4BAADPAQAA0gEAANYBAADqAQAA9AEAAAEAAAACAAAAAwAA" +
+ "AAQAAAAHAAAABwAAAAQAAAAAAAAACAAAAAQAAABQAQAAAAACAAoAAAAAAAEAAAAAAAAAAQALAAAA" +
+ "AQABAAAAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAGAAAAAAAAAAcCAAAAAAAAAgACAAEAAAD7" +
+ "AQAABAAAAHAQAwAAAA4ABAACAAIAAAABAgAACQAAACIAAQAbAQUAAABwIAIAEAAnAAAAAQAAAAMA" +
+ "Bjxpbml0PgAMTFRyYW5zZm9ybTQ7ABFMamF2YS9sYW5nL0Vycm9yOwASTGphdmEvbGFuZy9PYmpl" +
+ "Y3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAFVNob3VsZCBub3QgYmUgY2FsbGVkIQAPVHJhbnNmb3Jt" +
+ "NC5qYXZhAAFWAAJWTAASZW1pdHRlcjogamFjay00LjIyAAhncmVldGluZwAFc2F5SGkAAwEABw4A" +
+ "BQEABw4AAAEBAQACAIGABJQCAQGsAgANAAAAAAAAAAEAAAAAAAAAAQAAAAwAAABwAAAAAgAAAAUA" +
+ "AACgAAAAAwAAAAIAAAC0AAAABAAAAAEAAADMAAAABQAAAAQAAADUAAAABgAAAAEAAAD0AAAAASAA" +
+ "AAIAAAAUAQAAARAAAAEAAABQAQAAAiAAAAwAAABWAQAAAyAAAAIAAAD7AQAAACAAAAEAAAAHAgAA" +
+ "ABAAAAEAAAAYAgAA");
+
+ public static void doTest(Transform4 t) {
+ t.sayHi("FieldChange");
+ try {
+ Main.doCommonClassRedefinition(Transform4.class, CLASS_BYTES, DEX_BYTES);
+ } catch (Exception e) {
+ System.out.println(
+ "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+ }
+ t.sayHi("FieldChange");
+ }
+}
diff --git a/test/921-hello-failure/src/Main.java b/test/921-hello-failure/src/Main.java
index 67ca1e1..61d69e7 100644
--- a/test/921-hello-failure/src/Main.java
+++ b/test/921-hello-failure/src/Main.java
@@ -25,6 +25,12 @@
ReorderInterface.doTest(new Transform2());
MultiRedef.doTest(new Transform(), new Transform2());
MultiRetrans.doTest(new Transform(), new Transform2());
+ NewMethod.doTest(new Transform());
+ MissingMethod.doTest(new Transform3());
+ MethodChange.doTest(new Transform());
+ NewField.doTest(new Transform());
+ MissingField.doTest(new Transform4("there"));
+ FieldChange.doTest(new Transform4("there again"));
}
// Transforms the class. This throws an exception if something goes wrong.
diff --git a/test/921-hello-failure/src/MethodChange.java b/test/921-hello-failure/src/MethodChange.java
new file mode 100644
index 0000000..16f5778
--- /dev/null
+++ b/test/921-hello-failure/src/MethodChange.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class MethodChange {
+ // The following is a base64 encoding of the following class.
+ // class Transform {
+ // void sayHi(String name) {
+ // throw new Error("Should not be called!");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAFQoABgAPBwAQCAARCgACABIHABMHABQBAAY8aW5pdD4BAAMoKVYBAARDb2RlAQAP" +
+ "TGluZU51bWJlclRhYmxlAQAFc2F5SGkBABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBAApTb3VyY2VG" +
+ "aWxlAQAOVHJhbnNmb3JtLmphdmEMAAcACAEAD2phdmEvbGFuZy9FcnJvcgEAFVNob3VsZCBub3Qg" +
+ "YmUgY2FsbGVkIQwABwAMAQAJVHJhbnNmb3JtAQAQamF2YS9sYW5nL09iamVjdAAgAAUABgAAAAAA" +
+ "AgAAAAcACAABAAkAAAAdAAEAAQAAAAUqtwABsQAAAAEACgAAAAYAAQAAAAIAAAALAAwAAQAJAAAA" +
+ "IgADAAIAAAAKuwACWRIDtwAEvwAAAAEACgAAAAYAAQAAAAQAAQANAAAAAgAO");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQCrV81cy4Q+YKMMMqc0bZEO5Y1X5u7irPeQAgAAcAAAAHhWNBIAAAAAAAAAAPwBAAAL" +
+ "AAAAcAAAAAUAAACcAAAAAgAAALAAAAAAAAAAAAAAAAQAAADIAAAAAQAAAOgAAACIAQAACAEAAEoB" +
+ "AABSAQAAXwEAAHIBAACGAQAAmgEAALEBAADBAQAAxAEAAMgBAADcAQAAAQAAAAIAAAADAAAABAAA" +
+ "AAcAAAAHAAAABAAAAAAAAAAIAAAABAAAAEQBAAAAAAAAAAAAAAAAAQAKAAAAAQABAAAAAAACAAAA" +
+ "AAAAAAAAAAAAAAAAAgAAAAAAAAAGAAAAAAAAAO4BAAAAAAAAAQABAAEAAADjAQAABAAAAHAQAwAA" +
+ "AA4ABAACAAIAAADoAQAACQAAACIAAQAbAQUAAABwIAIAEAAnAAAAAQAAAAMABjxpbml0PgALTFRy" +
+ "YW5zZm9ybTsAEUxqYXZhL2xhbmcvRXJyb3I7ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xh" +
+ "bmcvU3RyaW5nOwAVU2hvdWxkIG5vdCBiZSBjYWxsZWQhAA5UcmFuc2Zvcm0uamF2YQABVgACVkwA" +
+ "EmVtaXR0ZXI6IGphY2stNC4yNAAFc2F5SGkAAgAHDgAEAQAHDgAAAAEBAICABIgCAQCgAgwAAAAA" +
+ "AAAAAQAAAAAAAAABAAAACwAAAHAAAAACAAAABQAAAJwAAAADAAAAAgAAALAAAAAFAAAABAAAAMgA" +
+ "AAAGAAAAAQAAAOgAAAABIAAAAgAAAAgBAAABEAAAAQAAAEQBAAACIAAACwAAAEoBAAADIAAAAgAA" +
+ "AOMBAAAAIAAAAQAAAO4BAAAAEAAAAQAAAPwBAAA=");
+
+ public static void doTest(Transform t) {
+ t.sayHi("MethodChange");
+ try {
+ Main.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ } catch (Exception e) {
+ System.out.println(
+ "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+ }
+ t.sayHi("MethodChange");
+ }
+}
diff --git a/test/921-hello-failure/src/MissingField.java b/test/921-hello-failure/src/MissingField.java
new file mode 100644
index 0000000..2f643cc
--- /dev/null
+++ b/test/921-hello-failure/src/MissingField.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class MissingField {
+ // The following is a base64 encoding of the following class.
+ // class Transform4 {
+ // public Transform4(String s) { }
+ // public void sayHi(String name) {
+ // throw new Error("Should not be called!");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAFQoABgAOBwAPCAAQCgACABEHABIHABMBAAY8aW5pdD4BABUoTGphdmEvbGFuZy9T" +
+ "dHJpbmc7KVYBAARDb2RlAQAPTGluZU51bWJlclRhYmxlAQAFc2F5SGkBAApTb3VyY2VGaWxlAQAP" +
+ "VHJhbnNmb3JtNC5qYXZhDAAHABQBAA9qYXZhL2xhbmcvRXJyb3IBABVTaG91bGQgbm90IGJlIGNh" +
+ "bGxlZCEMAAcACAEAClRyYW5zZm9ybTQBABBqYXZhL2xhbmcvT2JqZWN0AQADKClWACAABQAGAAAA" +
+ "AAACAAEABwAIAAEACQAAAB0AAQACAAAABSq3AAGxAAAAAQAKAAAABgABAAAAAgABAAsACAABAAkA" +
+ "AAAiAAMAAgAAAAq7AAJZEgO3AAS/AAAAAQAKAAAABgABAAAABAABAAwAAAACAA0=");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQDBVUVrMUEFx3lYkgJF54evq9vHvOUDZveUAgAAcAAAAHhWNBIAAAAAAAAAAAACAAAL" +
+ "AAAAcAAAAAUAAACcAAAAAgAAALAAAAAAAAAAAAAAAAQAAADIAAAAAQAAAOgAAACMAQAACAEAAEoB" +
+ "AABSAQAAYAEAAHMBAACHAQAAmwEAALIBAADDAQAAxgEAAMoBAADeAQAAAQAAAAIAAAADAAAABAAA" +
+ "AAcAAAAHAAAABAAAAAAAAAAIAAAABAAAAEQBAAAAAAEAAAAAAAAAAQAKAAAAAQABAAAAAAACAAAA" +
+ "AAAAAAAAAAAAAAAAAgAAAAAAAAAGAAAAAAAAAPEBAAAAAAAAAgACAAEAAADlAQAABAAAAHAQAwAA" +
+ "AA4ABAACAAIAAADrAQAACQAAACIAAQAbAQUAAABwIAIAEAAnAAAAAQAAAAMABjxpbml0PgAMTFRy" +
+ "YW5zZm9ybTQ7ABFMamF2YS9sYW5nL0Vycm9yOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2YS9s" +
+ "YW5nL1N0cmluZzsAFVNob3VsZCBub3QgYmUgY2FsbGVkIQAPVHJhbnNmb3JtNC5qYXZhAAFWAAJW" +
+ "TAASZW1pdHRlcjogamFjay00LjIyAAVzYXlIaQACAQAHDgAEAQAHDgAAAAEBAIGABIgCAQGgAgAM" +
+ "AAAAAAAAAAEAAAAAAAAAAQAAAAsAAABwAAAAAgAAAAUAAACcAAAAAwAAAAIAAACwAAAABQAAAAQA" +
+ "AADIAAAABgAAAAEAAADoAAAAASAAAAIAAAAIAQAAARAAAAEAAABEAQAAAiAAAAsAAABKAQAAAyAA" +
+ "AAIAAADlAQAAACAAAAEAAADxAQAAABAAAAEAAAAAAgAA");
+
+ public static void doTest(Transform4 t) {
+ t.sayHi("MissingField");
+ try {
+ Main.doCommonClassRedefinition(Transform4.class, CLASS_BYTES, DEX_BYTES);
+ } catch (Exception e) {
+ System.out.println(
+ "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+ }
+ t.sayHi("MissingField");
+ }
+}
diff --git a/test/921-hello-failure/src/MissingMethod.java b/test/921-hello-failure/src/MissingMethod.java
new file mode 100644
index 0000000..3f1925c
--- /dev/null
+++ b/test/921-hello-failure/src/MissingMethod.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class MissingMethod {
+ // The following is a base64 encoding of the following class.
+ // class Transform3 {
+ // public void sayHi(String name) {
+ // throw new Error("Should not be called!");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAFQoABgAPBwAQCAARCgACABIHABMHABQBAAY8aW5pdD4BAAMoKVYBAARDb2RlAQAP" +
+ "TGluZU51bWJlclRhYmxlAQAFc2F5SGkBABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBAApTb3VyY2VG" +
+ "aWxlAQAPVHJhbnNmb3JtMy5qYXZhDAAHAAgBAA9qYXZhL2xhbmcvRXJyb3IBABVTaG91bGQgbm90" +
+ "IGJlIGNhbGxlZCEMAAcADAEAClRyYW5zZm9ybTMBABBqYXZhL2xhbmcvT2JqZWN0ACAABQAGAAAA" +
+ "AAACAAAABwAIAAEACQAAAB0AAQABAAAABSq3AAGxAAAAAQAKAAAABgABAAAAAgABAAsADAABAAkA" +
+ "AAAiAAMAAgAAAAq7AAJZEgO3AAS/AAAAAQAKAAAABgABAAAABAABAA0AAAACAA4=");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQDnVQvyn7XrwDiCC/SE55zBCtEqk4pzA2mUAgAAcAAAAHhWNBIAAAAAAAAAAAACAAAL" +
+ "AAAAcAAAAAUAAACcAAAAAgAAALAAAAAAAAAAAAAAAAQAAADIAAAAAQAAAOgAAACMAQAACAEAAEoB" +
+ "AABSAQAAYAEAAHMBAACHAQAAmwEAALIBAADDAQAAxgEAAMoBAADeAQAAAQAAAAIAAAADAAAABAAA" +
+ "AAcAAAAHAAAABAAAAAAAAAAIAAAABAAAAEQBAAAAAAAAAAAAAAAAAQAKAAAAAQABAAAAAAACAAAA" +
+ "AAAAAAAAAAAAAAAAAgAAAAAAAAAGAAAAAAAAAPABAAAAAAAAAQABAAEAAADlAQAABAAAAHAQAwAA" +
+ "AA4ABAACAAIAAADqAQAACQAAACIAAQAbAQUAAABwIAIAEAAnAAAAAQAAAAMABjxpbml0PgAMTFRy" +
+ "YW5zZm9ybTM7ABFMamF2YS9sYW5nL0Vycm9yOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2YS9s" +
+ "YW5nL1N0cmluZzsAFVNob3VsZCBub3QgYmUgY2FsbGVkIQAPVHJhbnNmb3JtMy5qYXZhAAFWAAJW" +
+ "TAASZW1pdHRlcjogamFjay00LjI0AAVzYXlIaQACAAcOAAQBAAcOAAAAAQEAgIAEiAIBAaACAAAM" +
+ "AAAAAAAAAAEAAAAAAAAAAQAAAAsAAABwAAAAAgAAAAUAAACcAAAAAwAAAAIAAACwAAAABQAAAAQA" +
+ "AADIAAAABgAAAAEAAADoAAAAASAAAAIAAAAIAQAAARAAAAEAAABEAQAAAiAAAAsAAABKAQAAAyAA" +
+ "AAIAAADlAQAAACAAAAEAAADwAQAAABAAAAEAAAAAAgAA");
+
+ public static void doTest(Transform3 t) {
+ t.sayHi("MissingMethod");
+ try {
+ Main.doCommonClassRedefinition(Transform3.class, CLASS_BYTES, DEX_BYTES);
+ } catch (Exception e) {
+ System.out.println(
+ "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+ }
+ t.sayHi("MissingMethod");
+ }
+}
diff --git a/test/921-hello-failure/src/NewField.java b/test/921-hello-failure/src/NewField.java
new file mode 100644
index 0000000..c85b79e
--- /dev/null
+++ b/test/921-hello-failure/src/NewField.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class NewField {
+ // The following is a base64 encoding of the following class.
+ // class Transform {
+ // private Object field;
+ // public void sayHi(String name) {
+ // throw new Error("Should not be called!");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAFwoABgARBwASCAATCgACABQHABUHABYBAAVmaWVsZAEAEkxqYXZhL2xhbmcvT2Jq" +
+ "ZWN0OwEABjxpbml0PgEAAygpVgEABENvZGUBAA9MaW5lTnVtYmVyVGFibGUBAAVzYXlIaQEAFShM" +
+ "amF2YS9sYW5nL1N0cmluZzspVgEAClNvdXJjZUZpbGUBAA5UcmFuc2Zvcm0uamF2YQwACQAKAQAP" +
+ "amF2YS9sYW5nL0Vycm9yAQAVU2hvdWxkIG5vdCBiZSBjYWxsZWQhDAAJAA4BAAlUcmFuc2Zvcm0B" +
+ "ABBqYXZhL2xhbmcvT2JqZWN0ACAABQAGAAAAAQACAAcACAAAAAIAAAAJAAoAAQALAAAAHQABAAEA" +
+ "AAAFKrcAAbEAAAABAAwAAAAGAAEAAAABAAEADQAOAAEACwAAACIAAwACAAAACrsAAlkSA7cABL8A" +
+ "AAABAAwAAAAGAAEAAAAEAAEADwAAAAIAEA==");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQBNWknL2iyjim487p0EIH/8V5OjOeLgw5e0AgAAcAAAAHhWNBIAAAAAAAAAABQCAAAM" +
+ "AAAAcAAAAAUAAACgAAAAAgAAALQAAAABAAAAzAAAAAQAAADUAAAAAQAAAPQAAACgAQAAFAEAAFYB" +
+ "AABeAQAAawEAAH4BAACSAQAApgEAAL0BAADNAQAA0AEAANQBAADoAQAA7wEAAAEAAAACAAAAAwAA" +
+ "AAQAAAAHAAAABwAAAAQAAAAAAAAACAAAAAQAAABQAQAAAAACAAoAAAAAAAAAAAAAAAAAAQALAAAA" +
+ "AQABAAAAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAGAAAAAAAAAAECAAAAAAAAAQABAAEAAAD2" +
+ "AQAABAAAAHAQAwAAAA4ABAACAAIAAAD7AQAACQAAACIAAQAbAQUAAABwIAIAEAAnAAAAAQAAAAMA" +
+ "Bjxpbml0PgALTFRyYW5zZm9ybTsAEUxqYXZhL2xhbmcvRXJyb3I7ABJMamF2YS9sYW5nL09iamVj" +
+ "dDsAEkxqYXZhL2xhbmcvU3RyaW5nOwAVU2hvdWxkIG5vdCBiZSBjYWxsZWQhAA5UcmFuc2Zvcm0u" +
+ "amF2YQABVgACVkwAEmVtaXR0ZXI6IGphY2stNC4yMgAFZmllbGQABXNheUhpAAEABw4ABAEABw4A" +
+ "AAEBAQACAICABJQCAQGsAgAAAA0AAAAAAAAAAQAAAAAAAAABAAAADAAAAHAAAAACAAAABQAAAKAA" +
+ "AAADAAAAAgAAALQAAAAEAAAAAQAAAMwAAAAFAAAABAAAANQAAAAGAAAAAQAAAPQAAAABIAAAAgAA" +
+ "ABQBAAABEAAAAQAAAFABAAACIAAADAAAAFYBAAADIAAAAgAAAPYBAAAAIAAAAQAAAAECAAAAEAAA" +
+ "AQAAABQCAAA=");
+
+ public static void doTest(Transform t) {
+ t.sayHi("NewField");
+ try {
+ Main.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ } catch (Exception e) {
+ System.out.println(
+ "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+ }
+ t.sayHi("NewField");
+ }
+}
diff --git a/test/921-hello-failure/src/NewMethod.java b/test/921-hello-failure/src/NewMethod.java
new file mode 100644
index 0000000..5eac670
--- /dev/null
+++ b/test/921-hello-failure/src/NewMethod.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class NewMethod {
+ // The following is a base64 encoding of the following class.
+ // class Transform {
+ // public void extraMethod() {}
+ // public void sayHi(String name) {
+ // throw new Error("Should not be called!");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAFgoABgAQBwARCAASCgACABMHABQHABUBAAY8aW5pdD4BAAMoKVYBAARDb2RlAQAP" +
+ "TGluZU51bWJlclRhYmxlAQALZXh0cmFNZXRob2QBAAVzYXlIaQEAFShMamF2YS9sYW5nL1N0cmlu" +
+ "ZzspVgEAClNvdXJjZUZpbGUBAA5UcmFuc2Zvcm0uamF2YQwABwAIAQAPamF2YS9sYW5nL0Vycm9y" +
+ "AQAVU2hvdWxkIG5vdCBiZSBjYWxsZWQhDAAHAA0BAAlUcmFuc2Zvcm0BABBqYXZhL2xhbmcvT2Jq" +
+ "ZWN0ACAABQAGAAAAAAADAAAABwAIAAEACQAAAB0AAQABAAAABSq3AAGxAAAAAQAKAAAABgABAAAA" +
+ "AQABAAsACAABAAkAAAAZAAAAAQAAAAGxAAAAAQAKAAAABgABAAAAAgABAAwADQABAAkAAAAiAAMA" +
+ "AgAAAAq7AAJZEgO3AAS/AAAAAQAKAAAABgABAAAABAABAA4AAAACAA8=");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQBeV7dLAwN1GBTa/yRlkuiIQatNHghVdrnIAgAAcAAAAHhWNBIAAAAAAAAAADQCAAAM" +
+ "AAAAcAAAAAUAAACgAAAAAgAAALQAAAAAAAAAAAAAAAUAAADMAAAAAQAAAPQAAAC0AQAAFAEAAGoB" +
+ "AAByAQAAfwEAAJIBAACmAQAAugEAANEBAADhAQAA5AEAAOgBAAD8AQAACQIAAAEAAAACAAAAAwAA" +
+ "AAQAAAAHAAAABwAAAAQAAAAAAAAACAAAAAQAAABkAQAAAAAAAAAAAAAAAAAACgAAAAAAAQALAAAA" +
+ "AQABAAAAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAGAAAAAAAAACACAAAAAAAAAQABAAEAAAAQ" +
+ "AgAABAAAAHAQBAAAAA4AAQABAAAAAAAVAgAAAQAAAA4AAAAEAAIAAgAAABoCAAAJAAAAIgABABsB" +
+ "BQAAAHAgAwAQACcAAAABAAAAAwAGPGluaXQ+AAtMVHJhbnNmb3JtOwARTGphdmEvbGFuZy9FcnJv" +
+ "cjsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABVTaG91bGQgbm90IGJl" +
+ "IGNhbGxlZCEADlRyYW5zZm9ybS5qYXZhAAFWAAJWTAASZW1pdHRlcjogamFjay00LjIyAAtleHRy" +
+ "YU1ldGhvZAAFc2F5SGkAAQAHDgACAAcOAAQBAAcOAAAAAQIAgIAElAIBAawCAQHAAgAADAAAAAAA" +
+ "AAABAAAAAAAAAAEAAAAMAAAAcAAAAAIAAAAFAAAAoAAAAAMAAAACAAAAtAAAAAUAAAAFAAAAzAAA" +
+ "AAYAAAABAAAA9AAAAAEgAAADAAAAFAEAAAEQAAABAAAAZAEAAAIgAAAMAAAAagEAAAMgAAADAAAA" +
+ "EAIAAAAgAAABAAAAIAIAAAAQAAABAAAANAIAAA==");
+
+ public static void doTest(Transform t) {
+ t.sayHi("NewMethod");
+ try {
+ Main.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ } catch (Exception e) {
+ System.out.println(
+ "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+ }
+ t.sayHi("NewMethod");
+ }
+}
diff --git a/test/921-hello-failure/src/Transform3.java b/test/921-hello-failure/src/Transform3.java
new file mode 100644
index 0000000..d2cb064
--- /dev/null
+++ b/test/921-hello-failure/src/Transform3.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform3 {
+ public void extraMethod(String name) {
+ System.out.println("extraMethod - " + name);
+ }
+ public void sayHi(String name) {
+ System.out.println("hello2 - " + name);
+ }
+}
diff --git a/test/921-hello-failure/src/Transform4.java b/test/921-hello-failure/src/Transform4.java
new file mode 100644
index 0000000..fd76338
--- /dev/null
+++ b/test/921-hello-failure/src/Transform4.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform4 {
+ private String greeting;
+ public Transform4(String hi) {
+ greeting = hi;
+ }
+ public void sayHi(String name) {
+ System.out.println("hello " + greeting + " - " + name);
+ }
+}
diff --git a/test/922-properties/run b/test/922-properties/run
index 4379349..c6e62ae 100755
--- a/test/922-properties/run
+++ b/test/922-properties/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/923-monitors/run b/test/923-monitors/run
index 4379349..c6e62ae 100755
--- a/test/923-monitors/run
+++ b/test/923-monitors/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/924-threads/run b/test/924-threads/run
index 4379349..c6e62ae 100755
--- a/test/924-threads/run
+++ b/test/924-threads/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/924-threads/src/Main.java b/test/924-threads/src/Main.java
index 29c4aa3..f18d70e 100644
--- a/test/924-threads/src/Main.java
+++ b/test/924-threads/src/Main.java
@@ -20,6 +20,7 @@
import java.util.Comparator;
import java.util.concurrent.CountDownLatch;
import java.util.HashMap;
+import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -162,8 +163,20 @@
private static void doAllThreadsTests() {
Thread[] threads = getAllThreads();
- Arrays.sort(threads, THREAD_COMP);
- System.out.println(Arrays.toString(threads));
+ List<Thread> threadList = new ArrayList<>(Arrays.asList(threads));
+
+ // Filter out JIT thread. It may or may not be there depending on configuration.
+ Iterator<Thread> it = threadList.iterator();
+ while (it.hasNext()) {
+ Thread t = it.next();
+ if (t.getName().startsWith("Jit thread pool worker")) {
+ it.remove();
+ break;
+ }
+ }
+
+ Collections.sort(threadList, THREAD_COMP);
+ System.out.println(threadList);
}
private static void doTLSTests() throws Exception {
diff --git a/test/925-threadgroups/run b/test/925-threadgroups/run
index 4379349..c6e62ae 100755
--- a/test/925-threadgroups/run
+++ b/test/925-threadgroups/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/925-threadgroups/src/Main.java b/test/925-threadgroups/src/Main.java
index 3d7a4ca..bf7441f 100644
--- a/test/925-threadgroups/src/Main.java
+++ b/test/925-threadgroups/src/Main.java
@@ -14,8 +14,12 @@
* limitations under the License.
*/
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
public class Main {
public static void main(String[] args) throws Exception {
@@ -64,10 +68,23 @@
Thread[] threads = (Thread[])data[0];
ThreadGroup[] groups = (ThreadGroup[])data[1];
- Arrays.sort(threads, THREAD_COMP);
+ List<Thread> threadList = new ArrayList<>(Arrays.asList(threads));
+
+ // Filter out JIT thread. It may or may not be there depending on configuration.
+ Iterator<Thread> it = threadList.iterator();
+ while (it.hasNext()) {
+ Thread t = it.next();
+ if (t.getName().startsWith("Jit thread pool worker")) {
+ it.remove();
+ break;
+ }
+ }
+
+ Collections.sort(threadList, THREAD_COMP);
+
Arrays.sort(groups, THREADGROUP_COMP);
System.out.println(tg.getName() + ":");
- System.out.println(" " + Arrays.toString(threads));
+ System.out.println(" " + threadList);
System.out.println(" " + Arrays.toString(groups));
if (tg.getParent() != null) {
diff --git a/test/926-multi-obsolescence/run b/test/926-multi-obsolescence/run
index 4379349..c6e62ae 100755
--- a/test/926-multi-obsolescence/run
+++ b/test/926-multi-obsolescence/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/927-timers/run b/test/927-timers/run
index 4379349..c6e62ae 100755
--- a/test/927-timers/run
+++ b/test/927-timers/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/928-jni-table/run b/test/928-jni-table/run
index 4379349..c6e62ae 100755
--- a/test/928-jni-table/run
+++ b/test/928-jni-table/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/929-search/run b/test/929-search/run
index 0a8d067..67923a7 100755
--- a/test/929-search/run
+++ b/test/929-search/run
@@ -17,7 +17,5 @@
# This test checks whether dex files can be injected into parent classloaders. App images preload
# classes, which will make the injection moot. Turn off app images to avoid the issue.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti \
+./default-run "$@" --jvmti \
--no-app-image
diff --git a/test/930-hello-retransform/run b/test/930-hello-retransform/run
index 4379349..c6e62ae 100755
--- a/test/930-hello-retransform/run
+++ b/test/930-hello-retransform/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/931-agent-thread/run b/test/931-agent-thread/run
index 0a8d067..67923a7 100755
--- a/test/931-agent-thread/run
+++ b/test/931-agent-thread/run
@@ -17,7 +17,5 @@
# This test checks whether dex files can be injected into parent classloaders. App images preload
# classes, which will make the injection moot. Turn off app images to avoid the issue.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti \
+./default-run "$@" --jvmti \
--no-app-image
diff --git a/test/932-transform-saves/run b/test/932-transform-saves/run
index 4379349..c6e62ae 100755
--- a/test/932-transform-saves/run
+++ b/test/932-transform-saves/run
@@ -14,6 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/933-misc-events/run b/test/933-misc-events/run
index 0a8d067..67923a7 100755
--- a/test/933-misc-events/run
+++ b/test/933-misc-events/run
@@ -17,7 +17,5 @@
# This test checks whether dex files can be injected into parent classloaders. App images preload
# classes, which will make the injection moot. Turn off app images to avoid the issue.
-./default-run "$@" --experimental agents \
- --experimental runtime-plugins \
- --jvmti \
+./default-run "$@" --jvmti \
--no-app-image
diff --git a/test/934-load-transform/build b/test/934-load-transform/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/934-load-transform/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/934-load-transform/expected.txt b/test/934-load-transform/expected.txt
new file mode 100644
index 0000000..2b60207
--- /dev/null
+++ b/test/934-load-transform/expected.txt
@@ -0,0 +1 @@
+Goodbye
diff --git a/test/934-load-transform/info.txt b/test/934-load-transform/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/934-load-transform/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/934-load-transform/run b/test/934-load-transform/run
new file mode 100755
index 0000000..adb1a1c
--- /dev/null
+++ b/test/934-load-transform/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --no-app-image
diff --git a/test/934-load-transform/src-ex/TestMain.java b/test/934-load-transform/src-ex/TestMain.java
new file mode 100644
index 0000000..33be9cd
--- /dev/null
+++ b/test/934-load-transform/src-ex/TestMain.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestMain {
+ public static void runTest() {
+ new Transform().sayHi();
+ }
+}
diff --git a/test/934-load-transform/src-ex/Transform.java b/test/934-load-transform/src-ex/Transform.java
new file mode 100644
index 0000000..f624c3a
--- /dev/null
+++ b/test/934-load-transform/src-ex/Transform.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform {
+ public void sayHi() {
+ throw new Error("Should not be called!");
+ }
+}
diff --git a/test/934-load-transform/src/Main.java b/test/934-load-transform/src/Main.java
new file mode 100644
index 0000000..de312b0
--- /dev/null
+++ b/test/934-load-transform/src/Main.java
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.*;
+import java.util.Base64;
+
+class Main {
+ public static String TEST_NAME = "934-load-transform";
+
+ /**
+ * base64 encoded class/dex file for
+ * class Transform {
+ * public void sayHi() {
+ * System.out.println("Goodbye");
+ * }
+ * }
+ */
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAHAoABgAOCQAPABAIABEKABIAEwcAFAcAFQEABjxpbml0PgEAAygpVgEABENvZGUB" +
+ "AA9MaW5lTnVtYmVyVGFibGUBAAVzYXlIaQEAClNvdXJjZUZpbGUBAA5UcmFuc2Zvcm0uamF2YQwA" +
+ "BwAIBwAWDAAXABgBAAdHb29kYnllBwAZDAAaABsBAAlUcmFuc2Zvcm0BABBqYXZhL2xhbmcvT2Jq" +
+ "ZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291dAEAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwEAE2ph" +
+ "dmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxuAQAVKExqYXZhL2xhbmcvU3RyaW5nOylWACAABQAG" +
+ "AAAAAAACAAAABwAIAAEACQAAAB0AAQABAAAABSq3AAGxAAAAAQAKAAAABgABAAAAEQABAAsACAAB" +
+ "AAkAAAAlAAIAAQAAAAmyAAISA7YABLEAAAABAAoAAAAKAAIAAAATAAgAFAABAAwAAAACAA0=");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQCLXSBQ5FiS3f16krSYZFF8xYZtFVp0GRXMAgAAcAAAAHhWNBIAAAAAAAAAACwCAAAO" +
+ "AAAAcAAAAAYAAACoAAAAAgAAAMAAAAABAAAA2AAAAAQAAADgAAAAAQAAAAABAACsAQAAIAEAAGIB" +
+ "AABqAQAAcwEAAIABAACXAQAAqwEAAL8BAADTAQAA4wEAAOYBAADqAQAA/gEAAAMCAAAMAgAAAgAA" +
+ "AAMAAAAEAAAABQAAAAYAAAAIAAAACAAAAAUAAAAAAAAACQAAAAUAAABcAQAABAABAAsAAAAAAAAA" +
+ "AAAAAAAAAAANAAAAAQABAAwAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAHAAAAAAAAAB4CAAAA" +
+ "AAAAAQABAAEAAAATAgAABAAAAHAQAwAAAA4AAwABAAIAAAAYAgAACQAAAGIAAAAbAQEAAABuIAIA" +
+ "EAAOAAAAAQAAAAMABjxpbml0PgAHR29vZGJ5ZQALTFRyYW5zZm9ybTsAFUxqYXZhL2lvL1ByaW50" +
+ "U3RyZWFtOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAEkxqYXZhL2xh" +
+ "bmcvU3lzdGVtOwAOVHJhbnNmb3JtLmphdmEAAVYAAlZMABJlbWl0dGVyOiBqYWNrLTMuMzYAA291" +
+ "dAAHcHJpbnRsbgAFc2F5SGkAEQAHDgATAAcOhQAAAAEBAICABKACAQG4Ag0AAAAAAAAAAQAAAAAA" +
+ "AAABAAAADgAAAHAAAAACAAAABgAAAKgAAAADAAAAAgAAAMAAAAAEAAAAAQAAANgAAAAFAAAABAAA" +
+ "AOAAAAAGAAAAAQAAAAABAAABIAAAAgAAACABAAABEAAAAQAAAFwBAAACIAAADgAAAGIBAAADIAAA" +
+ "AgAAABMCAAAAIAAAAQAAAB4CAAAAEAAAAQAAACwCAAA=");
+
+ public static ClassLoader getClassLoaderFor(String location) throws Exception {
+ try {
+ Class<?> class_loader_class = Class.forName("dalvik.system.PathClassLoader");
+ Constructor<?> ctor = class_loader_class.getConstructor(String.class, ClassLoader.class);
+ /* on Dalvik, this is a DexFile; otherwise, it's null */
+ return (ClassLoader)ctor.newInstance(location + "/" + TEST_NAME + "-ex.jar",
+ Main.class.getClassLoader());
+ } catch (ClassNotFoundException e) {
+ // Running on RI. Use URLClassLoader.
+ return new java.net.URLClassLoader(
+ new java.net.URL[] { new java.net.URL("file://" + location + "/classes-ex/") });
+ }
+ }
+
+ public static void main(String[] args) {
+ // Don't pop transformations. Make sure that even if 2 threads race to define the class both
+ // will get the same result.
+ setPopRetransformations(false);
+ addCommonTransformationResult("Transform", CLASS_BYTES, DEX_BYTES);
+ enableCommonRetransformation(true);
+ try {
+ /* this is the "alternate" DEX/Jar file */
+ ClassLoader new_loader = getClassLoaderFor(System.getenv("DEX_LOCATION"));
+ Class<?> klass = (Class<?>)new_loader.loadClass("TestMain");
+ if (klass == null) {
+ throw new AssertionError("loadClass failed");
+ }
+ Method run_test = klass.getMethod("runTest");
+ run_test.invoke(null);
+ } catch (Exception e) {
+ System.out.println(e.toString());
+ e.printStackTrace();
+ }
+ }
+
+ private static native void setPopRetransformations(boolean should_pop);
+ // Transforms the class
+ private static native void enableCommonRetransformation(boolean enable);
+ private static native void addCommonTransformationResult(String target_name,
+ byte[] class_bytes,
+ byte[] dex_bytes);
+}
diff --git a/test/935-non-retransformable/build b/test/935-non-retransformable/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/935-non-retransformable/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/935-non-retransformable/expected.txt b/test/935-non-retransformable/expected.txt
new file mode 100644
index 0000000..ccd50a6
--- /dev/null
+++ b/test/935-non-retransformable/expected.txt
@@ -0,0 +1,6 @@
+Hello
+Hello
+Goodbye
+Hello
+Hello
+Goodbye
diff --git a/test/935-non-retransformable/info.txt b/test/935-non-retransformable/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/935-non-retransformable/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/935-non-retransformable/run b/test/935-non-retransformable/run
new file mode 100755
index 0000000..adb1a1c
--- /dev/null
+++ b/test/935-non-retransformable/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --no-app-image
diff --git a/test/935-non-retransformable/src-ex/TestMain.java b/test/935-non-retransformable/src-ex/TestMain.java
new file mode 100644
index 0000000..d412fba
--- /dev/null
+++ b/test/935-non-retransformable/src-ex/TestMain.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class TestMain {
+ public static void runTest() throws Exception {
+ Transform t = new Transform();
+ // Call functions with reflection. Since the sayGoodbye function does not exist in the
+ // LTransform; when we compile this for the first time we need to use reflection.
+ Method hi = Transform.class.getMethod("sayHi");
+ Method bye = Transform.class.getMethod("sayGoodbye");
+ hi.invoke(t);
+ t.sayHi();
+ bye.invoke(t);
+ }
+}
diff --git a/test/935-non-retransformable/src-ex/Transform.java b/test/935-non-retransformable/src-ex/Transform.java
new file mode 100644
index 0000000..f624c3a
--- /dev/null
+++ b/test/935-non-retransformable/src-ex/Transform.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform {
+ public void sayHi() {
+ throw new Error("Should not be called!");
+ }
+}
diff --git a/test/935-non-retransformable/src/Main.java b/test/935-non-retransformable/src/Main.java
new file mode 100644
index 0000000..82ba197
--- /dev/null
+++ b/test/935-non-retransformable/src/Main.java
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.*;
+import java.util.Base64;
+
+class Main {
+ public static String TEST_NAME = "935-non-retransformable";
+
+ /**
+ * base64 encoded class/dex file for
+ * class Transform {
+ * public void sayHi() {
+ * System.out.println("Hello");
+ * }
+ * public void sayGoodbye() {
+ * System.out.println("Goodbye");
+ * }
+ * }
+ */
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAHwoABwAQCQARABIIABMKABQAFQgAFgcAFwcAGAEABjxpbml0PgEAAygpVgEABENv" +
+ "ZGUBAA9MaW5lTnVtYmVyVGFibGUBAAVzYXlIaQEACnNheUdvb2RieWUBAApTb3VyY2VGaWxlAQAO" +
+ "VHJhbnNmb3JtLmphdmEMAAgACQcAGQwAGgAbAQAFSGVsbG8HABwMAB0AHgEAB0dvb2RieWUBAAlU" +
+ "cmFuc2Zvcm0BABBqYXZhL2xhbmcvT2JqZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291dAEAFUxq" +
+ "YXZhL2lvL1ByaW50U3RyZWFtOwEAE2phdmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxuAQAVKExq" +
+ "YXZhL2xhbmcvU3RyaW5nOylWACAABgAHAAAAAAADAAAACAAJAAEACgAAAB0AAQABAAAABSq3AAGx" +
+ "AAAAAQALAAAABgABAAAAAQABAAwACQABAAoAAAAlAAIAAQAAAAmyAAISA7YABLEAAAABAAsAAAAK" +
+ "AAIAAAADAAgABAABAA0ACQABAAoAAAAlAAIAAQAAAAmyAAISBbYABLEAAAABAAsAAAAKAAIAAAAG" +
+ "AAgABwABAA4AAAACAA8=");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQDpaN+7jX/ZLl9Jr0HAEV7nqL1YDuakKakgAwAAcAAAAHhWNBIAAAAAAAAAAIACAAAQ" +
+ "AAAAcAAAAAYAAACwAAAAAgAAAMgAAAABAAAA4AAAAAUAAADoAAAAAQAAABABAADwAQAAMAEAAJYB" +
+ "AACeAQAApwEAAK4BAAC7AQAA0gEAAOYBAAD6AQAADgIAAB4CAAAhAgAAJQIAADkCAAA+AgAARwIA" +
+ "AFMCAAADAAAABAAAAAUAAAAGAAAABwAAAAkAAAAJAAAABQAAAAAAAAAKAAAABQAAAJABAAAEAAEA" +
+ "DAAAAAAAAAAAAAAAAAAAAA4AAAAAAAAADwAAAAEAAQANAAAAAgAAAAAAAAAAAAAAAAAAAAIAAAAA" +
+ "AAAACAAAAAAAAABrAgAAAAAAAAEAAQABAAAAWgIAAAQAAABwEAQAAAAOAAMAAQACAAAAXwIAAAkA" +
+ "AABiAAAAGwEBAAAAbiADABAADgAAAAMAAQACAAAAZQIAAAkAAABiAAAAGwECAAAAbiADABAADgAA" +
+ "AAEAAAADAAY8aW5pdD4AB0dvb2RieWUABUhlbGxvAAtMVHJhbnNmb3JtOwAVTGphdmEvaW8vUHJp" +
+ "bnRTdHJlYW07ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xhbmcvU3RyaW5nOwASTGphdmEv" +
+ "bGFuZy9TeXN0ZW07AA5UcmFuc2Zvcm0uamF2YQABVgACVkwAEmVtaXR0ZXI6IGphY2stNC4yMgAD" +
+ "b3V0AAdwcmludGxuAApzYXlHb29kYnllAAVzYXlIaQABAAcOAAYABw6HAAMABw6HAAAAAQIAgIAE" +
+ "sAIBAcgCAQHsAgAAAA0AAAAAAAAAAQAAAAAAAAABAAAAEAAAAHAAAAACAAAABgAAALAAAAADAAAA" +
+ "AgAAAMgAAAAEAAAAAQAAAOAAAAAFAAAABQAAAOgAAAAGAAAAAQAAABABAAABIAAAAwAAADABAAAB" +
+ "EAAAAQAAAJABAAACIAAAEAAAAJYBAAADIAAAAwAAAFoCAAAAIAAAAQAAAGsCAAAAEAAAAQAAAIAC" +
+ "AAA=");
+
+
+ public static ClassLoader getClassLoaderFor(String location) throws Exception {
+ try {
+ Class<?> class_loader_class = Class.forName("dalvik.system.PathClassLoader");
+ Constructor<?> ctor = class_loader_class.getConstructor(String.class, ClassLoader.class);
+ /* on Dalvik, this is a DexFile; otherwise, it's null */
+ return (ClassLoader)ctor.newInstance(location + "/" + TEST_NAME + "-ex.jar",
+ Main.class.getClassLoader());
+ } catch (ClassNotFoundException e) {
+ // Running on RI. Use URLClassLoader.
+ return new java.net.URLClassLoader(
+ new java.net.URL[] { new java.net.URL("file://" + location + "/classes-ex/") });
+ }
+ }
+
+ public static void main(String[] args) {
+ setPopRetransformations(false);
+ addCommonTransformationResult("Transform", CLASS_BYTES, DEX_BYTES);
+ enableCommonRetransformation(true);
+ try {
+ /* this is the "alternate" DEX/Jar file */
+ ClassLoader new_loader = getClassLoaderFor(System.getenv("DEX_LOCATION"));
+ Class<?> klass = (Class<?>)new_loader.loadClass("TestMain");
+ if (klass == null) {
+ throw new AssertionError("loadClass failed");
+ }
+ Method run_test = klass.getMethod("runTest");
+ run_test.invoke(null);
+
+ // Remove the original transformation. It has been used by now.
+ popTransformationFor("Transform");
+ // Make sure we don't get called for transformation again.
+ addCommonTransformationResult("Transform", new byte[0], new byte[0]);
+ doCommonClassRetransformation(new_loader.loadClass("Transform"));
+ run_test.invoke(null);
+ } catch (Exception e) {
+ System.out.println(e.toString());
+ e.printStackTrace();
+ }
+ }
+
+ // Transforms the class
+ private static native void doCommonClassRetransformation(Class<?>... classes);
+ private static native void enableCommonRetransformation(boolean enable);
+ private static native void addCommonTransformationResult(String target_name,
+ byte[] class_bytes,
+ byte[] dex_bytes);
+ private static native void setPopRetransformations(boolean should_pop);
+ private static native void popTransformationFor(String target_name);
+}
diff --git a/test/936-search-onload/build b/test/936-search-onload/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/936-search-onload/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/936-search-onload/expected.txt b/test/936-search-onload/expected.txt
new file mode 100644
index 0000000..2eec8e1
--- /dev/null
+++ b/test/936-search-onload/expected.txt
@@ -0,0 +1,3 @@
+B was loaded with boot classloader
+A was loaded with system classloader
+Done
diff --git a/test/936-search-onload/info.txt b/test/936-search-onload/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/936-search-onload/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/936-search-onload/run b/test/936-search-onload/run
new file mode 100755
index 0000000..67923a7
--- /dev/null
+++ b/test/936-search-onload/run
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This test checks whether dex files can be injected into parent classloaders. App images preload
+# classes, which will make the injection moot. Turn off app images to avoid the issue.
+
+./default-run "$@" --jvmti \
+ --no-app-image
diff --git a/test/936-search-onload/search_onload.cc b/test/936-search-onload/search_onload.cc
new file mode 100644
index 0000000..2286a46
--- /dev/null
+++ b/test/936-search-onload/search_onload.cc
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "search_onload.h"
+
+#include <inttypes.h>
+
+#include "android-base/stringprintf.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+#include "ScopedUtfChars.h"
+
+#include "ti-agent/common_helper.h"
+#include "ti-agent/common_load.h"
+
+namespace art {
+namespace Test936SearchOnload {
+
+jint OnLoad(JavaVM* vm,
+ char* options ATTRIBUTE_UNUSED,
+ void* reserved ATTRIBUTE_UNUSED) {
+ if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
+ printf("Unable to get jvmti env!\n");
+ return 1;
+ }
+ SetAllCapabilities(jvmti_env);
+
+ char* dex_loc = getenv("DEX_LOCATION");
+ std::string dex1 = android::base::StringPrintf("%s/936-search-onload.jar", dex_loc);
+ std::string dex2 = android::base::StringPrintf("%s/936-search-onload-ex.jar", dex_loc);
+
+ jvmtiError result = jvmti_env->AddToBootstrapClassLoaderSearch(dex1.c_str());
+ if (result != JVMTI_ERROR_NONE) {
+ printf("Could not add to bootstrap classloader.\n");
+ return 1;
+ }
+
+ result = jvmti_env->AddToSystemClassLoaderSearch(dex2.c_str());
+ if (result != JVMTI_ERROR_NONE) {
+ printf("Could not add to system classloader.\n");
+ return 1;
+ }
+
+ return JNI_OK;
+}
+
+} // namespace Test936SearchOnload
+} // namespace art
diff --git a/test/936-search-onload/search_onload.h b/test/936-search-onload/search_onload.h
new file mode 100644
index 0000000..e556892
--- /dev/null
+++ b/test/936-search-onload/search_onload.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_936_SEARCH_ONLOAD_SEARCH_ONLOAD_H_
+#define ART_TEST_936_SEARCH_ONLOAD_SEARCH_ONLOAD_H_
+
+#include <jni.h>
+
+namespace art {
+namespace Test936SearchOnload {
+
+jint OnLoad(JavaVM* vm, char* options, void* reserved);
+
+} // namespace Test936SearchOnload
+} // namespace art
+
+#endif // ART_TEST_936_SEARCH_ONLOAD_SEARCH_ONLOAD_H_
diff --git a/test/936-search-onload/src-ex/A.java b/test/936-search-onload/src-ex/A.java
new file mode 100644
index 0000000..64acb2f
--- /dev/null
+++ b/test/936-search-onload/src-ex/A.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class A {
+}
\ No newline at end of file
diff --git a/test/936-search-onload/src/B.java b/test/936-search-onload/src/B.java
new file mode 100644
index 0000000..f1458c3
--- /dev/null
+++ b/test/936-search-onload/src/B.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class B {
+}
\ No newline at end of file
diff --git a/test/936-search-onload/src/Main.java b/test/936-search-onload/src/Main.java
new file mode 100644
index 0000000..2e7a871
--- /dev/null
+++ b/test/936-search-onload/src/Main.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ doTest();
+ }
+
+ private static void doTest() throws Exception {
+ doTest(true, "B");
+ doTest(false, "A");
+ System.out.println("Done");
+ }
+
+ private static void doTest(boolean boot, String className) throws Exception {
+ ClassLoader expectedClassLoader;
+ if (boot) {
+ expectedClassLoader = Object.class.getClassLoader();
+ } else {
+ expectedClassLoader = ClassLoader.getSystemClassLoader();
+ }
+
+ Class<?> c = Class.forName(className, false, ClassLoader.getSystemClassLoader());
+ if (c.getClassLoader() != expectedClassLoader) {
+ throw new RuntimeException(className + "(" + boot + "): " +
+ c.getClassLoader() + " vs " + expectedClassLoader);
+ } else {
+ System.out.println(className + " was loaded with " + (boot ? "boot" : "system") +
+ " classloader");
+ }
+ }
+}
diff --git a/test/937-hello-retransform-package/build b/test/937-hello-retransform-package/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/937-hello-retransform-package/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/937-hello-retransform-package/expected.txt b/test/937-hello-retransform-package/expected.txt
new file mode 100644
index 0000000..4774b81
--- /dev/null
+++ b/test/937-hello-retransform-package/expected.txt
@@ -0,0 +1,2 @@
+hello
+Goodbye
diff --git a/test/937-hello-retransform-package/info.txt b/test/937-hello-retransform-package/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/937-hello-retransform-package/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/937-hello-retransform-package/run b/test/937-hello-retransform-package/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/937-hello-retransform-package/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/937-hello-retransform-package/src/Main.java b/test/937-hello-retransform-package/src/Main.java
new file mode 100644
index 0000000..4b9271b
--- /dev/null
+++ b/test/937-hello-retransform-package/src/Main.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+import testing.*;
+public class Main {
+
+ /**
+ * base64 encoded class/dex file for
+ * package testing;
+ * class Transform {
+ * public void sayHi() {
+ * System.out.println("Goodbye");
+ * }
+ * }
+ */
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAHAoABgAOCQAPABAIABEKABIAEwcAFAcAFQEABjxpbml0PgEAAygpVgEABENvZGUB" +
+ "AA9MaW5lTnVtYmVyVGFibGUBAAVzYXlIaQEAClNvdXJjZUZpbGUBAA5UcmFuc2Zvcm0uamF2YQwA" +
+ "BwAIBwAWDAAXABgBAAdHb29kYnllBwAZDAAaABsBABF0ZXN0aW5nL1RyYW5zZm9ybQEAEGphdmEv" +
+ "bGFuZy9PYmplY3QBABBqYXZhL2xhbmcvU3lzdGVtAQADb3V0AQAVTGphdmEvaW8vUHJpbnRTdHJl" +
+ "YW07AQATamF2YS9pby9QcmludFN0cmVhbQEAB3ByaW50bG4BABUoTGphdmEvbGFuZy9TdHJpbmc7" +
+ "KVYAIQAFAAYAAAAAAAIAAQAHAAgAAQAJAAAAHQABAAEAAAAFKrcAAbEAAAABAAoAAAAGAAEAAAAC" +
+ "AAEACwAIAAEACQAAACUAAgABAAAACbIAAhIDtgAEsQAAAAEACgAAAAoAAgAAAAQACAAFAAEADAAA" +
+ "AAIADQ==");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQBhYIi3Gs9Nn/GN1fCzF+aFQ0AbhA1h1WHUAgAAcAAAAHhWNBIAAAAAAAAAADQCAAAO" +
+ "AAAAcAAAAAYAAACoAAAAAgAAAMAAAAABAAAA2AAAAAQAAADgAAAAAQAAAAABAAC0AQAAIAEAAGIB" +
+ "AABqAQAAcwEAAIoBAACeAQAAsgEAAMYBAADbAQAA6wEAAO4BAADyAQAABgIAAAsCAAAUAgAAAgAA" +
+ "AAMAAAAEAAAABQAAAAYAAAAIAAAACAAAAAUAAAAAAAAACQAAAAUAAABcAQAAAwAAAAsAAAAAAAEA" +
+ "DAAAAAEAAAAAAAAABAAAAAAAAAAEAAAADQAAAAQAAAABAAAAAQAAAAAAAAAHAAAAAAAAACYCAAAA" +
+ "AAAAAQABAAEAAAAbAgAABAAAAHAQAQAAAA4AAwABAAIAAAAgAgAACQAAAGIAAAAbAQEAAABuIAAA" +
+ "EAAOAAAAAQAAAAIABjxpbml0PgAHR29vZGJ5ZQAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABJMamF2" +
+ "YS9sYW5nL09iamVjdDsAEkxqYXZhL2xhbmcvU3RyaW5nOwASTGphdmEvbGFuZy9TeXN0ZW07ABNM" +
+ "dGVzdGluZy9UcmFuc2Zvcm07AA5UcmFuc2Zvcm0uamF2YQABVgACVkwAEmVtaXR0ZXI6IGphY2st" +
+ "NC4yMgADb3V0AAdwcmludGxuAAVzYXlIaQACAAcOAAQABw6HAAAAAQECgYAEoAIDAbgCDQAAAAAA" +
+ "AAABAAAAAAAAAAEAAAAOAAAAcAAAAAIAAAAGAAAAqAAAAAMAAAACAAAAwAAAAAQAAAABAAAA2AAA" +
+ "AAUAAAAEAAAA4AAAAAYAAAABAAAAAAEAAAEgAAACAAAAIAEAAAEQAAABAAAAXAEAAAIgAAAOAAAA" +
+ "YgEAAAMgAAACAAAAGwIAAAAgAAABAAAAJgIAAAAQAAABAAAANAIAAA==");
+
+ public static void main(String[] args) {
+ doTest(new Transform());
+ }
+
+ public static void doTest(Transform t) {
+ t.sayHi();
+ addCommonTransformationResult("testing/Transform", CLASS_BYTES, DEX_BYTES);
+ enableCommonRetransformation(true);
+ doCommonClassRetransformation(Transform.class);
+ t.sayHi();
+ }
+
+ // Transforms the class
+ private static native void doCommonClassRetransformation(Class<?>... target);
+ private static native void enableCommonRetransformation(boolean enable);
+ private static native void addCommonTransformationResult(String target_name,
+ byte[] class_bytes,
+ byte[] dex_bytes);
+}
diff --git a/test/937-hello-retransform-package/src/Transform.java b/test/937-hello-retransform-package/src/Transform.java
new file mode 100644
index 0000000..db92612
--- /dev/null
+++ b/test/937-hello-retransform-package/src/Transform.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package testing;
+public class Transform {
+ public void sayHi() {
+ System.out.println("hello");
+ }
+}
diff --git a/test/938-load-transform-bcp/build b/test/938-load-transform-bcp/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/938-load-transform-bcp/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/938-load-transform-bcp/expected.txt b/test/938-load-transform-bcp/expected.txt
new file mode 100644
index 0000000..16c3f8f
--- /dev/null
+++ b/test/938-load-transform-bcp/expected.txt
@@ -0,0 +1,2 @@
+ol.foo() -> 'This is foo for val=123'
+ol.toString() -> 'This is toString() for val=123'
diff --git a/test/938-load-transform-bcp/info.txt b/test/938-load-transform-bcp/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/938-load-transform-bcp/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/938-load-transform-bcp/run b/test/938-load-transform-bcp/run
new file mode 100755
index 0000000..adb1a1c
--- /dev/null
+++ b/test/938-load-transform-bcp/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --no-app-image
diff --git a/test/938-load-transform-bcp/src-ex/TestMain.java b/test/938-load-transform-bcp/src-ex/TestMain.java
new file mode 100644
index 0000000..3757a0f
--- /dev/null
+++ b/test/938-load-transform-bcp/src-ex/TestMain.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.util.OptionalLong;
+public class TestMain {
+ public static void runTest() {
+ // This should be our redefined OptionalLong.
+ OptionalLong ol = OptionalLong.of(123);
+ try {
+ // OptionalLong is a class that is unlikely to be used by the time this test starts.
+ Method foo = OptionalLong.class.getMethod("foo");
+ System.out.println("ol.foo() -> '" + (String)foo.invoke(ol) + "'");
+ System.out.println("ol.toString() -> '" + ol.toString() + "'");
+ } catch (Exception e) {
+ System.out.println(
+ "Exception occured (did something load OptionalLong before this test method!: "
+ + e.toString());
+ e.printStackTrace();
+ }
+ }
+}
diff --git a/test/938-load-transform-bcp/src/Main.java b/test/938-load-transform-bcp/src/Main.java
new file mode 100644
index 0000000..5484899
--- /dev/null
+++ b/test/938-load-transform-bcp/src/Main.java
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.*;
+import java.util.Base64;
+
+class Main {
+ public static String TEST_NAME = "938-load-transform-bcp";
+
+ /**
+ * base64 encoded class/dex file for
+ *
+ * // Yes this version of OptionalLong is not compatible with the real one but since it isn't used
+ * // for anything in the runtime initialization it should be fine.
+ *
+ * package java.util;
+ * public final class OptionalLong {
+ * private long val;
+ *
+ * private OptionalLong(long abc) {
+ * this.val = abc;
+ * }
+ *
+ * public static OptionalLong of(long abc) {
+ * return new OptionalLong(abc);
+ * }
+ *
+ * public String foo() {
+ * return "This is foo for val=" + val;
+ * }
+ *
+ * public String toString() {
+ * return "This is toString() for val=" + val;
+ * }
+ * }
+ */
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAKQoADAAaCQADABsHABwKAAMAHQcAHgoABQAaCAAfCgAFACAKAAUAIQoABQAiCAAj" +
+ "BwAkAQADdmFsAQABSgEABjxpbml0PgEABChKKVYBAARDb2RlAQAPTGluZU51bWJlclRhYmxlAQAC" +
+ "b2YBABsoSilMamF2YS91dGlsL09wdGlvbmFsTG9uZzsBAANmb28BABQoKUxqYXZhL2xhbmcvU3Ry" +
+ "aW5nOwEACHRvU3RyaW5nAQAKU291cmNlRmlsZQEAEU9wdGlvbmFsTG9uZy5qYXZhDAAPACUMAA0A" +
+ "DgEAFmphdmEvdXRpbC9PcHRpb25hbExvbmcMAA8AEAEAF2phdmEvbGFuZy9TdHJpbmdCdWlsZGVy" +
+ "AQAUVGhpcyBpcyBmb28gZm9yIHZhbD0MACYAJwwAJgAoDAAXABYBABtUaGlzIGlzIHRvU3RyaW5n" +
+ "KCkgZm9yIHZhbD0BABBqYXZhL2xhbmcvT2JqZWN0AQADKClWAQAGYXBwZW5kAQAtKExqYXZhL2xh" +
+ "bmcvU3RyaW5nOylMamF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7AQAcKEopTGphdmEvbGFuZy9TdHJp" +
+ "bmdCdWlsZGVyOwAxAAMADAAAAAEAAgANAA4AAAAEAAIADwAQAAEAEQAAACoAAwADAAAACiq3AAEq" +
+ "H7UAArEAAAABABIAAAAOAAMAAAAFAAQABgAJAAcACQATABQAAQARAAAAIQAEAAIAAAAJuwADWR63" +
+ "AASwAAAAAQASAAAABgABAAAACgABABUAFgABABEAAAAvAAMAAQAAABe7AAVZtwAGEge2AAgqtAAC" +
+ "tgAJtgAKsAAAAAEAEgAAAAYAAQAAAA4AAQAXABYAAQARAAAALwADAAEAAAAXuwAFWbcABhILtgAI" +
+ "KrQAArYACbYACrAAAAABABIAAAAGAAEAAAASAAEAGAAAAAIAGQ==");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQAOe/TYJCvVthTToFA3tveMDhwTo7uDf0IcBAAAcAAAAHhWNBIAAAAAAAAAAHwDAAAU" +
+ "AAAAcAAAAAYAAADAAAAABgAAANgAAAABAAAAIAEAAAkAAAAoAQAAAQAAAHABAACMAgAAkAEAAFYC" +
+ "AABeAgAAYQIAAGQCAABoAgAAbAIAAIACAACUAgAArwIAAMkCAADcAgAA8gIAAA8DAAASAwAAFgMA" +
+ "AB4DAAAyAwAANwMAADsDAABFAwAAAQAAAAUAAAAGAAAABwAAAAgAAAAMAAAAAgAAAAIAAAAAAAAA" +
+ "AwAAAAMAAABIAgAABAAAAAMAAABQAgAAAwAAAAQAAABIAgAADAAAAAUAAAAAAAAADQAAAAUAAABI" +
+ "AgAABAAAABMAAAABAAQAAAAAAAMABAAAAAAAAwABAA4AAAADAAIADgAAAAMAAAASAAAABAAFAAAA" +
+ "AAAEAAAAEAAAAAQAAwARAAAABAAAABIAAAAEAAAAEQAAAAEAAAAAAAAACQAAAAAAAABiAwAAAAAA" +
+ "AAQAAwABAAAASgMAAAYAAABwEAAAAQBaEgAADgAEAAIAAwAAAFIDAAAGAAAAIgAEAHAwBQAgAxEA" +
+ "BQABAAMAAABYAwAAFwAAACIAAwBwEAEAAAAbAQoAAABuIAMAEAAMAFNCAABuMAIAIAMMAG4QBAAA" +
+ "AAwAEQAAAAUAAQADAAAAXQMAABcAAAAiAAMAcBABAAAAGwELAAAAbiADABAADABTQgAAbjACACAD" +
+ "DABuEAQAAAAMABEAAAABAAAAAAAAAAEAAAACAAY8aW5pdD4AAUoAAUwAAkxKAAJMTAASTGphdmEv" +
+ "bGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAGUxqYXZhL2xhbmcvU3RyaW5nQnVpbGRl" +
+ "cjsAGExqYXZhL3V0aWwvT3B0aW9uYWxMb25nOwART3B0aW9uYWxMb25nLmphdmEAFFRoaXMgaXMg" +
+ "Zm9vIGZvciB2YWw9ABtUaGlzIGlzIHRvU3RyaW5nKCkgZm9yIHZhbD0AAVYAAlZKAAZhcHBlbmQA" +
+ "EmVtaXR0ZXI6IGphY2stNC4yMgADZm9vAAJvZgAIdG9TdHJpbmcAA3ZhbAAFAQAHDjwtAAoBAAcO" +
+ "AA4ABw4AEgAHDgAAAQICAAIFgoAEkAMCCawDBgHIAwIBiAQAAA0AAAAAAAAAAQAAAAAAAAABAAAA" +
+ "FAAAAHAAAAACAAAABgAAAMAAAAADAAAABgAAANgAAAAEAAAAAQAAACABAAAFAAAACQAAACgBAAAG" +
+ "AAAAAQAAAHABAAABIAAABAAAAJABAAABEAAAAgAAAEgCAAACIAAAFAAAAFYCAAADIAAABAAAAEoD" +
+ "AAAAIAAAAQAAAGIDAAAAEAAAAQAAAHwDAAA=");
+
+ public static ClassLoader getClassLoaderFor(String location) throws Exception {
+ try {
+ Class<?> class_loader_class = Class.forName("dalvik.system.PathClassLoader");
+ Constructor<?> ctor = class_loader_class.getConstructor(String.class, ClassLoader.class);
+ return (ClassLoader)ctor.newInstance(location + "/" + TEST_NAME + "-ex.jar",
+ Main.class.getClassLoader());
+ } catch (ClassNotFoundException e) {
+ // Running on RI. Use URLClassLoader.
+ return new java.net.URLClassLoader(
+ new java.net.URL[] { new java.net.URL("file://" + location + "/classes-ex/") });
+ }
+ }
+
+ public static void main(String[] args) {
+ setPopRetransformations(false);
+ addCommonTransformationResult("java/util/OptionalLong", CLASS_BYTES, DEX_BYTES);
+ enableCommonRetransformation(true);
+ try {
+ /* this is the "alternate" DEX/Jar file */
+ ClassLoader new_loader = getClassLoaderFor(System.getenv("DEX_LOCATION"));
+ Class<?> klass = (Class<?>)new_loader.loadClass("TestMain");
+ if (klass == null) {
+ throw new AssertionError("loadClass failed");
+ }
+ Method run_test = klass.getMethod("runTest");
+ run_test.invoke(null);
+ } catch (Exception e) {
+ System.out.println(e.toString());
+ e.printStackTrace();
+ }
+ }
+
+ private static native void setPopRetransformations(boolean should_pop);
+ // Transforms the class
+ private static native void enableCommonRetransformation(boolean enable);
+ private static native void addCommonTransformationResult(String target_name,
+ byte[] class_bytes,
+ byte[] dex_bytes);
+}
diff --git a/test/939-hello-transformation-bcp/build b/test/939-hello-transformation-bcp/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/939-hello-transformation-bcp/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/939-hello-transformation-bcp/expected.txt b/test/939-hello-transformation-bcp/expected.txt
new file mode 100644
index 0000000..90fd258
--- /dev/null
+++ b/test/939-hello-transformation-bcp/expected.txt
@@ -0,0 +1,3 @@
+ol.toString() -> 'OptionalLong[-559038737]'
+Redefining OptionalLong!
+ol.toString() -> 'Redefined OptionalLong!'
diff --git a/test/939-hello-transformation-bcp/info.txt b/test/939-hello-transformation-bcp/info.txt
new file mode 100644
index 0000000..d230a38
--- /dev/null
+++ b/test/939-hello-transformation-bcp/info.txt
@@ -0,0 +1,6 @@
+Tests basic functions in the jvmti plugin.
+
+Note this test is reliant on the definition of java.util.OptionalLong not
+changing. If this class's definition changes we will need to update this class
+so that the CLASS_BYTES and DEX_BYTES fields contain dex/class bytes for an
+OptionalLong with all the same methods and fields.
diff --git a/test/939-hello-transformation-bcp/run b/test/939-hello-transformation-bcp/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/939-hello-transformation-bcp/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/939-hello-transformation-bcp/src/Main.java b/test/939-hello-transformation-bcp/src/Main.java
new file mode 100644
index 0000000..bdf7f59
--- /dev/null
+++ b/test/939-hello-transformation-bcp/src/Main.java
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+import java.util.OptionalLong;
+public class Main {
+
+ /**
+ * This is the base64 encoded class/dex.
+ *
+ * package java.util;
+ * import java.util.function.LongConsumer;
+ * import java.util.function.LongSupplier;
+ * import java.util.function.Supplier;
+ * public final class OptionalLong {
+ * // Make sure we have a <clinit> function since the real implementation of OptionalLong does.
+ * static { EMPTY = null; }
+ * private static final OptionalLong EMPTY;
+ * private final boolean isPresent;
+ * private final long value;
+ * private OptionalLong() { isPresent = false; value = 0; }
+ * private OptionalLong(long l) { this(); }
+ * public static OptionalLong empty() { return null; }
+ * public static OptionalLong of(long value) { return null; }
+ * public long getAsLong() { return 0; }
+ * public boolean isPresent() { return false; }
+ * public void ifPresent(LongConsumer c) { }
+ * public long orElse(long l) { return 0; }
+ * public long orElseGet(LongSupplier s) { return 0; }
+ * public<X extends Throwable> long orElseThrow(Supplier<X> s) throws X { return 0; }
+ * public boolean equals(Object o) { return false; }
+ * public int hashCode() { return 0; }
+ * public String toString() { return "Redefined OptionalLong!"; }
+ * }
+ */
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAOAoACAAwCQAHADEJAAcAMgoABwAwCAAzCQAHADQHADUHADYBAAVFTVBUWQEAGExq" +
+ "YXZhL3V0aWwvT3B0aW9uYWxMb25nOwEACWlzUHJlc2VudAEAAVoBAAV2YWx1ZQEAAUoBAAY8aW5p" +
+ "dD4BAAMoKVYBAARDb2RlAQAPTGluZU51bWJlclRhYmxlAQAEKEopVgEABWVtcHR5AQAaKClMamF2" +
+ "YS91dGlsL09wdGlvbmFsTG9uZzsBAAJvZgEAGyhKKUxqYXZhL3V0aWwvT3B0aW9uYWxMb25nOwEA" +
+ "CWdldEFzTG9uZwEAAygpSgEAAygpWgEACWlmUHJlc2VudAEAJChMamF2YS91dGlsL2Z1bmN0aW9u" +
+ "L0xvbmdDb25zdW1lcjspVgEABm9yRWxzZQEABChKKUoBAAlvckVsc2VHZXQBACQoTGphdmEvdXRp" +
+ "bC9mdW5jdGlvbi9Mb25nU3VwcGxpZXI7KUoBAAtvckVsc2VUaHJvdwEAIChMamF2YS91dGlsL2Z1" +
+ "bmN0aW9uL1N1cHBsaWVyOylKAQAKRXhjZXB0aW9ucwcANwEACVNpZ25hdHVyZQEAQjxYOkxqYXZh" +
+ "L2xhbmcvVGhyb3dhYmxlOz4oTGphdmEvdXRpbC9mdW5jdGlvbi9TdXBwbGllcjxUWDs+OylKXlRY" +
+ "OwEABmVxdWFscwEAFShMamF2YS9sYW5nL09iamVjdDspWgEACGhhc2hDb2RlAQADKClJAQAIdG9T" +
+ "dHJpbmcBABQoKUxqYXZhL2xhbmcvU3RyaW5nOwEACDxjbGluaXQ+AQAKU291cmNlRmlsZQEAEU9w" +
+ "dGlvbmFsTG9uZy5qYXZhDAAPABAMAAsADAwADQAOAQAXUmVkZWZpbmVkIE9wdGlvbmFsTG9uZyEM" +
+ "AAkACgEAFmphdmEvdXRpbC9PcHRpb25hbExvbmcBABBqYXZhL2xhbmcvT2JqZWN0AQATamF2YS9s" +
+ "YW5nL1Rocm93YWJsZQAxAAcACAAAAAMAGgAJAAoAAAASAAsADAAAABIADQAOAAAADgACAA8AEAAB" +
+ "ABEAAAAnAAMAAQAAAA8qtwABKgO1AAIqCbUAA7EAAAABABIAAAAGAAEAAAALAAIADwATAAEAEQAA" +
+ "AB0AAQADAAAABSq3AASxAAAAAQASAAAABgABAAAADAAJABQAFQABABEAAAAaAAEAAAAAAAIBsAAA" +
+ "AAEAEgAAAAYAAQAAAA0ACQAWABcAAQARAAAAGgABAAIAAAACAbAAAAABABIAAAAGAAEAAAAOAAEA" +
+ "GAAZAAEAEQAAABoAAgABAAAAAgmtAAAAAQASAAAABgABAAAADwABAAsAGgABABEAAAAaAAEAAQAA" +
+ "AAIDrAAAAAEAEgAAAAYAAQAAABAAAQAbABwAAQARAAAAGQAAAAIAAAABsQAAAAEAEgAAAAYAAQAA" +
+ "ABEAAQAdAB4AAQARAAAAGgACAAMAAAACCa0AAAABABIAAAAGAAEAAAASAAEAHwAgAAEAEQAAABoA" +
+ "AgACAAAAAgmtAAAAAQASAAAABgABAAAAEwABACEAIgADABEAAAAaAAIAAgAAAAIJrQAAAAEAEgAA" +
+ "AAYAAQAAABQAIwAAAAQAAQAkACUAAAACACYAAQAnACgAAQARAAAAGgABAAIAAAACA6wAAAABABIA" +
+ "AAAGAAEAAAAVAAEAKQAqAAEAEQAAABoAAQABAAAAAgOsAAAAAQASAAAABgABAAAAFgABACsALAAB" +
+ "ABEAAAAbAAEAAQAAAAMSBbAAAAABABIAAAAGAAEAAAAXAAgALQAQAAEAEQAAAB0AAQAAAAAABQGz" +
+ "AAaxAAAAAQASAAAABgABAAAABwABAC4AAAACAC8=");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQCvAoivSJqk6GdYOgJmvrM/b2/flxhw99q8BwAAcAAAAHhWNBIAAAAAAAAAAPgGAAAq" +
+ "AAAAcAAAAA0AAAAYAQAADQAAAEwBAAADAAAA6AEAAA8AAAAAAgAAAQAAAHgCAAAkBQAAmAIAACoE" +
+ "AAA4BAAAPQQAAEcEAABPBAAAUwQAAFoEAABdBAAAYAQAAGQEAABoBAAAawQAAG8EAACOBAAAqgQA" +
+ "AL4EAADSBAAA6QQAAAMFAAAmBQAASQUAAGcFAACGBQAAmQUAALIFAAC1BQAAuQUAAL0FAADABQAA" +
+ "xAUAANgFAADfBQAA5wUAAPIFAAD8BQAABwYAABIGAAAWBgAAHgYAACkGAAA2BgAAQAYAAAYAAAAH" +
+ "AAAADAAAAA0AAAAOAAAADwAAABAAAAARAAAAEgAAABMAAAAVAAAAGAAAABsAAAAGAAAAAAAAAAAA" +
+ "AAAHAAAAAQAAAAAAAAAIAAAAAQAAAAQEAAAJAAAAAQAAAAwEAAAJAAAAAQAAABQEAAAKAAAABQAA" +
+ "AAAAAAAKAAAABwAAAAAAAAALAAAABwAAAAQEAAAYAAAACwAAAAAAAAAZAAAACwAAAAQEAAAaAAAA" +
+ "CwAAABwEAAAbAAAADAAAAAAAAAAcAAAADAAAACQEAAAHAAcABQAAAAcADAAjAAAABwABACkAAAAE" +
+ "AAgAAwAAAAcACAACAAAABwAIAAMAAAAHAAkAAwAAAAcABgAeAAAABwAMAB8AAAAHAAEAIAAAAAcA" +
+ "AAAhAAAABwAKACIAAAAHAAsAIwAAAAcABwAkAAAABwACACUAAAAHAAMAJgAAAAcABAAnAAAABwAF" +
+ "ACgAAAAHAAAAEQAAAAQAAAAAAAAAFgAAAOwDAACtBgAAAAAAAAIAAACVBgAApQYAAAEAAAAAAAAA" +
+ "RwYAAAQAAAASAGkAAAAOAAMAAQABAAAATQYAAAsAAABwEAAAAgASAFwgAQAWAAAAWiACAA4AAAAD" +
+ "AAMAAQAAAFIGAAAEAAAAcBACAAAADgABAAAAAAAAAFgGAAACAAAAEgARAAMAAgAAAAAAXQYAAAIA" +
+ "AAASABEAAwACAAAAAABjBgAAAgAAABIADwADAAEAAAAAAGkGAAADAAAAFgAAABAAAAACAAEAAAAA" +
+ "AG4GAAACAAAAEgAPAAIAAgAAAAAAcwYAAAEAAAAOAAAAAgABAAAAAAB5BgAAAgAAABIADwAFAAMA" +
+ "AAAAAH4GAAADAAAAFgAAABAAAAAEAAIAAAAAAIQGAAADAAAAFgAAABAAAAAEAAIAAAAAAIoGAAAD" +
+ "AAAAFgAAABAAAAACAAEAAAAAAJAGAAAEAAAAGwAXAAAAEQAAAAAAAAAAAAEAAAAAAAAADQAAAJgC" +
+ "AAABAAAAAQAAAAEAAAAJAAAAAQAAAAoAAAABAAAACAAAAAEAAAAEAAw8VFg7PjspSl5UWDsAAzxY" +
+ "OgAIPGNsaW5pdD4ABjxpbml0PgACPigABUVNUFRZAAFJAAFKAAJKSgACSkwAAUwAAkxKAB1MZGFs" +
+ "dmlrL2Fubm90YXRpb24vU2lnbmF0dXJlOwAaTGRhbHZpay9hbm5vdGF0aW9uL1Rocm93czsAEkxq" +
+ "YXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABVMamF2YS9sYW5nL1Rocm93YWJs" +
+ "ZTsAGExqYXZhL3V0aWwvT3B0aW9uYWxMb25nOwAhTGphdmEvdXRpbC9mdW5jdGlvbi9Mb25nQ29u" +
+ "c3VtZXI7ACFMamF2YS91dGlsL2Z1bmN0aW9uL0xvbmdTdXBwbGllcjsAHExqYXZhL3V0aWwvZnVu" +
+ "Y3Rpb24vU3VwcGxpZXIAHUxqYXZhL3V0aWwvZnVuY3Rpb24vU3VwcGxpZXI7ABFPcHRpb25hbExv" +
+ "bmcuamF2YQAXUmVkZWZpbmVkIE9wdGlvbmFsTG9uZyEAAVYAAlZKAAJWTAABWgACWkwAEmVtaXR0" +
+ "ZXI6IGphY2stNC4yMgAFZW1wdHkABmVxdWFscwAJZ2V0QXNMb25nAAhoYXNoQ29kZQAJaWZQcmVz" +
+ "ZW50AAlpc1ByZXNlbnQAAm9mAAZvckVsc2UACW9yRWxzZUdldAALb3JFbHNlVGhyb3cACHRvU3Ry" +
+ "aW5nAAV2YWx1ZQAHAAcOOQALAAcOAAwBAAcOAA0ABw4ADgEABw4AFQEABw4ADwAHDgAWAAcOABEB" +
+ "AAcOABAABw4AEgEABw4AEwEABw4AFAEABw4AFwAHDgACAgEpHAUXARcQFwQXFBcAAgMBKRwBGAYB" +
+ "AgUJABoBEgESAYiABKQFAYKABLwFAYKABOQFAQn8BQYJkAYFAaQGAQG4BgEB0AYBAeQGAQH4BgIB" +
+ "jAcBAaQHAQG8BwEB1AcAAAAQAAAAAAAAAAEAAAAAAAAAAQAAACoAAABwAAAAAgAAAA0AAAAYAQAA" +
+ "AwAAAA0AAABMAQAABAAAAAMAAADoAQAABQAAAA8AAAAAAgAABgAAAAEAAAB4AgAAAxAAAAEAAACY" +
+ "AgAAASAAAA4AAACkAgAABiAAAAEAAADsAwAAARAAAAUAAAAEBAAAAiAAACoAAAAqBAAAAyAAAA4A" +
+ "AABHBgAABCAAAAIAAACVBgAAACAAAAEAAACtBgAAABAAAAEAAAD4BgAA");
+
+ public static void main(String[] args) {
+ // OptionalLong is a class that is unlikely to be used by the time this test starts and is not
+ // likely to be changed in any meaningful way in the future.
+ OptionalLong ol = OptionalLong.of(0xDEADBEEF);
+ System.out.println("ol.toString() -> '" + ol.toString() + "'");
+ System.out.println("Redefining OptionalLong!");
+ doCommonClassRedefinition(OptionalLong.class, CLASS_BYTES, DEX_BYTES);
+ System.out.println("ol.toString() -> '" + ol.toString() + "'");
+ }
+
+ // Transforms the class
+ private static native void doCommonClassRedefinition(Class<?> target,
+ byte[] class_file,
+ byte[] dex_file);
+}
diff --git a/test/940-recursive-obsolete/build b/test/940-recursive-obsolete/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/940-recursive-obsolete/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/940-recursive-obsolete/expected.txt b/test/940-recursive-obsolete/expected.txt
new file mode 100644
index 0000000..18ffc25
--- /dev/null
+++ b/test/940-recursive-obsolete/expected.txt
@@ -0,0 +1,21 @@
+hello2
+hello1
+Not doing anything here
+hello0
+goodbye0
+goodbye1
+goodbye2
+hello2
+hello1
+transforming calling function
+Hello0 - transformed
+Goodbye0 - transformed
+goodbye1
+goodbye2
+Hello2 - transformed
+Hello1 - transformed
+Not doing anything here
+Hello0 - transformed
+Goodbye0 - transformed
+Goodbye1 - transformed
+Goodbye2 - transformed
diff --git a/test/940-recursive-obsolete/info.txt b/test/940-recursive-obsolete/info.txt
new file mode 100644
index 0000000..c8b892c
--- /dev/null
+++ b/test/940-recursive-obsolete/info.txt
@@ -0,0 +1 @@
+Tests basic obsolete method support
diff --git a/test/940-recursive-obsolete/run b/test/940-recursive-obsolete/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/940-recursive-obsolete/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/940-recursive-obsolete/src/Main.java b/test/940-recursive-obsolete/src/Main.java
new file mode 100644
index 0000000..3766906
--- /dev/null
+++ b/test/940-recursive-obsolete/src/Main.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+public class Main {
+
+ // class Transform {
+ // public void sayHi(int recur, Runnable r) {
+ // System.out.println("Hello" + recur + " - transformed");
+ // if (recur == 1) {
+ // r.run();
+ // sayHi(recur - 1, r);
+ // } else if (recur != 0) {
+ // sayHi(recur - 1, r);
+ // }
+ // System.out.println("Goodbye" + recur + " - transformed");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQANwoADwAZCQAaABsHABwKAAMAGQgAHQoAAwAeCgADAB8IACAKAAMAIQoAIgAjCwAk" +
+ "ACUKAA4AJggAJwcAKAcAKQEABjxpbml0PgEAAygpVgEABENvZGUBAA9MaW5lTnVtYmVyVGFibGUB" +
+ "AAVzYXlIaQEAGChJTGphdmEvbGFuZy9SdW5uYWJsZTspVgEADVN0YWNrTWFwVGFibGUBAApTb3Vy" +
+ "Y2VGaWxlAQAOVHJhbnNmb3JtLmphdmEMABAAEQcAKgwAKwAsAQAXamF2YS9sYW5nL1N0cmluZ0J1" +
+ "aWxkZXIBAAVIZWxsbwwALQAuDAAtAC8BAA4gLSB0cmFuc2Zvcm1lZAwAMAAxBwAyDAAzADQHADUM" +
+ "ADYAEQwAFAAVAQAHR29vZGJ5ZQEACVRyYW5zZm9ybQEAEGphdmEvbGFuZy9PYmplY3QBABBqYXZh" +
+ "L2xhbmcvU3lzdGVtAQADb3V0AQAVTGphdmEvaW8vUHJpbnRTdHJlYW07AQAGYXBwZW5kAQAtKExq" +
+ "YXZhL2xhbmcvU3RyaW5nOylMamF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7AQAcKEkpTGphdmEvbGFu" +
+ "Zy9TdHJpbmdCdWlsZGVyOwEACHRvU3RyaW5nAQAUKClMamF2YS9sYW5nL1N0cmluZzsBABNqYXZh" +
+ "L2lvL1ByaW50U3RyZWFtAQAHcHJpbnRsbgEAFShMamF2YS9sYW5nL1N0cmluZzspVgEAEmphdmEv" +
+ "bGFuZy9SdW5uYWJsZQEAA3J1bgAgAA4ADwAAAAAAAgAAABAAEQABABIAAAAdAAEAAQAAAAUqtwAB" +
+ "sQAAAAEAEwAAAAYAAQAAAAEAAQAUABUAAQASAAAAnQADAAMAAABfsgACuwADWbcABBIFtgAGG7YA" +
+ "BxIItgAGtgAJtgAKGwSgABQsuQALAQAqGwRkLLYADKcADxuZAAsqGwRkLLYADLIAArsAA1m3AAQS" +
+ "DbYABhu2AAcSCLYABrYACbYACrEAAAACABMAAAAiAAgAAAADAB4ABAAjAAUAKQAGADQABwA4AAgA" +
+ "QAAKAF4ACwAWAAAABAACNAsAAQAXAAAAAgAY");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQA3pkIgnymz2/eri+mp2dyZo3jolQmaRPKEBAAAcAAAAHhWNBIAAAAAAAAAAOQDAAAa" +
+ "AAAAcAAAAAkAAADYAAAABgAAAPwAAAABAAAARAEAAAkAAABMAQAAAQAAAJQBAADQAgAAtAEAAJwC" +
+ "AACsAgAAtAIAAL0CAADEAgAAxwIAAMoCAADOAgAA0gIAAN8CAAD2AgAACgMAACADAAA0AwAATwMA" +
+ "AGMDAABzAwAAdgMAAHsDAAB/AwAAhwMAAJsDAACgAwAAqQMAAK4DAAC1AwAABAAAAAgAAAAJAAAA" +
+ "CgAAAAsAAAAMAAAADQAAAA4AAAAQAAAABQAAAAUAAAAAAAAABgAAAAYAAACEAgAABwAAAAYAAACM" +
+ "AgAAEAAAAAgAAAAAAAAAEQAAAAgAAACUAgAAEgAAAAgAAACMAgAABwACABUAAAABAAMAAQAAAAEA" +
+ "BAAYAAAAAgAFABYAAAADAAMAAQAAAAQAAwAXAAAABgADAAEAAAAGAAEAEwAAAAYAAgATAAAABgAA" +
+ "ABkAAAABAAAAAAAAAAMAAAAAAAAADwAAAAAAAADWAwAAAAAAAAEAAQABAAAAvwMAAAQAAABwEAMA" +
+ "AAAOAAYAAwADAAAAxAMAAFQAAABiAAAAIgEGAHAQBQABABsCAwAAAG4gBwAhAAwBbiAGAEEADAEb" +
+ "AgAAAABuIAcAIQAMAW4QCAABAAwBbiACABAAEhAzBCsAchAEAAUA2AAE/24wAQADBWIAAAAiAQYA" +
+ "cBAFAAEAGwICAAAAbiAHACEADAFuIAYAQQAMARsCAAAAAG4gBwAhAAwBbhAIAAEADAFuIAIAEAAO" +
+ "ADgE3//YAAT/bjABAAMFKNgBAAAAAAAAAAEAAAAFAAAAAgAAAAAABAAOIC0gdHJhbnNmb3JtZWQA" +
+ "Bjxpbml0PgAHR29vZGJ5ZQAFSGVsbG8AAUkAAUwAAkxJAAJMTAALTFRyYW5zZm9ybTsAFUxqYXZh" +
+ "L2lvL1ByaW50U3RyZWFtOwASTGphdmEvbGFuZy9PYmplY3Q7ABRMamF2YS9sYW5nL1J1bm5hYmxl" +
+ "OwASTGphdmEvbGFuZy9TdHJpbmc7ABlMamF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7ABJMamF2YS9s" +
+ "YW5nL1N5c3RlbTsADlRyYW5zZm9ybS5qYXZhAAFWAANWSUwAAlZMAAZhcHBlbmQAEmVtaXR0ZXI6" +
+ "IGphY2stNC4yNAADb3V0AAdwcmludGxuAANydW4ABXNheUhpAAh0b1N0cmluZwABAAcOAAMCAAAH" +
+ "DgEgDzw8XQEgDxktAAAAAQEAgIAEtAMBAcwDDQAAAAAAAAABAAAAAAAAAAEAAAAaAAAAcAAAAAIA" +
+ "AAAJAAAA2AAAAAMAAAAGAAAA/AAAAAQAAAABAAAARAEAAAUAAAAJAAAATAEAAAYAAAABAAAAlAEA" +
+ "AAEgAAACAAAAtAEAAAEQAAADAAAAhAIAAAIgAAAaAAAAnAIAAAMgAAACAAAAvwMAAAAgAAABAAAA" +
+ "1gMAAAAQAAABAAAA5AMAAA==");
+
+ public static void main(String[] args) {
+ doTest(new Transform());
+ }
+
+ public static void doTest(Transform t) {
+ t.sayHi(2, () -> { System.out.println("Not doing anything here"); });
+ t.sayHi(2, () -> {
+ System.out.println("transforming calling function");
+ doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ });
+ t.sayHi(2, () -> { System.out.println("Not doing anything here"); });
+ }
+
+ // Transforms the class
+ private static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+}
diff --git a/test/940-recursive-obsolete/src/Transform.java b/test/940-recursive-obsolete/src/Transform.java
new file mode 100644
index 0000000..97522cd
--- /dev/null
+++ b/test/940-recursive-obsolete/src/Transform.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform {
+ public void sayHi(int recur, Runnable r) {
+ System.out.println("hello" + recur);
+ if (recur == 1) {
+ r.run();
+ sayHi(recur - 1, r);
+ } else if (recur != 0) {
+ sayHi(recur - 1, r);
+ }
+ System.out.println("goodbye" + recur);
+ }
+}
diff --git a/test/941-recurive-obsolete-jit/build b/test/941-recurive-obsolete-jit/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/941-recurive-obsolete-jit/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/941-recurive-obsolete-jit/expected.txt b/test/941-recurive-obsolete-jit/expected.txt
new file mode 100644
index 0000000..086f7b0
--- /dev/null
+++ b/test/941-recurive-obsolete-jit/expected.txt
@@ -0,0 +1,22 @@
+hello2
+hello1
+Not doing anything here
+hello0
+goodbye0
+goodbye1
+goodbye2
+hello2
+hello1
+transforming calling function
+Hello0 - transformed
+Goodbye0 - transformed
+goodbye1
+goodbye2
+Hello2 - transformed
+Hello1 - transformed
+Not doing anything here
+Hello0 - transformed
+Goodbye0 - transformed
+Goodbye1 - transformed
+Goodbye2 - transformed
+
diff --git a/test/941-recurive-obsolete-jit/info.txt b/test/941-recurive-obsolete-jit/info.txt
new file mode 100644
index 0000000..c8b892c
--- /dev/null
+++ b/test/941-recurive-obsolete-jit/info.txt
@@ -0,0 +1 @@
+Tests basic obsolete method support
diff --git a/test/941-recurive-obsolete-jit/run b/test/941-recurive-obsolete-jit/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/941-recurive-obsolete-jit/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/941-recurive-obsolete-jit/src/Main.java b/test/941-recurive-obsolete-jit/src/Main.java
new file mode 100644
index 0000000..f6d6416
--- /dev/null
+++ b/test/941-recurive-obsolete-jit/src/Main.java
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+import java.util.function.Consumer;
+import java.lang.reflect.Method;
+
+public class Main {
+
+ // import java.util.function.Consumer;
+ // class Transform {
+ // public void sayHi(int recur, Consumer<String> reporter, Runnable r) {
+ // reporter.accept("Hello" + recur + " - transformed");
+ // if (recur == 1) {
+ // r.run();
+ // sayHi(recur - 1, reporter, r);
+ // } else if (recur != 0) {
+ // sayHi(recur - 1, reporter, r);
+ // }
+ // reporter.accept("Goodbye" + recur + " - transformed");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAMwoADgAaBwAbCgACABoIABwKAAIAHQoAAgAeCAAfCgACACALACEAIgsAIwAkCgAN" +
+ "ACUIACYHACcHACgBAAY8aW5pdD4BAAMoKVYBAARDb2RlAQAPTGluZU51bWJlclRhYmxlAQAFc2F5" +
+ "SGkBADUoSUxqYXZhL3V0aWwvZnVuY3Rpb24vQ29uc3VtZXI7TGphdmEvbGFuZy9SdW5uYWJsZTsp" +
+ "VgEADVN0YWNrTWFwVGFibGUBAAlTaWduYXR1cmUBAEkoSUxqYXZhL3V0aWwvZnVuY3Rpb24vQ29u" +
+ "c3VtZXI8TGphdmEvbGFuZy9TdHJpbmc7PjtMamF2YS9sYW5nL1J1bm5hYmxlOylWAQAKU291cmNl" +
+ "RmlsZQEADlRyYW5zZm9ybS5qYXZhDAAPABABABdqYXZhL2xhbmcvU3RyaW5nQnVpbGRlcgEABUhl" +
+ "bGxvDAApACoMACkAKwEADiAtIHRyYW5zZm9ybWVkDAAsAC0HAC4MAC8AMAcAMQwAMgAQDAATABQB" +
+ "AAdHb29kYnllAQAJVHJhbnNmb3JtAQAQamF2YS9sYW5nL09iamVjdAEABmFwcGVuZAEALShMamF2" +
+ "YS9sYW5nL1N0cmluZzspTGphdmEvbGFuZy9TdHJpbmdCdWlsZGVyOwEAHChJKUxqYXZhL2xhbmcv" +
+ "U3RyaW5nQnVpbGRlcjsBAAh0b1N0cmluZwEAFCgpTGphdmEvbGFuZy9TdHJpbmc7AQAbamF2YS91" +
+ "dGlsL2Z1bmN0aW9uL0NvbnN1bWVyAQAGYWNjZXB0AQAVKExqYXZhL2xhbmcvT2JqZWN0OylWAQAS" +
+ "amF2YS9sYW5nL1J1bm5hYmxlAQADcnVuACAADQAOAAAAAAACAAAADwAQAAEAEQAAAB0AAQABAAAA" +
+ "BSq3AAGxAAAAAQASAAAABgABAAAAAgABABMAFAACABEAAACfAAQABAAAAGEsuwACWbcAAxIEtgAF" +
+ "G7YABhIHtgAFtgAIuQAJAgAbBKAAFS25AAoBACobBGQsLbYAC6cAEBuZAAwqGwRkLC22AAssuwAC" +
+ "WbcAAxIMtgAFG7YABhIHtgAFtgAIuQAJAgCxAAAAAgASAAAAIgAIAAAABAAeAAUAIwAGACkABwA1" +
+ "AAgAOQAJAEIACwBgAAwAFQAAAAQAAjUMABYAAAACABcAAQAYAAAAAgAZ");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQA7uevryhDgvad3G3EACTdspZGfNKv2i3kkBQAAcAAAAHhWNBIAAAAAAAAAAGwEAAAf" +
+ "AAAAcAAAAAkAAADsAAAABgAAABABAAAAAAAAAAAAAAkAAABYAQAAAQAAAKABAABkAwAAwAEAAMoC" +
+ "AADaAgAA3gIAAOICAADlAgAA7QIAAPECAAD6AgAAAQMAAAQDAAAHAwAACwMAAA8DAAAcAwAAOwMA" +
+ "AE8DAABlAwAAeQMAAJQDAACyAwAA0QMAAOEDAADkAwAA6gMAAO4DAAD2AwAA/gMAABIEAAAXBAAA" +
+ "HgQAACgEAAAIAAAADAAAAA0AAAAOAAAADwAAABAAAAARAAAAEwAAABUAAAAJAAAABQAAAAAAAAAK" +
+ "AAAABgAAAKgCAAALAAAABgAAALACAAAVAAAACAAAAAAAAAAWAAAACAAAALgCAAAXAAAACAAAAMQC" +
+ "AAABAAMABAAAAAEABAAcAAAAAwADAAQAAAAEAAMAGwAAAAYAAwAEAAAABgABABkAAAAGAAIAGQAA" +
+ "AAYAAAAdAAAABwAFABgAAAABAAAAAAAAAAMAAAAAAAAAFAAAAJACAABbBAAAAAAAAAEAAABHBAAA" +
+ "AQABAAEAAAAvBAAABAAAAHAQAgAAAA4ABgAEAAQAAAA0BAAAUAAAACIABgBwEAQAAAAbAQcAAABu" +
+ "IAYAEAAMAG4gBQAwAAwAGwEAAAAAbiAGABAADABuEAcAAAAMAHIgCAAEABIQMwMpAHIQAwAFANgA" +
+ "A/9uQAEAAlQiAAYAcBAEAAAAGwEGAAAAbiAGABAADABuIAUAMAAMABsBAAAAAG4gBgAQAAwAbhAH" +
+ "AAAADAByIAgABAAOADgD4f/YAAP/bkABAAJUKNoAAAAAAAAAAAEAAAAAAAAAAQAAAMABAAABAAAA" +
+ "AAAAAAEAAAAFAAAAAwAAAAAABwAEAAAAAQAAAAMADiAtIHRyYW5zZm9ybWVkAAIoSQACKVYAATwA" +
+ "Bjxpbml0PgACPjsAB0dvb2RieWUABUhlbGxvAAFJAAFMAAJMSQACTEwAC0xUcmFuc2Zvcm07AB1M" +
+ "ZGFsdmlrL2Fubm90YXRpb24vU2lnbmF0dXJlOwASTGphdmEvbGFuZy9PYmplY3Q7ABRMamF2YS9s" +
+ "YW5nL1J1bm5hYmxlOwASTGphdmEvbGFuZy9TdHJpbmc7ABlMamF2YS9sYW5nL1N0cmluZ0J1aWxk" +
+ "ZXI7ABxMamF2YS91dGlsL2Z1bmN0aW9uL0NvbnN1bWVyAB1MamF2YS91dGlsL2Z1bmN0aW9uL0Nv" +
+ "bnN1bWVyOwAOVHJhbnNmb3JtLmphdmEAAVYABFZJTEwAAlZMAAZhY2NlcHQABmFwcGVuZAASZW1p" +
+ "dHRlcjogamFjay00LjI0AANydW4ABXNheUhpAAh0b1N0cmluZwAFdmFsdWUAAgAHDgAEAwAAAAcO" +
+ "AR4PPDxdAR4PGS0AAgIBHhwHFwEXEhcDFxAXBRcPFwIAAAEBAICABMgDAQHgAwAAAA8AAAAAAAAA" +
+ "AQAAAAAAAAABAAAAHwAAAHAAAAACAAAACQAAAOwAAAADAAAABgAAABABAAAFAAAACQAAAFgBAAAG" +
+ "AAAAAQAAAKABAAADEAAAAQAAAMABAAABIAAAAgAAAMgBAAAGIAAAAQAAAJACAAABEAAABAAAAKgC" +
+ "AAACIAAAHwAAAMoCAAADIAAAAgAAAC8EAAAEIAAAAQAAAEcEAAAAIAAAAQAAAFsEAAAAEAAAAQAA" +
+ "AGwEAAA=");
+
+ // A class that we can use to keep track of the output of this test.
+ private static class TestWatcher implements Consumer<String> {
+ private StringBuilder sb;
+ public TestWatcher() {
+ sb = new StringBuilder();
+ }
+
+ @Override
+ public void accept(String s) {
+ sb.append(s);
+ sb.append('\n');
+ }
+
+ public String getOutput() {
+ return sb.toString();
+ }
+
+ public void clear() {
+ sb = new StringBuilder();
+ }
+ }
+
+ public static void main(String[] args) {
+ doTest(new Transform());
+ }
+
+ private static boolean retry = false;
+
+ public static void doTest(Transform t) {
+ final TestWatcher reporter = new TestWatcher();
+ Method say_hi_method;
+ // Figure out if we can even JIT at all.
+ final boolean has_jit = hasJit();
+ try {
+ say_hi_method = Transform.class.getDeclaredMethod(
+ "sayHi", int.class, Consumer.class, Runnable.class);
+ } catch (Exception e) {
+ System.out.println("Unable to find methods!");
+ e.printStackTrace();
+ return;
+ }
+ // Makes sure the stack is the way we want it for the test and does the redefinition. It will
+ // set the retry boolean to true if we need to go around again due to jit code being GCd.
+ Runnable do_redefinition = () -> {
+ if (has_jit && Main.isInterpretedFunction(say_hi_method, true)) {
+ // Try again. We are not running the right jitted methods/cannot redefine them now.
+ retry = true;
+ } else {
+ // Actually do the redefinition. The stack looks good.
+ retry = false;
+ reporter.accept("transforming calling function");
+ doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ }
+ };
+ do {
+ // Run ensureJitCompiled here since it might get GCd
+ ensureJitCompiled(Transform.class, "sayHi");
+ // Clear output.
+ reporter.clear();
+ t.sayHi(2, reporter, () -> { reporter.accept("Not doing anything here"); });
+ t.sayHi(2, reporter, do_redefinition);
+ t.sayHi(2, reporter, () -> { reporter.accept("Not doing anything here"); });
+ } while(retry);
+ System.out.println(reporter.getOutput());
+ }
+
+ private static native boolean hasJit();
+
+ private static native boolean isInterpretedFunction(Method m, boolean require_deoptimizable);
+
+ private static native void ensureJitCompiled(Class c, String name);
+
+ // Transforms the class
+ private static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+}
diff --git a/test/941-recurive-obsolete-jit/src/Transform.java b/test/941-recurive-obsolete-jit/src/Transform.java
new file mode 100644
index 0000000..e6a913a
--- /dev/null
+++ b/test/941-recurive-obsolete-jit/src/Transform.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.function.Consumer;
+class Transform {
+ public void sayHi(int recur, Consumer<String> c, Runnable r) {
+ c.accept("hello" + recur);
+ if (recur == 1) {
+ r.run();
+ sayHi(recur - 1, c, r);
+ } else if (recur != 0) {
+ sayHi(recur - 1, c, r);
+ }
+ c.accept("goodbye" + recur);
+ }
+}
diff --git a/test/942-private-recursive/build b/test/942-private-recursive/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/942-private-recursive/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/942-private-recursive/expected.txt b/test/942-private-recursive/expected.txt
new file mode 100644
index 0000000..18ffc25
--- /dev/null
+++ b/test/942-private-recursive/expected.txt
@@ -0,0 +1,21 @@
+hello2
+hello1
+Not doing anything here
+hello0
+goodbye0
+goodbye1
+goodbye2
+hello2
+hello1
+transforming calling function
+Hello0 - transformed
+Goodbye0 - transformed
+goodbye1
+goodbye2
+Hello2 - transformed
+Hello1 - transformed
+Not doing anything here
+Hello0 - transformed
+Goodbye0 - transformed
+Goodbye1 - transformed
+Goodbye2 - transformed
diff --git a/test/942-private-recursive/info.txt b/test/942-private-recursive/info.txt
new file mode 100644
index 0000000..c8b892c
--- /dev/null
+++ b/test/942-private-recursive/info.txt
@@ -0,0 +1 @@
+Tests basic obsolete method support
diff --git a/test/942-private-recursive/run b/test/942-private-recursive/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/942-private-recursive/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/942-private-recursive/src/Main.java b/test/942-private-recursive/src/Main.java
new file mode 100644
index 0000000..8cbab7b
--- /dev/null
+++ b/test/942-private-recursive/src/Main.java
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+public class Main {
+
+ // class Transform {
+ // public void sayHi(int recur, Runnable r) {
+ // privateSayHi(recur, r);
+ // }
+ // private void privateSayHi(int recur, Runnable r) {
+ // System.out.println("Hello" + recur + " - transformed");
+ // if (recur == 1) {
+ // r.run();
+ // privateSayHi(recur - 1, r);
+ // } else if (recur != 0) {
+ // privateSayHi(recur - 1, r);
+ // }
+ // System.out.println("Goodbye" + recur + " - transformed");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAOAoADwAaCgAOABsJABwAHQcAHgoABAAaCAAfCgAEACAKAAQAIQgAIgoABAAjCgAk" +
+ "ACULACYAJwgAKAcAKQcAKgEABjxpbml0PgEAAygpVgEABENvZGUBAA9MaW5lTnVtYmVyVGFibGUB" +
+ "AAVzYXlIaQEAGChJTGphdmEvbGFuZy9SdW5uYWJsZTspVgEADHByaXZhdGVTYXlIaQEADVN0YWNr" +
+ "TWFwVGFibGUBAApTb3VyY2VGaWxlAQAOVHJhbnNmb3JtLmphdmEMABAAEQwAFgAVBwArDAAsAC0B" +
+ "ABdqYXZhL2xhbmcvU3RyaW5nQnVpbGRlcgEABUhlbGxvDAAuAC8MAC4AMAEADiAtIHRyYW5zZm9y" +
+ "bWVkDAAxADIHADMMADQANQcANgwANwARAQAHR29vZGJ5ZQEACVRyYW5zZm9ybQEAEGphdmEvbGFu" +
+ "Zy9PYmplY3QBABBqYXZhL2xhbmcvU3lzdGVtAQADb3V0AQAVTGphdmEvaW8vUHJpbnRTdHJlYW07" +
+ "AQAGYXBwZW5kAQAtKExqYXZhL2xhbmcvU3RyaW5nOylMamF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7" +
+ "AQAcKEkpTGphdmEvbGFuZy9TdHJpbmdCdWlsZGVyOwEACHRvU3RyaW5nAQAUKClMamF2YS9sYW5n" +
+ "L1N0cmluZzsBABNqYXZhL2lvL1ByaW50U3RyZWFtAQAHcHJpbnRsbgEAFShMamF2YS9sYW5nL1N0" +
+ "cmluZzspVgEAEmphdmEvbGFuZy9SdW5uYWJsZQEAA3J1bgAgAA4ADwAAAAAAAwAAABAAEQABABIA" +
+ "AAAdAAEAAQAAAAUqtwABsQAAAAEAEwAAAAYAAQAAAAEAAQAUABUAAQASAAAAIwADAAMAAAAHKhss" +
+ "twACsQAAAAEAEwAAAAoAAgAAAAMABgAEAAIAFgAVAAEAEgAAAJ0AAwADAAAAX7IAA7sABFm3AAUS" +
+ "BrYABxu2AAgSCbYAB7YACrYACxsEoAAULLkADAEAKhsEZCy3AAKnAA8bmQALKhsEZCy3AAKyAAO7" +
+ "AARZtwAFEg22AAcbtgAIEgm2AAe2AAq2AAuxAAAAAgATAAAAIgAIAAAABgAeAAcAIwAIACkACQA0" +
+ "AAoAOAALAEAADQBeAA4AFwAAAAQAAjQLAAEAGAAAAAIAGQ==");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQBQqwVIiZvIuS8j1HDurKbXZEV62Mnug5PEBAAAcAAAAHhWNBIAAAAAAAAAACQEAAAb" +
+ "AAAAcAAAAAkAAADcAAAABgAAAAABAAABAAAASAEAAAoAAABQAQAAAQAAAKABAAAEAwAAwAEAAMAC" +
+ "AADQAgAA2AIAAOECAADoAgAA6wIAAO4CAADyAgAA9gIAAAMDAAAaAwAALgMAAEQDAABYAwAAcwMA" +
+ "AIcDAACXAwAAmgMAAJ8DAACjAwAAqwMAAL8DAADEAwAAzQMAANsDAADgAwAA5wMAAAQAAAAIAAAA" +
+ "CQAAAAoAAAALAAAADAAAAA0AAAAOAAAAEAAAAAUAAAAFAAAAAAAAAAYAAAAGAAAAqAIAAAcAAAAG" +
+ "AAAAsAIAABAAAAAIAAAAAAAAABEAAAAIAAAAuAIAABIAAAAIAAAAsAIAAAcAAgAVAAAAAQADAAEA" +
+ "AAABAAQAFwAAAAEABAAZAAAAAgAFABYAAAADAAMAAQAAAAQAAwAYAAAABgADAAEAAAAGAAEAEwAA" +
+ "AAYAAgATAAAABgAAABoAAAABAAAAAAAAAAMAAAAAAAAADwAAAAAAAAAQBAAAAAAAAAEAAQABAAAA" +
+ "8QMAAAQAAABwEAQAAAAOAAYAAwADAAAA9gMAAFQAAABiAAAAIgEGAHAQBgABABsCAwAAAG4gCAAh" +
+ "AAwBbiAHAEEADAEbAgAAAABuIAgAIQAMAW4QCQABAAwBbiADABAAEhAzBCsAchAFAAUA2AAE/3Aw" +
+ "AQADBWIAAAAiAQYAcBAGAAEAGwICAAAAbiAIACEADAFuIAcAQQAMARsCAAAAAG4gCAAhAAwBbhAJ" +
+ "AAEADAFuIAMAEAAOADgE3//YAAT/cDABAAMFKNgDAAMAAwAAAAgEAAAEAAAAcDABABACDgABAAAA" +
+ "AAAAAAEAAAAFAAAAAgAAAAAABAAOIC0gdHJhbnNmb3JtZWQABjxpbml0PgAHR29vZGJ5ZQAFSGVs" +
+ "bG8AAUkAAUwAAkxJAAJMTAALTFRyYW5zZm9ybTsAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwASTGph" +
+ "dmEvbGFuZy9PYmplY3Q7ABRMamF2YS9sYW5nL1J1bm5hYmxlOwASTGphdmEvbGFuZy9TdHJpbmc7" +
+ "ABlMamF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7ABJMamF2YS9sYW5nL1N5c3RlbTsADlRyYW5zZm9y" +
+ "bS5qYXZhAAFWAANWSUwAAlZMAAZhcHBlbmQAEmVtaXR0ZXI6IGphY2stNC4yNAADb3V0AAdwcmlu" +
+ "dGxuAAxwcml2YXRlU2F5SGkAA3J1bgAFc2F5SGkACHRvU3RyaW5nAAEABw4ABgIAAAcOASAPPDxd" +
+ "ASAPGS0AAwIAAAcOPAAAAAIBAICABMADAQLYAwIBkAUAAA0AAAAAAAAAAQAAAAAAAAABAAAAGwAA" +
+ "AHAAAAACAAAACQAAANwAAAADAAAABgAAAAABAAAEAAAAAQAAAEgBAAAFAAAACgAAAFABAAAGAAAA" +
+ "AQAAAKABAAABIAAAAwAAAMABAAABEAAAAwAAAKgCAAACIAAAGwAAAMACAAADIAAAAwAAAPEDAAAA" +
+ "IAAAAQAAABAEAAAAEAAAAQAAACQEAAA=");
+
+ public static void main(String[] args) {
+ doTest(new Transform());
+ }
+
+ public static void doTest(Transform t) {
+ t.sayHi(2, () -> { System.out.println("Not doing anything here"); });
+ t.sayHi(2, () -> {
+ System.out.println("transforming calling function");
+ doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ });
+ t.sayHi(2, () -> { System.out.println("Not doing anything here"); });
+ }
+
+ // Transforms the class
+ private static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+}
diff --git a/test/942-private-recursive/src/Transform.java b/test/942-private-recursive/src/Transform.java
new file mode 100644
index 0000000..7714326
--- /dev/null
+++ b/test/942-private-recursive/src/Transform.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform {
+ private void privateSayHi(int recur, Runnable r) {
+ System.out.println("hello" + recur);
+ if (recur == 1) {
+ r.run();
+ privateSayHi(recur - 1, r);
+ } else if (recur != 0) {
+ privateSayHi(recur - 1, r);
+ }
+ System.out.println("goodbye" + recur);
+ }
+
+ public void sayHi(int recur, Runnable r) {
+ privateSayHi(recur, r);
+ }
+}
diff --git a/test/943-private-recursive-jit/build b/test/943-private-recursive-jit/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/943-private-recursive-jit/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/943-private-recursive-jit/expected.txt b/test/943-private-recursive-jit/expected.txt
new file mode 100644
index 0000000..447f4a2
--- /dev/null
+++ b/test/943-private-recursive-jit/expected.txt
@@ -0,0 +1,22 @@
+hello2
+hello1
+Not doing anything here
+hello0
+goodbye0
+goodbye1
+goodbye2
+hello2
+hello1
+transforming calling function
+hello0 - transformed
+goodbye0 - transformed
+goodbye1
+goodbye2
+hello2 - transformed
+hello1 - transformed
+Not doing anything here
+hello0 - transformed
+goodbye0 - transformed
+goodbye1 - transformed
+goodbye2 - transformed
+
diff --git a/test/943-private-recursive-jit/info.txt b/test/943-private-recursive-jit/info.txt
new file mode 100644
index 0000000..c8b892c
--- /dev/null
+++ b/test/943-private-recursive-jit/info.txt
@@ -0,0 +1 @@
+Tests basic obsolete method support
diff --git a/test/943-private-recursive-jit/run b/test/943-private-recursive-jit/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/943-private-recursive-jit/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/943-private-recursive-jit/src/Main.java b/test/943-private-recursive-jit/src/Main.java
new file mode 100644
index 0000000..8fa534d
--- /dev/null
+++ b/test/943-private-recursive-jit/src/Main.java
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+import java.util.function.Consumer;
+import java.lang.reflect.Method;
+
+public class Main {
+ static final boolean ALWAYS_PRINT = false;
+
+ // import java.util.function.Consumer;
+ // class Transform {
+ // public void sayHi(int recur, Consumer<String> reporter, Runnable r) {
+ // privateSayHi(recur, reporter, r);
+ // }
+ // private void privateSayHi(int recur, Consumer<String> reporter, Runnable r) {
+  //     reporter.accept("hello" + recur + " - transformed");
+ // if (recur == 1) {
+ // r.run();
+ // privateSayHi(recur - 1, reporter, r);
+ // } else if (recur != 0) {
+ // privateSayHi(recur - 1, reporter, r);
+ // }
+ // reporter.accept("goodbye" + recur + " - transformed");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQANAoADgAbCgANABwHAB0KAAMAGwgAHgoAAwAfCgADACAIACEKAAMAIgsAIwAkCwAl" +
+ "ACYIACcHACgHACkBAAY8aW5pdD4BAAMoKVYBAARDb2RlAQAPTGluZU51bWJlclRhYmxlAQAFc2F5" +
+ "SGkBADUoSUxqYXZhL3V0aWwvZnVuY3Rpb24vQ29uc3VtZXI7TGphdmEvbGFuZy9SdW5uYWJsZTsp" +
+ "VgEACVNpZ25hdHVyZQEASShJTGphdmEvdXRpbC9mdW5jdGlvbi9Db25zdW1lcjxMamF2YS9sYW5n" +
+ "L1N0cmluZzs+O0xqYXZhL2xhbmcvUnVubmFibGU7KVYBAAxwcml2YXRlU2F5SGkBAA1TdGFja01h" +
+ "cFRhYmxlAQAKU291cmNlRmlsZQEADlRyYW5zZm9ybS5qYXZhDAAPABAMABcAFAEAF2phdmEvbGFu" +
+ "Zy9TdHJpbmdCdWlsZGVyAQAFaGVsbG8MACoAKwwAKgAsAQAOIC0gdHJhbnNmb3JtZWQMAC0ALgcA" +
+ "LwwAMAAxBwAyDAAzABABAAdnb29kYnllAQAJVHJhbnNmb3JtAQAQamF2YS9sYW5nL09iamVjdAEA" +
+ "BmFwcGVuZAEALShMamF2YS9sYW5nL1N0cmluZzspTGphdmEvbGFuZy9TdHJpbmdCdWlsZGVyOwEA" +
+ "HChJKUxqYXZhL2xhbmcvU3RyaW5nQnVpbGRlcjsBAAh0b1N0cmluZwEAFCgpTGphdmEvbGFuZy9T" +
+ "dHJpbmc7AQAbamF2YS91dGlsL2Z1bmN0aW9uL0NvbnN1bWVyAQAGYWNjZXB0AQAVKExqYXZhL2xh" +
+ "bmcvT2JqZWN0OylWAQASamF2YS9sYW5nL1J1bm5hYmxlAQADcnVuACAADQAOAAAAAAADAAAADwAQ" +
+ "AAEAEQAAAB0AAQABAAAABSq3AAGxAAAAAQASAAAABgABAAAAAgABABMAFAACABEAAAAkAAQABAAA" +
+ "AAgqGywttwACsQAAAAEAEgAAAAoAAgAAAAQABwAFABUAAAACABYAAgAXABQAAgARAAAAnwAEAAQA" +
+ "AABhLLsAA1m3AAQSBbYABhu2AAcSCLYABrYACbkACgIAGwSgABUtuQALAQAqGwRkLC23AAKnABAb" +
+ "mQAMKhsEZCwttwACLLsAA1m3AAQSDLYABhu2AAcSCLYABrYACbkACgIAsQAAAAIAEgAAACIACAAA" +
+ "AAcAHgAIACMACQApAAoANQALADkADABCAA4AYAAPABgAAAAEAAI1DAAVAAAAAgAWAAEAGQAAAAIA" +
+ "Gg==");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQCevtlr8B0kh/duuDYqXkGz/w9lMmtCCuRoBQAAcAAAAHhWNBIAAAAAAAAAALAEAAAg" +
+ "AAAAcAAAAAkAAADwAAAABgAAABQBAAAAAAAAAAAAAAoAAABcAQAAAQAAAKwBAACcAwAAzAEAAPYC" +
+ "AAAGAwAACgMAAA4DAAARAwAAGQMAAB0DAAAgAwAAIwMAACcDAAArAwAAOAMAAFcDAABrAwAAgQMA" +
+ "AJUDAACwAwAAzgMAAO0DAAD9AwAAAAQAAAYEAAAKBAAAEgQAABoEAAAuBAAANwQAAD4EAABMBAAA" +
+ "UQQAAFgEAABiBAAABgAAAAoAAAALAAAADAAAAA0AAAAOAAAADwAAABEAAAATAAAABwAAAAUAAAAA" +
+ "AAAACAAAAAYAAADUAgAACQAAAAYAAADcAgAAEwAAAAgAAAAAAAAAFAAAAAgAAADkAgAAFQAAAAgA" +
+ "AADwAgAAAQADAAQAAAABAAQAGwAAAAEABAAdAAAAAwADAAQAAAAEAAMAHAAAAAYAAwAEAAAABgAB" +
+ "ABcAAAAGAAIAFwAAAAYAAAAeAAAABwAFABYAAAABAAAAAAAAAAMAAAAAAAAAEgAAALQCAACeBAAA" +
+ "AAAAAAEAAACKBAAAAQABAAEAAABpBAAABAAAAHAQAwAAAA4ABgAEAAQAAABuBAAAUAAAACIABgBw" +
+ "EAUAAAAbARoAAABuIAcAEAAMAG4gBgAwAAwAGwEAAAAAbiAHABAADABuEAgAAAAMAHIgCQAEABIQ" +
+ "MwMpAHIQBAAFANgAA/9wQAEAAlQiAAYAcBAFAAAAGwEZAAAAbiAHABAADABuIAYAMAAMABsBAAAA" +
+ "AG4gBwAQAAwAbhAIAAAADAByIAkABAAOADgD4f/YAAP/cEABAAJUKNoEAAQABAAAAIEEAAAEAAAA" +
+ "cEABABAyDgAAAAAAAAAAAAIAAAAAAAAAAQAAAMwBAAACAAAAzAEAAAEAAAAAAAAAAQAAAAUAAAAD" +
+ "AAAAAAAHAAQAAAABAAAAAwAOIC0gdHJhbnNmb3JtZWQAAihJAAIpVgABPAAGPGluaXQ+AAI+OwAB" +
+ "SQABTAACTEkAAkxMAAtMVHJhbnNmb3JtOwAdTGRhbHZpay9hbm5vdGF0aW9uL1NpZ25hdHVyZTsA" +
+ "EkxqYXZhL2xhbmcvT2JqZWN0OwAUTGphdmEvbGFuZy9SdW5uYWJsZTsAEkxqYXZhL2xhbmcvU3Ry" +
+ "aW5nOwAZTGphdmEvbGFuZy9TdHJpbmdCdWlsZGVyOwAcTGphdmEvdXRpbC9mdW5jdGlvbi9Db25z" +
+ "dW1lcgAdTGphdmEvdXRpbC9mdW5jdGlvbi9Db25zdW1lcjsADlRyYW5zZm9ybS5qYXZhAAFWAARW" +
+ "SUxMAAJWTAAGYWNjZXB0AAZhcHBlbmQAEmVtaXR0ZXI6IGphY2stNC4yNAAHZ29vZGJ5ZQAFaGVs" +
+ "bG8ADHByaXZhdGVTYXlIaQADcnVuAAVzYXlIaQAIdG9TdHJpbmcABXZhbHVlAAIABw4ABwMAAAAH" +
+ "DgEeDzw8XQEeDxktAAQDAAAABw48AAICAR8cBxcBFxAXAxcOFwUXDRcCAAACAQCAgATUAwEC7AMC" +
+ "AZwFDwAAAAAAAAABAAAAAAAAAAEAAAAgAAAAcAAAAAIAAAAJAAAA8AAAAAMAAAAGAAAAFAEAAAUA" +
+ "AAAKAAAAXAEAAAYAAAABAAAArAEAAAMQAAABAAAAzAEAAAEgAAADAAAA1AEAAAYgAAABAAAAtAIA" +
+ "AAEQAAAEAAAA1AIAAAIgAAAgAAAA9gIAAAMgAAADAAAAaQQAAAQgAAABAAAAigQAAAAgAAABAAAA" +
+ "ngQAAAAQAAABAAAAsAQAAA==");
+
+ // A class that we can use to keep track of the output of this test.
+ private static class TestWatcher implements Consumer<String> {
+ private StringBuilder sb;
+ public TestWatcher() {
+ sb = new StringBuilder();
+ }
+
+ @Override
+ public void accept(String s) {
+ if (Main.ALWAYS_PRINT) {
+ System.out.println(s);
+ }
+ sb.append(s);
+ sb.append('\n');
+ }
+
+ public String getOutput() {
+ return sb.toString();
+ }
+
+ public void clear() {
+ sb = new StringBuilder();
+ }
+ }
+
+ public static void main(String[] args) {
+ doTest(new Transform());
+ }
+
+ private static boolean retry = false;
+
+ public static void doTest(Transform t) {
+ final TestWatcher reporter = new TestWatcher();
+ Method say_hi_method;
+ Method private_say_hi_method;
+ // Figure out if we can even JIT at all.
+ final boolean has_jit = hasJit();
+ try {
+ say_hi_method = Transform.class.getDeclaredMethod(
+ "sayHi", int.class, Consumer.class, Runnable.class);
+ private_say_hi_method = Transform.class.getDeclaredMethod(
+ "privateSayHi", int.class, Consumer.class, Runnable.class);
+ } catch (Exception e) {
+ System.out.println("Unable to find methods!");
+ e.printStackTrace();
+ return;
+ }
+ // Makes sure the stack is the way we want it for the test and does the redefinition. It will
+ // set the retry boolean to true if we need to go around again due to jit code being GCd.
+ Runnable do_redefinition = () -> {
+ if (has_jit &&
+ (Main.isInterpretedFunction(say_hi_method, true) ||
+ Main.isInterpretedFunction(private_say_hi_method, true))) {
+ // Try again. We are not running the right jitted methods/cannot redefine them now.
+ retry = true;
+ } else {
+ // Actually do the redefinition. The stack looks good.
+ retry = false;
+ reporter.accept("transforming calling function");
+ doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ }
+ };
+ do {
+ // Run ensureJitCompiled here since it might get GCd
+ ensureJitCompiled(Transform.class, "sayHi");
+ ensureJitCompiled(Transform.class, "privateSayHi");
+ // Clear output.
+ reporter.clear();
+ t.sayHi(2, reporter, () -> { reporter.accept("Not doing anything here"); });
+ t.sayHi(2, reporter, do_redefinition);
+ t.sayHi(2, reporter, () -> { reporter.accept("Not doing anything here"); });
+ } while(retry);
+ System.out.println(reporter.getOutput());
+ }
+
+ private static native boolean hasJit();
+
+ private static native boolean isInterpretedFunction(Method m, boolean require_deoptimizable);
+
+ private static native void ensureJitCompiled(Class c, String name);
+
+ // Transforms the class
+ private static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+}
diff --git a/test/943-private-recursive-jit/src/Transform.java b/test/943-private-recursive-jit/src/Transform.java
new file mode 100644
index 0000000..9ec3e42
--- /dev/null
+++ b/test/943-private-recursive-jit/src/Transform.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.function.Consumer;
+class Transform {
+ public void sayHi(int recur, Consumer<String> reporter, Runnable r) {
+ privateSayHi(recur, reporter, r);
+ }
+
+ private void privateSayHi(int recur, Consumer<String> reporter, Runnable r) {
+ reporter.accept("hello" + recur);
+ if (recur == 1) {
+ r.run();
+ privateSayHi(recur - 1, reporter, r);
+ } else if (recur != 0) {
+ privateSayHi(recur - 1, reporter, r);
+ }
+ reporter.accept("goodbye" + recur);
+ }
+}
diff --git a/test/944-transform-classloaders/build b/test/944-transform-classloaders/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/944-transform-classloaders/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/944-transform-classloaders/classloader.cc b/test/944-transform-classloaders/classloader.cc
new file mode 100644
index 0000000..5fbd8e1
--- /dev/null
+++ b/test/944-transform-classloaders/classloader.cc
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/macros.h"
+#include "jni.h"
+#include "mirror/class-inl.h"
+#include "openjdkjvmti/jvmti.h"
+#include "ScopedLocalRef.h"
+
+#include "ti-agent/common_helper.h"
+#include "ti-agent/common_load.h"
+
+namespace art {
+namespace Test944TransformClassloaders {
+
+
+extern "C" JNIEXPORT jlong JNICALL Java_Main_getDexFilePointer(JNIEnv* env, jclass, jclass klass) {
+ if (Runtime::Current() == nullptr) {
+ env->ThrowNew(env->FindClass("java/lang/Exception"),
+ "We do not seem to be running in ART! Unable to get dex file.");
+ return 0;
+ }
+ ScopedObjectAccess soa(env);
+ // This sequence of casts must be the same as those done in
+ // runtime/native/dalvik_system_DexFile.cc in order to ensure that we get the same results.
+ return static_cast<jlong>(reinterpret_cast<uintptr_t>(
+ &soa.Decode<mirror::Class>(klass)->GetDexFile()));
+}
+
+} // namespace Test944TransformClassloaders
+} // namespace art
diff --git a/test/944-transform-classloaders/expected.txt b/test/944-transform-classloaders/expected.txt
new file mode 100644
index 0000000..7952247
--- /dev/null
+++ b/test/944-transform-classloaders/expected.txt
@@ -0,0 +1,5 @@
+hello
+hello2
+Goodbye
+Goodbye2
+Passed
diff --git a/test/944-transform-classloaders/info.txt b/test/944-transform-classloaders/info.txt
new file mode 100644
index 0000000..9155564
--- /dev/null
+++ b/test/944-transform-classloaders/info.txt
@@ -0,0 +1,7 @@
+Tests that redefined dex files are stored in the appropriate classloader.
+
+This test cannot run on the RI.
+
+We use reflection with setAccessible(true) to examine the private internals of
+classloaders. Changes to the internal operation or definition of
+dalvik.system.BaseDexClassLoader might cause this test to fail.
diff --git a/test/944-transform-classloaders/run b/test/944-transform-classloaders/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/944-transform-classloaders/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/944-transform-classloaders/src/CommonClassDefinition.java b/test/944-transform-classloaders/src/CommonClassDefinition.java
new file mode 100644
index 0000000..62602a0
--- /dev/null
+++ b/test/944-transform-classloaders/src/CommonClassDefinition.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class CommonClassDefinition {
+ public final Class<?> target;
+ public final byte[] class_file_bytes;
+ public final byte[] dex_file_bytes;
+
+ CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+ this.target = target;
+ this.class_file_bytes = class_file_bytes;
+ this.dex_file_bytes = dex_file_bytes;
+ }
+}
diff --git a/test/944-transform-classloaders/src/Main.java b/test/944-transform-classloaders/src/Main.java
new file mode 100644
index 0000000..4911e00
--- /dev/null
+++ b/test/944-transform-classloaders/src/Main.java
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.lang.reflect.*;
+public class Main {
+
+ /**
+ * base64 encoded class/dex file for
+ * class Transform {
+ * public void sayHi() {
+ * System.out.println("Goodbye");
+ * }
+ * }
+ */
+ private static CommonClassDefinition TRANSFORM_DEFINITION = new CommonClassDefinition(
+ Transform.class,
+ Base64.getDecoder().decode(
+ "yv66vgAAADQAHAoABgAOCQAPABAIABEKABIAEwcAFAcAFQEABjxpbml0PgEAAygpVgEABENvZGUB" +
+ "AA9MaW5lTnVtYmVyVGFibGUBAAVzYXlIaQEAClNvdXJjZUZpbGUBAA5UcmFuc2Zvcm0uamF2YQwA" +
+ "BwAIBwAWDAAXABgBAAdHb29kYnllBwAZDAAaABsBAAlUcmFuc2Zvcm0BABBqYXZhL2xhbmcvT2Jq" +
+ "ZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291dAEAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwEAE2ph" +
+ "dmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxuAQAVKExqYXZhL2xhbmcvU3RyaW5nOylWACAABQAG" +
+ "AAAAAAACAAAABwAIAAEACQAAAB0AAQABAAAABSq3AAGxAAAAAQAKAAAABgABAAAAEQABAAsACAAB" +
+ "AAkAAAAlAAIAAQAAAAmyAAISA7YABLEAAAABAAoAAAAKAAIAAAATAAgAFAABAAwAAAACAA0="),
+ Base64.getDecoder().decode(
+ "ZGV4CjAzNQCLXSBQ5FiS3f16krSYZFF8xYZtFVp0GRXMAgAAcAAAAHhWNBIAAAAAAAAAACwCAAAO" +
+ "AAAAcAAAAAYAAACoAAAAAgAAAMAAAAABAAAA2AAAAAQAAADgAAAAAQAAAAABAACsAQAAIAEAAGIB" +
+ "AABqAQAAcwEAAIABAACXAQAAqwEAAL8BAADTAQAA4wEAAOYBAADqAQAA/gEAAAMCAAAMAgAAAgAA" +
+ "AAMAAAAEAAAABQAAAAYAAAAIAAAACAAAAAUAAAAAAAAACQAAAAUAAABcAQAABAABAAsAAAAAAAAA" +
+ "AAAAAAAAAAANAAAAAQABAAwAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAHAAAAAAAAAB4CAAAA" +
+ "AAAAAQABAAEAAAATAgAABAAAAHAQAwAAAA4AAwABAAIAAAAYAgAACQAAAGIAAAAbAQEAAABuIAIA" +
+ "EAAOAAAAAQAAAAMABjxpbml0PgAHR29vZGJ5ZQALTFRyYW5zZm9ybTsAFUxqYXZhL2lvL1ByaW50" +
+ "U3RyZWFtOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAEkxqYXZhL2xh" +
+ "bmcvU3lzdGVtOwAOVHJhbnNmb3JtLmphdmEAAVYAAlZMABJlbWl0dGVyOiBqYWNrLTMuMzYAA291" +
+ "dAAHcHJpbnRsbgAFc2F5SGkAEQAHDgATAAcOhQAAAAEBAICABKACAQG4Ag0AAAAAAAAAAQAAAAAA" +
+ "AAABAAAADgAAAHAAAAACAAAABgAAAKgAAAADAAAAAgAAAMAAAAAEAAAAAQAAANgAAAAFAAAABAAA" +
+ "AOAAAAAGAAAAAQAAAAABAAABIAAAAgAAACABAAABEAAAAQAAAFwBAAACIAAADgAAAGIBAAADIAAA" +
+ "AgAAABMCAAAAIAAAAQAAAB4CAAAAEAAAAQAAACwCAAA="));
+
+ /**
+ * base64 encoded class/dex file for
+ * class Transform2 {
+ * public void sayHi() {
+ * System.out.println("Goodbye2");
+ * }
+ * }
+ */
+ private static CommonClassDefinition TRANSFORM2_DEFINITION = new CommonClassDefinition(
+ Transform2.class,
+ Base64.getDecoder().decode(
+ "yv66vgAAADQAHAoABgAOCQAPABAIABEKABIAEwcAFAcAFQEABjxpbml0PgEAAygpVgEABENvZGUB" +
+ "AA9MaW5lTnVtYmVyVGFibGUBAAVzYXlIaQEAClNvdXJjZUZpbGUBAA9UcmFuc2Zvcm0yLmphdmEM" +
+ "AAcACAcAFgwAFwAYAQAIR29vZGJ5ZTIHABkMABoAGwEAClRyYW5zZm9ybTIBABBqYXZhL2xhbmcv" +
+ "T2JqZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291dAEAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwEA" +
+ "E2phdmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxuAQAVKExqYXZhL2xhbmcvU3RyaW5nOylWACAA" +
+ "BQAGAAAAAAACAAAABwAIAAEACQAAAB0AAQABAAAABSq3AAGxAAAAAQAKAAAABgABAAAAAQABAAsA" +
+ "CAABAAkAAAAlAAIAAQAAAAmyAAISA7YABLEAAAABAAoAAAAKAAIAAAADAAgABAABAAwAAAACAA0="),
+ Base64.getDecoder().decode(
+ "ZGV4CjAzNQABX6vL8OT7aGLjbzFBEfCM9Aaz+zzGzVnQAgAAcAAAAHhWNBIAAAAAAAAAADACAAAO" +
+ "AAAAcAAAAAYAAACoAAAAAgAAAMAAAAABAAAA2AAAAAQAAADgAAAAAQAAAAABAACwAQAAIAEAAGIB" +
+ "AABqAQAAdAEAAIIBAACZAQAArQEAAMEBAADVAQAA5gEAAOkBAADtAQAAAQIAAAYCAAAPAgAAAgAA" +
+ "AAMAAAAEAAAABQAAAAYAAAAIAAAACAAAAAUAAAAAAAAACQAAAAUAAABcAQAABAABAAsAAAAAAAAA" +
+ "AAAAAAAAAAANAAAAAQABAAwAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAHAAAAAAAAACECAAAA" +
+ "AAAAAQABAAEAAAAWAgAABAAAAHAQAwAAAA4AAwABAAIAAAAbAgAACQAAAGIAAAAbAQEAAABuIAIA" +
+ "EAAOAAAAAQAAAAMABjxpbml0PgAIR29vZGJ5ZTIADExUcmFuc2Zvcm0yOwAVTGphdmEvaW8vUHJp" +
+ "bnRTdHJlYW07ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xhbmcvU3RyaW5nOwASTGphdmEv" +
+ "bGFuZy9TeXN0ZW07AA9UcmFuc2Zvcm0yLmphdmEAAVYAAlZMABJlbWl0dGVyOiBqYWNrLTQuMjQA" +
+ "A291dAAHcHJpbnRsbgAFc2F5SGkAAQAHDgADAAcOhwAAAAEBAICABKACAQG4AgANAAAAAAAAAAEA" +
+ "AAAAAAAAAQAAAA4AAABwAAAAAgAAAAYAAACoAAAAAwAAAAIAAADAAAAABAAAAAEAAADYAAAABQAA" +
+ "AAQAAADgAAAABgAAAAEAAAAAAQAAASAAAAIAAAAgAQAAARAAAAEAAABcAQAAAiAAAA4AAABiAQAA" +
+ "AyAAAAIAAAAWAgAAACAAAAEAAAAhAgAAABAAAAEAAAAwAgAA"));
+
+ public static void main(String[] args) throws Exception {
+ doTest();
+ System.out.println("Passed");
+ }
+
+ private static void checkIsInstance(Class<?> klass, Object o) throws Exception {
+ if (!klass.isInstance(o)) {
+ throw new Exception(klass + " is not the class of " + o);
+ }
+ }
+
+ private static boolean arrayContains(long[] arr, long value) {
+ if (arr == null) {
+ return false;
+ }
+ for (int i = 0; i < arr.length; i++) {
+ if (arr[i] == value) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Checks that we can find the dex-file for the given class in its classloader.
+ *
+ * Throws if it fails.
+ */
+ private static void checkDexFileInClassLoader(Class<?> klass) throws Exception {
+    // If all the android BCP classes were available when compiling this test and access checks
+ // weren't a thing this function would be written as follows:
+ //
+ // long dexFilePtr = getDexFilePointer(klass);
+ // dalvik.system.BaseDexClassLoader loader =
+ // (dalvik.system.BaseDexClassLoader)klass.getClassLoader();
+ // dalvik.system.DexPathList pathListValue = loader.pathList;
+ // dalvik.system.DexPathList.Element[] elementArrayValue = pathListValue.dexElements;
+ // int array_length = elementArrayValue.length;
+ // for (int i = 0; i < array_length; i++) {
+ // dalvik.system.DexPathList.Element curElement = elementArrayValue[i];
+ // dalvik.system.DexFile curDexFile = curElement.dexFile;
+ // if (curDexFile == null) {
+ // continue;
+ // }
+ // long[] curCookie = (long[])curDexFile.mCookie;
+ // long[] curInternalCookie = (long[])curDexFile.mInternalCookie;
+ // if (arrayContains(curCookie, dexFilePtr) || arrayContains(curInternalCookie, dexFilePtr)) {
+ // return;
+ // }
+ // }
+ // throw new Exception(
+ // "Unable to find dex file pointer " + dexFilePtr + " in class loader for " + klass);
+
+ // Get all the fields and classes we need by reflection.
+ Class<?> baseDexClassLoaderClass = Class.forName("dalvik.system.BaseDexClassLoader");
+ Field pathListField = baseDexClassLoaderClass.getDeclaredField("pathList");
+
+ Class<?> dexPathListClass = Class.forName("dalvik.system.DexPathList");
+ Field elementArrayField = dexPathListClass.getDeclaredField("dexElements");
+
+ Class<?> dexPathListElementClass = Class.forName("dalvik.system.DexPathList$Element");
+ Field dexFileField = dexPathListElementClass.getDeclaredField("dexFile");
+
+ Class<?> dexFileClass = Class.forName("dalvik.system.DexFile");
+ Field dexFileCookieField = dexFileClass.getDeclaredField("mCookie");
+ Field dexFileInternalCookieField = dexFileClass.getDeclaredField("mInternalCookie");
+
+ // Make all the fields accessible
+ AccessibleObject.setAccessible(new AccessibleObject[] { pathListField,
+ elementArrayField,
+ dexFileField,
+ dexFileCookieField,
+ dexFileInternalCookieField }, true);
+
+ long dexFilePtr = getDexFilePointer(klass);
+
+ ClassLoader loader = klass.getClassLoader();
+ checkIsInstance(baseDexClassLoaderClass, loader);
+ // DexPathList pathListValue = ((BaseDexClassLoader) loader).pathList;
+ Object pathListValue = pathListField.get(loader);
+
+ checkIsInstance(dexPathListClass, pathListValue);
+
+ // DexPathList.Element[] elementArrayValue = pathListValue.dexElements;
+ Object elementArrayValue = elementArrayField.get(pathListValue);
+ if (!elementArrayValue.getClass().isArray() ||
+ elementArrayValue.getClass().getComponentType() != dexPathListElementClass) {
+ throw new Exception("elementArrayValue is not an " + dexPathListElementClass + " array!");
+ }
+ // int array_length = elementArrayValue.length;
+ int array_length = Array.getLength(elementArrayValue);
+ for (int i = 0; i < array_length; i++) {
+ // DexPathList.Element curElement = elementArrayValue[i];
+ Object curElement = Array.get(elementArrayValue, i);
+ checkIsInstance(dexPathListElementClass, curElement);
+
+ // DexFile curDexFile = curElement.dexFile;
+ Object curDexFile = dexFileField.get(curElement);
+ if (curDexFile == null) {
+ continue;
+ }
+ checkIsInstance(dexFileClass, curDexFile);
+
+ // long[] curCookie = (long[])curDexFile.mCookie;
+ long[] curCookie = (long[])dexFileCookieField.get(curDexFile);
+ // long[] curInternalCookie = (long[])curDexFile.mInternalCookie;
+ long[] curInternalCookie = (long[])dexFileInternalCookieField.get(curDexFile);
+
+ if (arrayContains(curCookie, dexFilePtr) || arrayContains(curInternalCookie, dexFilePtr)) {
+ return;
+ }
+ }
+ throw new Exception(
+ "Unable to find dex file pointer " + dexFilePtr + " in class loader for " + klass);
+ }
+
+ private static void doTest() throws Exception {
+ Transform t = new Transform();
+ Transform2 t2 = new Transform2();
+
+ long initial_t1_dex = getDexFilePointer(Transform.class);
+ long initial_t2_dex = getDexFilePointer(Transform2.class);
+ if (initial_t2_dex != initial_t1_dex) {
+ throw new Exception("The classes " + Transform.class + " and " + Transform2.class + " " +
+ "have different initial dex files!");
+ }
+ checkDexFileInClassLoader(Transform.class);
+ checkDexFileInClassLoader(Transform2.class);
+
+ // Make sure they are loaded
+ t.sayHi();
+ t2.sayHi();
+ // Redefine both of the classes.
+ doMultiClassRedefinition(TRANSFORM_DEFINITION, TRANSFORM2_DEFINITION);
+ // Make sure we actually transformed them!
+ t.sayHi();
+ t2.sayHi();
+
+ long final_t1_dex = getDexFilePointer(Transform.class);
+ long final_t2_dex = getDexFilePointer(Transform2.class);
+ if (final_t2_dex == final_t1_dex) {
+ throw new Exception("The classes " + Transform.class + " and " + Transform2.class + " " +
+ "have the same initial dex files!");
+ } else if (final_t1_dex == initial_t1_dex) {
+ throw new Exception("The class " + Transform.class + " did not get a new dex file!");
+ } else if (final_t2_dex == initial_t2_dex) {
+ throw new Exception("The class " + Transform2.class + " did not get a new dex file!");
+ }
+ // Check to make sure the new dex files are in the class loader.
+ checkDexFileInClassLoader(Transform.class);
+ checkDexFileInClassLoader(Transform2.class);
+ }
+
+ private static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+ ArrayList<Class<?>> classes = new ArrayList<>();
+ ArrayList<byte[]> class_files = new ArrayList<>();
+ ArrayList<byte[]> dex_files = new ArrayList<>();
+
+ for (CommonClassDefinition d : defs) {
+ classes.add(d.target);
+ class_files.add(d.class_file_bytes);
+ dex_files.add(d.dex_file_bytes);
+ }
+ doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+ class_files.toArray(new byte[0][]),
+ dex_files.toArray(new byte[0][]));
+ }
+
+ // Gets the 'long' (really a native pointer) that is stored in the ClassLoader representing the
+ // DexFile a class is loaded from. This is converted from the DexFile* in the same way it is done
+ // in runtime/native/dalvik_system_DexFile.cc
+ private static native long getDexFilePointer(Class<?> target);
+ // Transforms the classes
+ private static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+ byte[][] classfiles,
+ byte[][] dexfiles);
+}
diff --git a/test/944-transform-classloaders/src/Transform.java b/test/944-transform-classloaders/src/Transform.java
new file mode 100644
index 0000000..8e8af35
--- /dev/null
+++ b/test/944-transform-classloaders/src/Transform.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform {
+ public void sayHi() {
+ // Use lower 'h' to make sure the string will have a different string id
+ // than the transformation (the transformation code is the same except
+    // the actual printed String, which was making the test inaccurately passing
+ // in JIT mode when loading the string from the dex cache, as the string ids
+ // of the two different strings were the same).
+ // We know the string ids will be different because lexicographically:
+ // "Goodbye" < "LTransform;" < "hello".
+ System.out.println("hello");
+ }
+}
diff --git a/test/944-transform-classloaders/src/Transform2.java b/test/944-transform-classloaders/src/Transform2.java
new file mode 100644
index 0000000..eb22842
--- /dev/null
+++ b/test/944-transform-classloaders/src/Transform2.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform2 {
+ public void sayHi() {
+ System.out.println("hello2");
+ }
+}
diff --git a/test/956-methodhandles/src/Main.java b/test/956-methodhandles/src/Main.java
index f8daba6..fc9f030 100644
--- a/test/956-methodhandles/src/Main.java
+++ b/test/956-methodhandles/src/Main.java
@@ -15,6 +15,7 @@
*/
import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandleInfo;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodHandles.Lookup;
import java.lang.invoke.MethodType;
@@ -77,6 +78,7 @@
testReturnValueConversions();
testVariableArity();
testVariableArity_MethodHandles_bind();
+ testRevealDirect();
}
public static void testfindSpecial_invokeSuperBehaviour() throws Throwable {
@@ -384,6 +386,10 @@
public String publicMethod() {
return "publicMethod";
}
+
+ public String publicVarArgsMethod(String... args) {
+ return "publicVarArgsMethod";
+ }
}
public static void testUnreflects() throws Throwable {
@@ -670,6 +676,13 @@
Integer.class, MethodType.methodType(Integer.class, Integer.class));
fail("Unexpected success for non-void type for findConstructor");
} catch (NoSuchMethodException e) {}
+
+ // Array class constructor.
+ try {
+ MethodHandle foo = MethodHandles.lookup().findConstructor(
+ Object[].class, MethodType.methodType(void.class));
+ fail("Unexpected success for array class type for findConstructor");
+ } catch (NoSuchMethodException e) {}
}
public static void testStringConstructors() throws Throwable {
@@ -1486,4 +1499,117 @@
fail();
} catch (WrongMethodTypeException e) {}
}
+
+ public static void testRevealDirect() throws Throwable {
+ // Test with a virtual method :
+ MethodType type = MethodType.methodType(String.class);
+ MethodHandle handle = MethodHandles.lookup().findVirtual(
+ UnreflectTester.class, "publicMethod", type);
+
+ // Comparisons with an equivalent member obtained via reflection :
+ MethodHandleInfo info = MethodHandles.lookup().revealDirect(handle);
+ Method meth = UnreflectTester.class.getMethod("publicMethod");
+
+ assertEquals(MethodHandleInfo.REF_invokeVirtual, info.getReferenceKind());
+ assertEquals("publicMethod", info.getName());
+ assertTrue(UnreflectTester.class == info.getDeclaringClass());
+ assertFalse(info.isVarArgs());
+ assertEquals(meth, info.reflectAs(Method.class, MethodHandles.lookup()));
+ assertEquals(type, info.getMethodType());
+
+ // Resolution via a public lookup should fail because the method in question
+ // isn't public.
+ try {
+ info.reflectAs(Method.class, MethodHandles.publicLookup());
+ fail();
+ } catch (IllegalArgumentException expected) {
+ }
+
+ // Test with a static method :
+ handle = MethodHandles.lookup().findStatic(UnreflectTester.class,
+ "publicStaticMethod",
+ MethodType.methodType(String.class));
+
+ info = MethodHandles.lookup().revealDirect(handle);
+ meth = UnreflectTester.class.getMethod("publicStaticMethod");
+ assertEquals(MethodHandleInfo.REF_invokeStatic, info.getReferenceKind());
+ assertEquals("publicStaticMethod", info.getName());
+ assertTrue(UnreflectTester.class == info.getDeclaringClass());
+ assertFalse(info.isVarArgs());
+ assertEquals(meth, info.reflectAs(Method.class, MethodHandles.lookup()));
+ assertEquals(type, info.getMethodType());
+
+ // Test with a var-args method :
+ type = MethodType.methodType(String.class, String[].class);
+ handle = MethodHandles.lookup().findVirtual(UnreflectTester.class,
+ "publicVarArgsMethod", type);
+
+ info = MethodHandles.lookup().revealDirect(handle);
+ meth = UnreflectTester.class.getMethod("publicVarArgsMethod", String[].class);
+ assertEquals(MethodHandleInfo.REF_invokeVirtual, info.getReferenceKind());
+ assertEquals("publicVarArgsMethod", info.getName());
+ assertTrue(UnreflectTester.class == info.getDeclaringClass());
+ assertTrue(info.isVarArgs());
+ assertEquals(meth, info.reflectAs(Method.class, MethodHandles.lookup()));
+ assertEquals(type, info.getMethodType());
+
+ // Test with a constructor :
+ Constructor cons = UnreflectTester.class.getConstructor(String.class, boolean.class);
+ type = MethodType.methodType(void.class, String.class, boolean.class);
+ handle = MethodHandles.lookup().findConstructor(UnreflectTester.class, type);
+
+ info = MethodHandles.lookup().revealDirect(handle);
+ assertEquals(MethodHandleInfo.REF_newInvokeSpecial, info.getReferenceKind());
+ assertEquals("<init>", info.getName());
+ assertTrue(UnreflectTester.class == info.getDeclaringClass());
+ assertFalse(info.isVarArgs());
+ assertEquals(cons, info.reflectAs(Constructor.class, MethodHandles.lookup()));
+ assertEquals(type, info.getMethodType());
+
+ // Test with a static field :
+ Field field = UnreflectTester.class.getField("publicStaticField");
+
+ handle = MethodHandles.lookup().findStaticSetter(
+ UnreflectTester.class, "publicStaticField", String.class);
+
+ info = MethodHandles.lookup().revealDirect(handle);
+ assertEquals(MethodHandleInfo.REF_putStatic, info.getReferenceKind());
+ assertEquals("publicStaticField", info.getName());
+ assertTrue(UnreflectTester.class == info.getDeclaringClass());
+ assertFalse(info.isVarArgs());
+ assertEquals(field, info.reflectAs(Field.class, MethodHandles.lookup()));
+ assertEquals(MethodType.methodType(void.class, String.class), info.getMethodType());
+
+ // Test with a setter on the same field, the type of the handle should change
+ // but everything else must remain the same.
+ handle = MethodHandles.lookup().findStaticGetter(
+ UnreflectTester.class, "publicStaticField", String.class);
+ info = MethodHandles.lookup().revealDirect(handle);
+ assertEquals(MethodHandleInfo.REF_getStatic, info.getReferenceKind());
+ assertEquals(field, info.reflectAs(Field.class, MethodHandles.lookup()));
+ assertEquals(MethodType.methodType(String.class), info.getMethodType());
+
+ // Test with an instance field :
+ field = UnreflectTester.class.getField("publicField");
+
+ handle = MethodHandles.lookup().findSetter(
+ UnreflectTester.class, "publicField", String.class);
+
+ info = MethodHandles.lookup().revealDirect(handle);
+ assertEquals(MethodHandleInfo.REF_putField, info.getReferenceKind());
+ assertEquals("publicField", info.getName());
+ assertTrue(UnreflectTester.class == info.getDeclaringClass());
+ assertFalse(info.isVarArgs());
+ assertEquals(field, info.reflectAs(Field.class, MethodHandles.lookup()));
+ assertEquals(MethodType.methodType(void.class, String.class), info.getMethodType());
+
+ // Test with a setter on the same field, the type of the handle should change
+ // but everything else must remain the same.
+ handle = MethodHandles.lookup().findGetter(
+ UnreflectTester.class, "publicField", String.class);
+ info = MethodHandles.lookup().revealDirect(handle);
+ assertEquals(MethodHandleInfo.REF_getField, info.getReferenceKind());
+ assertEquals(field, info.reflectAs(Field.class, MethodHandles.lookup()));
+ assertEquals(MethodType.methodType(String.class), info.getMethodType());
+ }
}
diff --git a/test/957-methodhandle-transforms/expected.txt b/test/957-methodhandle-transforms/expected.txt
index 7540ef7..cf6b5a1 100644
--- a/test/957-methodhandle-transforms/expected.txt
+++ b/test/957-methodhandle-transforms/expected.txt
@@ -16,3 +16,63 @@
fallback: fallback, 42, 56
target: target, 42, 56
target: target, 42, 56
+a: a, b:b, c: c
+a: a, b:b, c: c
+a: a, b:b, c: c
+a: a, b:b, c: c
+a: a, b:b, c: c
+a: a, b:b, c: c
+a: a, b:b, c: c
+a: a, b:43
+a: a, b:43
+a: a, b:43
+a: a, b:43
+a: a, b:43
+a: a, b:b, c: c
+a: a, b:b, c: c
+a: a, b:b, c: c
+a: a, b:true, c: false
+a: a, b:true, c: false
+a: a, b:1, c: 2, d: 3, e: 4, f:5, g: 6.0, h: 7.0
+a: a, b:1, c: 2, d: 3, e: 4, f:5, g: 6.0, h: 7.0
+a: a, b:1, c: 2, d: 51, e: 52, f:53.0, g: 54.0
+a: a, b:1, c: 2, d: 51, e: 52, f:53.0, g: 54.0
+a: a, b:1, c: 2, d: 3, e: 4, f:5.0, g:6.0
+a: a, b:1, c: 2, d: 3, e: 4, f:5.0, g:6.0
+a: a, b:1, c: 2, d: 3, e: 4.0, f:5.0
+a: a, b:1, c: 2, d: 3, e: 4.0, f:5.0
+a: a, b:1, c: 2, d: 3.0, e: 4.0
+a: a, b:1, c: 2, d: 3.0, e: 4.0
+a: a, b:1.0, c: 2.0, d: 3.0
+a: a, b:1.0, c: 2.0, d: 3.0
+a: a, b:1.0, c: 2.0
+a: a, b:1.0, c: 2.0
+a: a, b:b, c: c
+a: a, b:b, c: c
+a: a, b:43
+a: a, b:b, c: c
+a: a, b:true, c: false
+a: a, b:1, c: 2
+a: a, b:a, c: b
+a: a, b:3, c: 4
+a: a, b:42, c: 43
+a: a, b:100, c: 99
+a: a, b:8.9, c: 9.1
+a: a, b:6.7, c: 7.8
+a: a, b: b, c:c, d:d
+a: a, b: b, c:c, d:d
+a: a, b: b, c:c, d:d
+a: a+b, b: c, c: d
+a: a, b: b+c, c: d
+a: a, b: b, c: c+d
+voidFilter
+a: a, b: b, c: c
+voidFilter
+a: a, b: b, c: c
+a: foo, b:45, c:56, d:bar
+a: foo, b:56, c:57, d:bar
+a: foo, b:56, c:57, d:bar
+a: foo, b:45, c:46, d:bar
+a: c+d ,b:c ,c:d ,d:e
+c+d
+a: a ,b:c ,c:d ,d:e
diff --git a/test/957-methodhandle-transforms/src/Main.java b/test/957-methodhandle-transforms/src/Main.java
index eebf55f..b6bbe74 100644
--- a/test/957-methodhandle-transforms/src/Main.java
+++ b/test/957-methodhandle-transforms/src/Main.java
@@ -34,6 +34,14 @@
testFilterReturnValue();
testPermuteArguments();
testInvokers();
+ testSpreaders_reference();
+ testSpreaders_primitive();
+ testInvokeWithArguments();
+ testAsCollector();
+ testFilterArguments();
+ testCollectArguments();
+ testInsertArguments();
+ testFoldArguments();
}
public static void testThrowException() throws Throwable {
@@ -921,6 +929,718 @@
}
}
+ public static int spreadReferences(String a, String b, String c) {
+ System.out.println("a: " + a + ", b:" + b + ", c: " + c);
+ return 42;
+ }
+
+ public static int spreadReferences_Unbox(String a, int b) {
+ System.out.println("a: " + a + ", b:" + b);
+ return 43;
+ }
+
+ public static void testSpreaders_reference() throws Throwable {
+ MethodType methodType = MethodType.methodType(int.class,
+ new Class<?>[] { String.class, String.class, String.class });
+ MethodHandle delegate = MethodHandles.lookup().findStatic(
+ Main.class, "spreadReferences", methodType);
+
+ // Basic checks on array lengths.
+ //
+ // Array size = 0
+ MethodHandle mhAsSpreader = delegate.asSpreader(String[].class, 0);
+ int ret = (int) mhAsSpreader.invoke("a", "b", "c", new String[] {});
+ assertEquals(42, ret);
+ // Array size = 1
+ mhAsSpreader = delegate.asSpreader(String[].class, 1);
+ ret = (int) mhAsSpreader.invoke("a", "b", new String[] { "c" });
+ assertEquals(42, ret);
+ // Array size = 2
+ mhAsSpreader = delegate.asSpreader(String[].class, 2);
+ ret = (int) mhAsSpreader.invoke("a", new String[] { "b", "c" });
+ assertEquals(42, ret);
+ // Array size = 3
+ mhAsSpreader = delegate.asSpreader(String[].class, 3);
+ ret = (int) mhAsSpreader.invoke(new String[] { "a", "b", "c"});
+ assertEquals(42, ret);
+
+ // Exception case, array size = 4 is illegal.
+ try {
+ delegate.asSpreader(String[].class, 4);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ }
+
+ // Exception case, calling with an arg of the wrong size.
+ // Array size = 3
+ mhAsSpreader = delegate.asSpreader(String[].class, 3);
+ try {
+ ret = (int) mhAsSpreader.invoke(new String[] { "a", "b"});
+ } catch (IllegalArgumentException expected) {
+ }
+
+ // Various other hijinks, pass as Object[] arrays, Object etc.
+ mhAsSpreader = delegate.asSpreader(Object[].class, 2);
+ ret = (int) mhAsSpreader.invoke("a", new String[] { "b", "c" });
+ assertEquals(42, ret);
+
+ mhAsSpreader = delegate.asSpreader(Object[].class, 2);
+ ret = (int) mhAsSpreader.invoke("a", new Object[] { "b", "c" });
+ assertEquals(42, ret);
+
+ mhAsSpreader = delegate.asSpreader(Object[].class, 2);
+ ret = (int) mhAsSpreader.invoke("a", (Object) new Object[] { "b", "c" });
+ assertEquals(42, ret);
+
+ // Test implicit unboxing.
+ MethodType methodType2 = MethodType.methodType(int.class,
+ new Class<?>[] { String.class, int.class });
+ MethodHandle delegate2 = MethodHandles.lookup().findStatic(
+ Main.class, "spreadReferences_Unbox", methodType2);
+
+ // .. with an Integer[] array.
+ mhAsSpreader = delegate2.asSpreader(Integer[].class, 1);
+ ret = (int) mhAsSpreader.invoke("a", new Integer[] { 43 });
+ assertEquals(43, ret);
+
+ // .. with an Integer[] array declared as an Object[] argument type.
+ mhAsSpreader = delegate2.asSpreader(Object[].class, 1);
+ ret = (int) mhAsSpreader.invoke("a", new Integer[] { 43 });
+ assertEquals(43, ret);
+
+ // .. with an Object[] array.
+ mhAsSpreader = delegate2.asSpreader(Object[].class, 1);
+ ret = (int) mhAsSpreader.invoke("a", new Object[] { Integer.valueOf(43)});
+ assertEquals(43, ret);
+
+ // -- Part 2--
+ // Run a subset of these tests on MethodHandles.spreadInvoker, which only accepts
+ // a trailing argument type of Object[].
+ MethodHandle spreadInvoker = MethodHandles.spreadInvoker(methodType2, 1);
+ ret = (int) spreadInvoker.invoke(delegate2, "a", new Object[] { Integer.valueOf(43)});
+ assertEquals(43, ret);
+
+ ret = (int) spreadInvoker.invoke(delegate2, "a", new Integer[] { 43 });
+ assertEquals(43, ret);
+
+ // NOTE: Annoyingly, the second argument here is leadingArgCount and not
+ // arrayLength.
+ spreadInvoker = MethodHandles.spreadInvoker(methodType, 3);
+ ret = (int) spreadInvoker.invoke(delegate, "a", "b", "c", new String[] {});
+ assertEquals(42, ret);
+
+ spreadInvoker = MethodHandles.spreadInvoker(methodType, 0);
+ ret = (int) spreadInvoker.invoke(delegate, new String[] { "a", "b", "c" });
+ assertEquals(42, ret);
+
+ // Exact invokes: Double check that the expected parameter type is
+ // Object[] and not T[].
+ try {
+ spreadInvoker.invokeExact(delegate, new String[] { "a", "b", "c" });
+ fail();
+ } catch (WrongMethodTypeException expected) {
+ }
+
+ ret = (int) spreadInvoker.invoke(delegate, new Object[] { "a", "b", "c" });
+ assertEquals(42, ret);
+ }
+
+ public static int spreadBoolean(String a, Boolean b, boolean c) {
+ System.out.println("a: " + a + ", b:" + b + ", c: " + c);
+ return 44;
+ }
+
+ public static int spreadByte(String a, Byte b, byte c,
+ short d, int e, long f, float g, double h) {
+ System.out.println("a: " + a + ", b:" + b + ", c: " + c +
+ ", d: " + d + ", e: " + e + ", f:" + f + ", g: " + g +
+ ", h: " + h);
+ return 45;
+ }
+
+ public static int spreadChar(String a, Character b, char c,
+ int d, long e, float f, double g) {
+ System.out.println("a: " + a + ", b:" + b + ", c: " + c +
+ ", d: " + d + ", e: " + e + ", f:" + f + ", g: " + g);
+ return 46;
+ }
+
+ public static int spreadShort(String a, Short b, short c,
+ int d, long e, float f, double g) {
+ System.out.println("a: " + a + ", b:" + b + ", c: " + c +
+ ", d: " + d + ", e: " + e + ", f:" + f + ", g:" + g);
+ return 47;
+ }
+
+ public static int spreadInt(String a, Integer b, int c,
+ long d, float e, double f) {
+ System.out.println("a: " + a + ", b:" + b + ", c: " + c +
+ ", d: " + d + ", e: " + e + ", f:" + f);
+ return 48;
+ }
+
+ public static int spreadLong(String a, Long b, long c, float d, double e) {
+ System.out.println("a: " + a + ", b:" + b + ", c: " + c +
+ ", d: " + d + ", e: " + e);
+ return 49;
+ }
+
+ public static int spreadFloat(String a, Float b, float c, double d) {
+ System.out.println("a: " + a + ", b:" + b + ", c: " + c + ", d: " + d);
+ return 50;
+ }
+
+ public static int spreadDouble(String a, Double b, double c) {
+ System.out.println("a: " + a + ", b:" + b + ", c: " + c);
+ return 51;
+ }
+
+ public static void testSpreaders_primitive() throws Throwable {
+ // boolean[]
+ // ---------------------
+ MethodType type = MethodType.methodType(int.class,
+ new Class<?>[] { String.class, Boolean.class, boolean.class });
+ MethodHandle delegate = MethodHandles.lookup().findStatic(
+ Main.class, "spreadBoolean", type);
+
+ MethodHandle spreader = delegate.asSpreader(boolean[].class, 2);
+ int ret = (int) spreader.invokeExact("a", new boolean[] { true, false });
+ assertEquals(44, ret);
+ ret = (int) spreader.invoke("a", new boolean[] { true, false });
+ assertEquals(44, ret);
+
+ // boolean can't be cast to String (the first argument to the method).
+ try {
+ delegate.asSpreader(boolean[].class, 3);
+ fail();
+ } catch (WrongMethodTypeException expected) {
+ }
+
+ // int can't be cast to boolean to supply the last argument to the method.
+ try {
+ delegate.asSpreader(int[].class, 1);
+ fail();
+ } catch (WrongMethodTypeException expected) {
+ }
+
+ // byte[]
+ // ---------------------
+ type = MethodType.methodType(int.class,
+ new Class<?>[] {
+ String.class, Byte.class, byte.class,
+ short.class, int.class, long.class,
+ float.class, double.class });
+ delegate = MethodHandles.lookup().findStatic(Main.class, "spreadByte", type);
+
+ spreader = delegate.asSpreader(byte[].class, 7);
+ ret = (int) spreader.invokeExact("a",
+ new byte[] { 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 });
+ assertEquals(45, ret);
+ ret = (int) spreader.invoke("a",
+ new byte[] { 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 });
+ assertEquals(45, ret);
+
+ // char[]
+ // ---------------------
+ type = MethodType.methodType(int.class,
+ new Class<?>[] {
+ String.class, Character.class,char.class,
+ int.class, long.class, float.class, double.class });
+ delegate = MethodHandles.lookup().findStatic(Main.class, "spreadChar", type);
+
+ spreader = delegate.asSpreader(char[].class, 6);
+ ret = (int) spreader.invokeExact("a",
+ new char[] { '1', '2', '3', '4', '5', '6' });
+ assertEquals(46, ret);
+ ret = (int) spreader.invokeExact("a",
+ new char[] { '1', '2', '3', '4', '5', '6' });
+ assertEquals(46, ret);
+
+ // short[]
+ // ---------------------
+ type = MethodType.methodType(int.class,
+ new Class<?>[] {
+ String.class, Short.class, short.class,
+ int.class, long.class, float.class, double.class });
+ delegate = MethodHandles.lookup().findStatic(Main.class, "spreadShort", type);
+
+ spreader = delegate.asSpreader(short[].class, 6);
+ ret = (int) spreader.invokeExact("a",
+ new short[] { 0x1, 0x2, 0x3, 0x4, 0x5, 0x6 });
+ assertEquals(47, ret);
+ ret = (int) spreader.invoke("a",
+ new short[] { 0x1, 0x2, 0x3, 0x4, 0x5, 0x6 });
+ assertEquals(47, ret);
+
+ // int[]
+ // ---------------------
+ type = MethodType.methodType(int.class,
+ new Class<?>[] {
+ String.class, Integer.class, int.class,
+ long.class, float.class, double.class });
+ delegate = MethodHandles.lookup().findStatic(Main.class, "spreadInt", type);
+
+ spreader = delegate.asSpreader(int[].class, 5);
+ ret = (int) spreader.invokeExact("a", new int[] { 1, 2, 3, 4, 5 });
+ assertEquals(48, ret);
+ ret = (int) spreader.invokeExact("a", new int[] { 1, 2, 3, 4, 5 });
+ assertEquals(48, ret);
+
+ // long[]
+ // ---------------------
+ type = MethodType.methodType(int.class,
+ new Class<?>[] {
+ String.class, Long.class, long.class, float.class, double.class });
+ delegate = MethodHandles.lookup().findStatic(Main.class, "spreadLong", type);
+
+ spreader = delegate.asSpreader(long[].class, 4);
+ ret = (int) spreader.invokeExact("a",
+ new long[] { 0x1, 0x2, 0x3, 0x4 });
+ assertEquals(49, ret);
+ ret = (int) spreader.invoke("a",
+ new long[] { 0x1, 0x2, 0x3, 0x4 });
+ assertEquals(49, ret);
+
+ // float[]
+ // ---------------------
+ type = MethodType.methodType(int.class,
+ new Class<?>[] {
+ String.class, Float.class, float.class, double.class });
+ delegate = MethodHandles.lookup().findStatic(Main.class, "spreadFloat", type);
+
+ spreader = delegate.asSpreader(float[].class, 3);
+ ret = (int) spreader.invokeExact("a",
+ new float[] { 1.0f, 2.0f, 3.0f });
+ assertEquals(50, ret);
+ ret = (int) spreader.invokeExact("a",
+ new float[] { 1.0f, 2.0f, 3.0f });
+ assertEquals(50, ret);
+
+ // double[]
+ // ---------------------
+ type = MethodType.methodType(int.class,
+ new Class<?>[] { String.class, Double.class, double.class });
+ delegate = MethodHandles.lookup().findStatic(Main.class, "spreadDouble", type);
+
+ spreader = delegate.asSpreader(double[].class, 2);
+ ret = (int) spreader.invokeExact("a", new double[] { 1.0, 2.0 });
+ assertEquals(51, ret);
+ ret = (int) spreader.invokeExact("a", new double[] { 1.0, 2.0 });
+ assertEquals(51, ret);
+ }
+
+ public static void testInvokeWithArguments() throws Throwable {
+ MethodType methodType = MethodType.methodType(int.class,
+ new Class<?>[] { String.class, String.class, String.class });
+ MethodHandle handle = MethodHandles.lookup().findStatic(
+ Main.class, "spreadReferences", methodType);
+
+ Object ret = handle.invokeWithArguments(new Object[] { "a", "b", "c"});
+ assertEquals(42, (int) ret);
+ handle.invokeWithArguments(new String[] { "a", "b", "c" });
+ assertEquals(42, (int) ret);
+
+ // Pass in an array that's too small. Should throw an IAE.
+ try {
+ handle.invokeWithArguments(new Object[] { "a", "b" });
+ fail();
+ } catch (IllegalArgumentException expected) {
+ } catch (WrongMethodTypeException expected) {
+ }
+
+ // Test implicit unboxing.
+ MethodType methodType2 = MethodType.methodType(int.class,
+ new Class<?>[] { String.class, int.class });
+ MethodHandle handle2 = MethodHandles.lookup().findStatic(
+ Main.class, "spreadReferences_Unbox", methodType2);
+
+ ret = (int) handle2.invokeWithArguments(new Object[] { "a", 43 });
+ assertEquals(43, (int) ret);
+ }
+
+ public static int collectBoolean(String a, boolean[] b) {
+ System.out.println("a: " + a + ", b:" + b[0] + ", c: " + b[1]);
+ return 44;
+ }
+
+ public static int collectByte(String a, byte[] b) {
+ System.out.println("a: " + a + ", b:" + b[0] + ", c: " + b[1]);
+ return 45;
+ }
+
+ public static int collectChar(String a, char[] b) {
+ System.out.println("a: " + a + ", b:" + b[0] + ", c: " + b[1]);
+ return 46;
+ }
+
+ public static int collectShort(String a, short[] b) {
+ System.out.println("a: " + a + ", b:" + b[0] + ", c: " + b[1]);
+ return 47;
+ }
+
+ public static int collectInt(String a, int[] b) {
+ System.out.println("a: " + a + ", b:" + b[0] + ", c: " + b[1]);
+ return 48;
+ }
+
+ public static int collectLong(String a, long[] b) {
+ System.out.println("a: " + a + ", b:" + b[0] + ", c: " + b[1]);
+ return 49;
+ }
+
+ public static int collectFloat(String a, float[] b) {
+ System.out.println("a: " + a + ", b:" + b[0] + ", c: " + b[1]);
+ return 50;
+ }
+
+ public static int collectDouble(String a, double[] b) {
+ System.out.println("a: " + a + ", b:" + b[0] + ", c: " + b[1]);
+ return 51;
+ }
+
+ public static int collectCharSequence(String a, CharSequence[] b) {
+ System.out.println("a: " + a + ", b:" + b[0] + ", c: " + b[1]);
+ return 99;
+ }
+
+ public static void testAsCollector() throws Throwable {
+ // Reference arrays.
+ // -------------------
+ MethodHandle trailingRef = MethodHandles.lookup().findStatic(
+ Main.class, "collectCharSequence",
+ MethodType.methodType(int.class, String.class, CharSequence[].class));
+
+ // int[] is not convertible to CharSequence[].class.
+ try {
+ trailingRef.asCollector(int[].class, 1);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ }
+
+ // Object[] is not convertible to CharSequence[].class.
+ try {
+ trailingRef.asCollector(Object[].class, 1);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ }
+
+ // String[].class is convertible to CharSequence.class
+ MethodHandle collector = trailingRef.asCollector(String[].class, 2);
+ assertEquals(99, (int) collector.invoke("a", "b", "c"));
+
+ // Too few arguments should fail with a WMTE.
+ try {
+ collector.invoke("a", "b");
+ fail();
+ } catch (WrongMethodTypeException expected) {
+ }
+
+ // Too many arguments should fail with a WMTE.
+ try {
+ collector.invoke("a", "b", "c", "d");
+ fail();
+ } catch (WrongMethodTypeException expected) {
+ }
+
+ // Sanity checks on other array types.
+
+ MethodHandle target = MethodHandles.lookup().findStatic(
+ Main.class, "collectBoolean",
+ MethodType.methodType(int.class, String.class, boolean[].class));
+ assertEquals(44, (int) target.asCollector(boolean[].class, 2).invoke("a", true, false));
+
+ target = MethodHandles.lookup().findStatic(Main.class, "collectByte",
+ MethodType.methodType(int.class, String.class, byte[].class));
+ assertEquals(45, (int) target.asCollector(byte[].class, 2).invoke("a", (byte) 1, (byte) 2));
+
+ target = MethodHandles.lookup().findStatic(Main.class, "collectChar",
+ MethodType.methodType(int.class, String.class, char[].class));
+ assertEquals(46, (int) target.asCollector(char[].class, 2).invoke("a", 'a', 'b'));
+
+ target = MethodHandles.lookup().findStatic(Main.class, "collectShort",
+ MethodType.methodType(int.class, String.class, short[].class));
+ assertEquals(47, (int) target.asCollector(short[].class, 2).invoke("a", (short) 3, (short) 4));
+
+ target = MethodHandles.lookup().findStatic(Main.class, "collectInt",
+ MethodType.methodType(int.class, String.class, int[].class));
+ assertEquals(48, (int) target.asCollector(int[].class, 2).invoke("a", 42, 43));
+
+ target = MethodHandles.lookup().findStatic(Main.class, "collectLong",
+ MethodType.methodType(int.class, String.class, long[].class));
+ assertEquals(49, (int) target.asCollector(long[].class, 2).invoke("a", 100, 99));
+
+ target = MethodHandles.lookup().findStatic(Main.class, "collectFloat",
+ MethodType.methodType(int.class, String.class, float[].class));
+ assertEquals(50, (int) target.asCollector(float[].class, 2).invoke("a", 8.9f, 9.1f));
+
+ target = MethodHandles.lookup().findStatic(Main.class, "collectDouble",
+ MethodType.methodType(int.class, String.class, double[].class));
+ assertEquals(51, (int) target.asCollector(double[].class, 2).invoke("a", 6.7, 7.8));
+ }
+
+ public static String filter1(char a) {
+ return String.valueOf(a);
+ }
+
+ public static char filter2(String b) {
+ return b.charAt(0);
+ }
+
+ public static String badFilter1(char a, char b) {
+ return "bad";
+ }
+
+ public static int filterTarget(String a, char b, String c, char d) {
+ System.out.println("a: " + a + ", b: " + b + ", c:" + c + ", d:" + d);
+ return 56;
+ }
+
+ public static void testFilterArguments() throws Throwable {
+ MethodHandle filter1 = MethodHandles.lookup().findStatic(
+ Main.class, "filter1", MethodType.methodType(String.class, char.class));
+ MethodHandle filter2 = MethodHandles.lookup().findStatic(
+ Main.class, "filter2", MethodType.methodType(char.class, String.class));
+
+ MethodHandle target = MethodHandles.lookup().findStatic(
+ Main.class, "filterTarget", MethodType.methodType(int.class,
+ String.class, char.class, String.class, char.class));
+
+ // In all the cases below, the values printed will be 'a', 'b', 'c', 'd'.
+
+ // Filter arguments [0, 1] - all other arguments are passed through
+ // as is.
+ MethodHandle adapter = MethodHandles.filterArguments(
+ target, 0, filter1, filter2);
+ assertEquals(56, (int) adapter.invokeExact('a', "bXXXX", "c", 'd'));
+
+ // Filter arguments [1, 2].
+ adapter = MethodHandles.filterArguments(target, 1, filter2, filter1);
+ assertEquals(56, (int) adapter.invokeExact("a", "bXXXX", 'c', 'd'));
+
+ // Filter arguments [2, 3].
+ adapter = MethodHandles.filterArguments(target, 2, filter1, filter2);
+ assertEquals(56, (int) adapter.invokeExact("a", 'b', 'c', "dXXXXX"));
+
+ // Try out a few error cases :
+
+ // The return types of the filter doesn't align with the expected argument
+ // type of the target.
+ try {
+ adapter = MethodHandles.filterArguments(target, 2, filter2, filter1);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ }
+
+ // There are more filters than arguments.
+ try {
+ adapter = MethodHandles.filterArguments(target, 3, filter2, filter1);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ }
+
+ // We pass in an obviously bogus position.
+ try {
+ adapter = MethodHandles.filterArguments(target, -1, filter2, filter1);
+ fail();
+ } catch (ArrayIndexOutOfBoundsException expected) {
+ }
+
+ // We pass in a function that has more than one argument.
+ MethodHandle badFilter1 = MethodHandles.lookup().findStatic(
+ Main.class, "badFilter1",
+ MethodType.methodType(String.class, char.class, char.class));
+
+ try {
+ adapter = MethodHandles.filterArguments(target, 0, badFilter1, filter2);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ }
+ }
+
+ static void voidFilter(char a, char b) {
+ System.out.println("voidFilter");
+ }
+
+ static String filter(char a, char b) {
+ return String.valueOf(a) + "+" + b;
+ }
+
+ static char badFilter(char a, char b) {
+ return 0;
+ }
+
+ static int target(String a, String b, String c) {
+ System.out.println("a: " + a + ", b: " + b + ", c: " + c);
+ return 57;
+ }
+
+ public static void testCollectArguments() throws Throwable {
+ // Test non-void filters.
+ MethodHandle filter = MethodHandles.lookup().findStatic(
+ Main.class, "filter",
+ MethodType.methodType(String.class, char.class, char.class));
+
+ MethodHandle target = MethodHandles.lookup().findStatic(
+ Main.class, "target",
+ MethodType.methodType(int.class, String.class, String.class, String.class));
+
+ // Filter at position 0.
+ MethodHandle adapter = MethodHandles.collectArguments(target, 0, filter);
+ assertEquals(57, (int) adapter.invokeExact('a', 'b', "c", "d"));
+
+ // Filter at position 1.
+ adapter = MethodHandles.collectArguments(target, 1, filter);
+ assertEquals(57, (int) adapter.invokeExact("a", 'b', 'c', "d"));
+
+ // Filter at position 2.
+ adapter = MethodHandles.collectArguments(target, 2, filter);
+ assertEquals(57, (int) adapter.invokeExact("a", "b", 'c', 'd'));
+
+ // Test void filters. Note that we're passing in one more argument
+ // than usual because the filter returns nothing - we have to invoke with
+ // the full set of filter args and the full set of target args.
+ filter = MethodHandles.lookup().findStatic(Main.class, "voidFilter",
+ MethodType.methodType(void.class, char.class, char.class));
+ adapter = MethodHandles.collectArguments(target, 0, filter);
+ assertEquals(57, (int) adapter.invokeExact('a', 'b', "a", "b", "c"));
+
+ adapter = MethodHandles.collectArguments(target, 1, filter);
+ assertEquals(57, (int) adapter.invokeExact("a", 'a', 'b', "b", "c"));
+
+ // Test out a few failure cases.
+ filter = MethodHandles.lookup().findStatic(
+ Main.class, "filter",
+ MethodType.methodType(String.class, char.class, char.class));
+
+ // Bogus filter position.
+ try {
+ adapter = MethodHandles.collectArguments(target, 3, filter);
+ fail();
+ } catch (IndexOutOfBoundsException expected) {
+ }
+
+ // Mismatch in filter return type.
+ filter = MethodHandles.lookup().findStatic(
+ Main.class, "badFilter",
+ MethodType.methodType(char.class, char.class, char.class));
+ try {
+ adapter = MethodHandles.collectArguments(target, 0, filter);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ }
+ }
+
+ static int insertReceiver(String a, int b, Integer c, String d) {
+ System.out.println("a: " + a + ", b:" + b + ", c:" + c + ", d:" + d);
+ return 73;
+ }
+
+ public static void testInsertArguments() throws Throwable {
+ MethodHandle target = MethodHandles.lookup().findStatic(
+ Main.class, "insertReceiver",
+ MethodType.methodType(int.class,
+ String.class, int.class, Integer.class, String.class));
+
+ // Basic single element array inserted at position 0.
+ MethodHandle adapter = MethodHandles.insertArguments(
+ target, 0, new Object[] { "foo" });
+ assertEquals(73, (int) adapter.invokeExact(45, Integer.valueOf(56), "bar"));
+
+ // Exercise unboxing.
+ adapter = MethodHandles.insertArguments(
+ target, 1, new Object[] { Integer.valueOf(56), 57 });
+ assertEquals(73, (int) adapter.invokeExact("foo", "bar"));
+
+ // Exercise a widening conversion.
+ adapter = MethodHandles.insertArguments(
+ target, 1, new Object[] { (short) 56, Integer.valueOf(57) });
+ assertEquals(73, (int) adapter.invokeExact("foo", "bar"));
+
+ // Insert an argument at the last position.
+ adapter = MethodHandles.insertArguments(
+ target, 3, new Object[] { "bar" });
+ assertEquals(73, (int) adapter.invokeExact("foo", 45, Integer.valueOf(46)));
+
+ // Exercise a few error cases.
+
+ // A reference type that can't be cast to another reference type.
+ try {
+ MethodHandles.insertArguments(target, 3, new Object[] { new Object() });
+ fail();
+ } catch (ClassCastException expected) {
+ }
+
+ // A boxed type that can't be unboxed correctly.
+ try {
+ MethodHandles.insertArguments(target, 1, new Object[] { Long.valueOf(56) });
+ fail();
+ } catch (ClassCastException expected) {
+ }
+ }
+
+ public static String foldFilter(char a, char b) {
+ return String.valueOf(a) + "+" + b;
+ }
+
+ public static void voidFoldFilter(String e, char a, char b) {
+ System.out.println(String.valueOf(a) + "+" + b);
+ }
+
+ public static int foldTarget(String a, char b, char c, String d) {
+ System.out.println("a: " + a + " ,b:" + b + " ,c:" + c + " ,d:" + d);
+ return 89;
+ }
+
+ public static void mismatchedVoidFilter(Integer a) {
+ }
+
+ public static Integer mismatchedNonVoidFilter(char a, char b) {
+ return null;
+ }
+
+ public static void testFoldArguments() throws Throwable {
+ // Test non-void filters.
+ MethodHandle filter = MethodHandles.lookup().findStatic(
+ Main.class, "foldFilter",
+ MethodType.methodType(String.class, char.class, char.class));
+
+ MethodHandle target = MethodHandles.lookup().findStatic(
+ Main.class, "foldTarget",
+ MethodType.methodType(int.class, String.class,
+ char.class, char.class, String.class));
+
+ // Folder with a non-void type.
+ MethodHandle adapter = MethodHandles.foldArguments(target, filter);
+ assertEquals(89, (int) adapter.invokeExact('c', 'd', "e"));
+
+ // Folder with a void type.
+ filter = MethodHandles.lookup().findStatic(
+ Main.class, "voidFoldFilter",
+ MethodType.methodType(void.class, String.class, char.class, char.class));
+ adapter = MethodHandles.foldArguments(target, filter);
+ assertEquals(89, (int) adapter.invokeExact("a", 'c', 'd', "e"));
+
+ // Test a few erroneous cases.
+
+ filter = MethodHandles.lookup().findStatic(
+ Main.class, "mismatchedVoidFilter",
+ MethodType.methodType(void.class, Integer.class));
+ try {
+ adapter = MethodHandles.foldArguments(target, filter);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ }
+
+ filter = MethodHandles.lookup().findStatic(
+ Main.class, "mismatchedNonVoidFilter",
+ MethodType.methodType(Integer.class, char.class, char.class));
+ try {
+ adapter = MethodHandles.foldArguments(target, filter);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ }
+ }
+
public static void fail() {
System.out.println("FAIL");
Thread.dumpStack();
@@ -931,6 +1651,10 @@
Thread.dumpStack();
}
+ public static void assertEquals(int i1, int i2) {
+ if (i1 != i2) throw new AssertionError("Expected: " + i1 + " was " + i2);
+ }
+
public static void assertEquals(String s1, String s2) {
if (s1 == s2) {
return;
diff --git a/test/Android.bp b/test/Android.bp
index 89e4092..d3244a6 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -173,12 +173,13 @@
whole_static_libs: [
"libart-compiler-gtest",
"libart-runtime-gtest",
- "libgtest",
+ "libgtest"
],
shared_libs: [
"libartd",
"libartd-compiler",
"libbase",
+ "libbacktrace"
],
target: {
android: {
@@ -271,6 +272,8 @@
"929-search/search.cc",
"931-agent-thread/agent_thread.cc",
"933-misc-events/misc_events.cc",
+ "936-search-onload/search_onload.cc",
+ "944-transform-classloaders/classloader.cc",
],
shared_libs: [
"libbase",
@@ -317,6 +320,7 @@
"141-class-unload/jni_unload.cc",
"148-multithread-gc-annotations/gc_coverage.cc",
"149-suspend-all-stress/suspend_all.cc",
+ "154-gc-loop/heap_interface.cc",
"454-get-vreg/get_vreg_jni.cc",
"457-regs/regs_jni.cc",
"461-get-reference-vreg/get_reference_vreg_jni.cc",
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 814f968..742353d 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -136,9 +136,9 @@
COMPILER_TYPES += regalloc_gc
OPTIMIZING_COMPILER_TYPES += regalloc_gc
endif
-RELOCATE_TYPES := relocate
-ifeq ($(ART_TEST_RUN_TEST_NO_RELOCATE),true)
- RELOCATE_TYPES += no-relocate
+RELOCATE_TYPES := no-relocate
+ifeq ($(ART_TEST_RUN_TEST_RELOCATE),true)
+ RELOCATE_TYPES += relocate
endif
ifeq ($(ART_TEST_RUN_TEST_RELOCATE_NO_PATCHOAT),true)
RELOCATE_TYPES += relocate-npatchoat
@@ -161,7 +161,9 @@
ifeq ($(ART_TEST_JNI_FORCECOPY),true)
JNI_TYPES += forcecopy
endif
+ifeq ($(ART_TEST_RUN_TEST_IMAGE),true)
IMAGE_TYPES := picimage
+endif
ifeq ($(ART_TEST_RUN_TEST_NO_IMAGE),true)
IMAGE_TYPES += no-image
endif
@@ -379,6 +381,7 @@
# slows down allocations significantly which these tests do a lot.
TEST_ART_BROKEN_GCSTRESS_RUN_TESTS := \
137-cfi \
+ 154-gc-loop \
908-gc-start-finish \
913-heaps \
961-default-iface-resolution-gen \
@@ -436,13 +439,14 @@
629-vdex-speed
# This test fails without an image.
-# 018, 961, 964 often time out. b/34369284
+# 018, 961, 964, 968 often time out. b/34369284
TEST_ART_BROKEN_NO_IMAGE_RUN_TESTS := \
137-cfi \
138-duplicate-classes-check \
018-stack-overflow \
961-default-iface-resolution-gen \
- 964-default-iface-init
+ 964-default-iface-init \
+ 968-default-partial-compile-gen \
ifneq (,$(filter no-dex2oat,$(PREBUILD_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),no-dex2oat, \
@@ -536,12 +540,14 @@
# flaky as JIT tests. This should be fixed once b/33630159 or b/33616143 are
# resolved but until then just disable them. Test 916 already checks this
# feature for JIT use cases in a way that is resilient to the jit frames.
+# 912: b/34655682
TEST_ART_BROKEN_JIT_RUN_TESTS := \
137-cfi \
629-vdex-speed \
902-hello-transformation \
904-object-allocation \
906-iterate-heap \
+ 912-classes \
914-hello-obsolescence \
915-obsolete-2 \
917-fields-transformation \
@@ -602,6 +608,7 @@
TEST_ART_BROKEN_OPTIMIZING_NONDEBUGGABLE_RUN_TESTS := \
454-get-vreg \
457-regs \
+ 602-deoptimizeable
ifneq (,$(filter $(OPTIMIZING_COMPILER_TYPES),$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -704,8 +711,10 @@
TEST_ART_BROKEN_OPTIMIZING_HEAP_POISONING_RUN_TESTS :=
-# Tests that check semantics for a non-debuggable app.
+# 909: Tests that check semantics for a non-debuggable app.
+# 137: relies on AOT code and debuggable makes us JIT always.
TEST_ART_BROKEN_DEBUGGABLE_RUN_TESTS := \
+ 137-cfi \
909-attach-agent \
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
diff --git a/test/ErroneousInit/ErroneousInit.java b/test/ErroneousInit/ErroneousInit.java
new file mode 100644
index 0000000..67b7b20
--- /dev/null
+++ b/test/ErroneousInit/ErroneousInit.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class ErroneousInit {
+ static {
+ if (true) {
+ throw new Error();
+ }
+ }
+}
diff --git a/test/Nested/Nested.java b/test/Nested/Nested.java
index 78b273b..f493989 100644
--- a/test/Nested/Nested.java
+++ b/test/Nested/Nested.java
@@ -17,4 +17,6 @@
class Nested {
class Inner {
}
+ Object x = new Object() {
+ };
}
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 28fa130..186a151 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -39,7 +39,7 @@
PATCHOAT=""
PREBUILD="y"
QUIET="n"
-RELOCATE="y"
+RELOCATE="n"
STRIP_DEX="n"
SECONDARY_DEX=""
TIME_OUT="gdb" # "n" (disabled), "timeout" (use timeout), "gdb" (use gdb)
@@ -344,7 +344,7 @@
else
FLAGS="${FLAGS} -agentpath:${agent}=${TEST_NAME},art"
FLAGS="${FLAGS} -Xplugin:${plugin}"
- FLAGS="${FLAGS} -Xfully-deoptable"
+ FLAGS="${FLAGS} -Xcompiler-option --debuggable"
# Always make the compilation be debuggable.
COMPILE_FLAGS="${COMPILE_FLAGS} --debuggable"
fi
@@ -364,6 +364,8 @@
if [ "$HAVE_IMAGE" = "n" ]; then
+ # Add 5 minutes to give some time to generate the boot image.
+ TIME_OUT_VALUE=$((${TIME_OUT_VALUE} + 300))
DALVIKVM_BOOT_OPT="-Ximage:/system/non-existant/core.art"
else
DALVIKVM_BOOT_OPT="-Ximage:${BOOT_IMAGE}"
diff --git a/test/run-test b/test/run-test
index c78fa35..27c700e 100755
--- a/test/run-test
+++ b/test/run-test
@@ -111,7 +111,7 @@
dev_mode="no"
update_mode="no"
debug_mode="no"
-relocate="yes"
+relocate="no"
runtime="art"
usage="no"
build_only="no"
@@ -156,6 +156,7 @@
shift
elif [ "x$1" = "x--jvm" ]; then
target_mode="no"
+ DEX_LOCATION="$tmp_dir"
runtime="jvm"
image_args=""
prebuild_mode="no"
@@ -408,7 +409,9 @@
mkdir -p $tmp_dir
# Add thread suspend timeout flag
-run_args="${run_args} --runtime-option -XX:ThreadSuspendTimeout=$suspend_timeout"
+if [ ! "$runtime" = "jvm" ]; then
+ run_args="${run_args} --runtime-option -XX:ThreadSuspendTimeout=$suspend_timeout"
+fi
if [ "$basic_verify" = "true" ]; then
# Set HspaceCompactForOOMMinIntervalMs to zero to run hspace compaction for OOM more frequently in tests.
@@ -624,8 +627,8 @@
echo " --strip-dex Strip the dex files before starting test."
echo " --relocate Force the use of relocating in the test, making"
echo " the image and oat files be relocated to a random"
- echo " address before running. (default)"
- echo " --no-relocate Force the use of no relocating in the test"
+ echo " address before running."
+ echo " --no-relocate Force the use of no relocating in the test. (default)"
echo " --image Run the test using a precompiled boot image. (default)"
echo " --no-image Run the test without a precompiled boot image."
echo " --host Use the host-mode virtual machine."
diff --git a/test/ti-agent/common_helper.cc b/test/ti-agent/common_helper.cc
index 80e1797..ea6359e 100644
--- a/test/ti-agent/common_helper.cc
+++ b/test/ti-agent/common_helper.cc
@@ -210,6 +210,7 @@
// Map from class name to transformation result.
std::map<std::string, std::deque<CommonTransformationResult>> gTransformations;
+bool gPopTransformations = true;
extern "C" JNIEXPORT void JNICALL Java_Main_addCommonTransformationResult(JNIEnv* env,
jclass,
@@ -266,7 +267,32 @@
memcpy(new_data, desired_array.data(), desired_array.size());
*new_class_data = new_data;
*new_class_data_len = desired_array.size();
+ if (gPopTransformations) {
+ gTransformations[name_str].pop_front();
+ }
+ }
+}
+
+extern "C" JNIEXPORT void Java_Main_setPopRetransformations(JNIEnv*,
+ jclass,
+ jboolean enable) {
+ gPopTransformations = enable;
+}
+
+extern "C" JNIEXPORT void Java_Main_popTransformationFor(JNIEnv* env,
+ jclass,
+ jstring class_name) {
+ const char* name_chrs = env->GetStringUTFChars(class_name, nullptr);
+ std::string name_str(name_chrs);
+ env->ReleaseStringUTFChars(class_name, name_chrs);
+ if (gTransformations.find(name_str) != gTransformations.end() &&
+ gTransformations[name_str].size() > 0) {
gTransformations[name_str].pop_front();
+ } else {
+ std::stringstream err;
+ err << "No transformations found for class " << name_str;
+ std::string message = err.str();
+ env->ThrowNew(env->FindClass("java/lang/Exception"), message.c_str());
}
}
@@ -301,11 +327,36 @@
}
}
-// TODO Write something useful.
extern "C" JNIEXPORT void JNICALL Java_Main_doCommonClassRetransformation(JNIEnv* env,
jclass,
jobjectArray targets) {
- DoClassRetransformation(jvmti_env, env, targets);
+ jvmtiCapabilities caps;
+ jvmtiError caps_err = jvmti_env->GetCapabilities(&caps);
+ if (caps_err != JVMTI_ERROR_NONE) {
+ env->ThrowNew(env->FindClass("java/lang/Exception"),
+ "Unable to get current jvmtiEnv capabilities");
+ return;
+ }
+
+ // Allocate a new environment if we don't have the can_retransform_classes capability needed to
+ // call the RetransformClasses function.
+ jvmtiEnv* real_env = nullptr;
+ if (caps.can_retransform_classes != 1) {
+ JavaVM* vm = nullptr;
+ if (env->GetJavaVM(&vm) != 0 ||
+ vm->GetEnv(reinterpret_cast<void**>(&real_env), JVMTI_VERSION_1_0) != 0) {
+ env->ThrowNew(env->FindClass("java/lang/Exception"),
+ "Unable to create temporary jvmtiEnv for RetransformClasses call.");
+ return;
+ }
+ SetAllCapabilities(real_env);
+ } else {
+ real_env = jvmti_env;
+ }
+ DoClassRetransformation(real_env, env, targets);
+ if (caps.can_retransform_classes != 1) {
+ real_env->DisposeEnvironment();
+ }
}
// Get all capabilities except those related to retransformation.
@@ -329,6 +380,38 @@
} // namespace common_retransform
+namespace common_transform {
+
+using art::common_retransform::CommonClassFileLoadHookRetransformable;
+
+// Get all capabilities except those related to retransformation.
+jint OnLoad(JavaVM* vm,
+ char* options ATTRIBUTE_UNUSED,
+ void* reserved ATTRIBUTE_UNUSED) {
+ if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
+ printf("Unable to get jvmti env!\n");
+ return 1;
+ }
+ // Don't set the retransform caps
+ jvmtiCapabilities caps;
+ jvmti_env->GetPotentialCapabilities(&caps);
+ caps.can_retransform_classes = 0;
+ caps.can_retransform_any_class = 0;
+ jvmti_env->AddCapabilities(&caps);
+
+ // Use the same callback as the retransform test.
+ jvmtiEventCallbacks cb;
+ memset(&cb, 0, sizeof(cb));
+ cb.ClassFileLoadHook = CommonClassFileLoadHookRetransformable;
+ if (jvmti_env->SetEventCallbacks(&cb, sizeof(cb)) != JVMTI_ERROR_NONE) {
+ printf("Unable to set class file load hook cb!\n");
+ return 1;
+ }
+ return 0;
+}
+
+} // namespace common_transform
+
static void BindMethod(jvmtiEnv* jenv,
JNIEnv* env,
jclass klass,
@@ -340,15 +423,29 @@
LOG(FATAL) << "Could not get methods";
}
- ArtMethod* m = jni::DecodeArtMethod(method);
-
std::string names[2];
- {
+ if (IsJVM()) {
+ // TODO Get the JNI long name
+ char* klass_name;
+ jvmtiError klass_result = jenv->GetClassSignature(klass, &klass_name, nullptr);
+ if (klass_result == JVMTI_ERROR_NONE) {
+ std::string name_str(name);
+ std::string klass_str(klass_name);
+ names[0] = GetJniShortName(klass_str, name_str);
+ jenv->Deallocate(reinterpret_cast<unsigned char*>(klass_name));
+ } else {
+ LOG(FATAL) << "Could not get class name!";
+ }
+ } else {
ScopedObjectAccess soa(Thread::Current());
+ ArtMethod* m = jni::DecodeArtMethod(method);
names[0] = m->JniShortName();
names[1] = m->JniLongName();
}
for (const std::string& mangled_name : names) {
+ if (mangled_name == "") {
+ continue;
+ }
void* sym = dlsym(RTLD_DEFAULT, mangled_name.c_str());
if (sym == nullptr) {
continue;
diff --git a/test/ti-agent/common_helper.h b/test/ti-agent/common_helper.h
index c60553d..0318501 100644
--- a/test/ti-agent/common_helper.h
+++ b/test/ti-agent/common_helper.h
@@ -27,10 +27,15 @@
jint OnLoad(JavaVM* vm, char* options, void* reserved);
} // namespace common_redefine
+
namespace common_retransform {
jint OnLoad(JavaVM* vm, char* options, void* reserved);
} // namespace common_retransform
+namespace common_transform {
+jint OnLoad(JavaVM* vm, char* options, void* reserved);
+} // namespace common_transform
+
extern bool RuntimeIsJVM;
diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc
index 8ed8e67..c5a9356 100644
--- a/test/ti-agent/common_load.cc
+++ b/test/ti-agent/common_load.cc
@@ -28,6 +28,7 @@
#include "901-hello-ti-agent/basics.h"
#include "909-attach-agent/attach.h"
+#include "936-search-onload/search_onload.h"
namespace art {
@@ -110,6 +111,17 @@
{ "926-multi-obsolescence", common_redefine::OnLoad, nullptr },
{ "930-hello-retransform", common_retransform::OnLoad, nullptr },
{ "932-transform-saves", common_retransform::OnLoad, nullptr },
+ { "934-load-transform", common_retransform::OnLoad, nullptr },
+ { "935-non-retransformable", common_transform::OnLoad, nullptr },
+ { "936-search-onload", Test936SearchOnload::OnLoad, nullptr },
+ { "937-hello-retransform-package", common_retransform::OnLoad, nullptr },
+ { "938-load-transform-bcp", common_retransform::OnLoad, nullptr },
+ { "939-hello-transformation-bcp", common_redefine::OnLoad, nullptr },
+ { "940-recursive-obsolete", common_redefine::OnLoad, nullptr },
+ { "941-recursive-obsolete-jit", common_redefine::OnLoad, nullptr },
+ { "942-private-recursive", common_redefine::OnLoad, nullptr },
+ { "943-private-recursive-jit", common_redefine::OnLoad, nullptr },
+ { "944-transform-classloaders", common_redefine::OnLoad, nullptr },
};
static AgentLib* FindAgent(char* name) {
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 2d26b48..963efa4 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -52,6 +52,9 @@
shift
elif [[ "$1" == "" ]]; then
break
+ else
+ echo "Unknown options $@"
+ exit 1
fi
done
diff --git a/tools/cpp-define-generator/Android.bp b/tools/cpp-define-generator/Android.bp
index d792e90..59c5211 100644
--- a/tools/cpp-define-generator/Android.bp
+++ b/tools/cpp-define-generator/Android.bp
@@ -20,7 +20,7 @@
//
// In the future we may wish to parameterize this on (32,64)x(read_barrier,no_read_barrier).
-art_cc_binary {
+cc_binary { // Do not use art_cc_binary because HOST_PREFER_32_BIT is incompatible with genrule.
name: "cpp-define-generator-data",
host_supported: true,
device_supported: false,
@@ -34,3 +34,14 @@
"libbase",
],
}
+
+// Note: See $OUT_DIR/soong/build.ninja
+// For the exact filename that this generates to run make command on just
+// this rule later.
+genrule {
+ name: "cpp-define-generator-asm-support",
+ out: ["asm_support_gen.h"],
+ tools: ["cpp-define-generator-data"],
+ tool_files: ["verify-asm-support"],
+ cmd: "$(location verify-asm-support) --quiet \"$(location cpp-define-generator-data)\" \"$(out)\""
+}
diff --git a/tools/cpp-define-generator/presubmit-check-files-up-to-date b/tools/cpp-define-generator/presubmit-check-files-up-to-date
new file mode 100755
index 0000000..67a702a
--- /dev/null
+++ b/tools/cpp-define-generator/presubmit-check-files-up-to-date
@@ -0,0 +1,67 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ---------------------------------------------------------------------------
+
+# Generates asm_support_gen.h into a temporary location.
+# Then verifies it is the same as our local stored copy.
+
+GEN_TOOL=cpp-define-generator-data
+
+if ! which "$GEN_TOOL"; then
+ echo "ERROR: Please build cpp-define-generator-data or source build/envsetup.sh" >&2
+ exit 1
+fi
+
+#######################
+#######################
+
+PREUPLOAD_COMMIT_COPY="$(mktemp ${TMPDIR:-/tmp}/tmp.XXXXXX)"
+BUILD_COPY="$(mktemp ${TMPDIR:-/tmp}/tmp.XXXXXX)"
+
+function finish() {
+ # Delete temp files.
+ [[ -f "$PREUPLOAD_COMMIT_COPY" ]] && rm "$PREUPLOAD_COMMIT_COPY"
+ [[ -f "$BUILD_COPY" ]] && rm "$BUILD_COPY"
+}
+trap finish EXIT
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+ART_DIR="$( cd "$DIR/../.." && pwd )"
+ASM_SUPPORT_GEN_CHECKED_IN_COPY="runtime/generated/asm_support_gen.h"
+
+# Repo upload hook runs inside of the top-level git directory.
+# If we run this script manually, be in the right place for git.
+cd "$ART_DIR"
+
+if [[ -z $PREUPLOAD_COMMIT ]]; then
+ echo "WARNING: Not running as a pre-upload hook. Assuming commit to check = 'HEAD'"
+ PREUPLOAD_COMMIT=HEAD
+fi
+
+# Get version we are about to push into git.
+git show "$PREUPLOAD_COMMIT:$ASM_SUPPORT_GEN_CHECKED_IN_COPY" > "$PREUPLOAD_COMMIT_COPY" || exit 1
+# Get version that our build would have made.
+"$GEN_TOOL" > "$BUILD_COPY" || exit 1
+
+if ! diff "$PREUPLOAD_COMMIT_COPY" "$BUILD_COPY"; then
+ echo "asm-support: ERROR: Checked-in copy of '$ASM_SUPPORT_GEN_CHECKED_IN_COPY' " >&2
+ echo " has diverged from the build copy." >&2
+ echo " Please re-run the 'generate-asm-support' command to resync the header." >&2
+ exit 1
+fi
+
+# Success. Print nothing to avoid spamming users.
diff --git a/tools/cpp-define-generator/verify-asm-support b/tools/cpp-define-generator/verify-asm-support
new file mode 100755
index 0000000..745b115
--- /dev/null
+++ b/tools/cpp-define-generator/verify-asm-support
@@ -0,0 +1,101 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ---------------------------------------------------------------------------
+
+# Generates asm_support_gen.h into the $OUT directory in the build.
+# Then verifies that it is the same as in runtime/generated/asm_support_gen.h
+
+# Validates that art/runtime/generated/asm_support_gen.h
+# - This must be run after a build since it uses cpp-define-generator-data
+
+# Path to asm_support_gen.h that we check into our git repository.
+ASM_SUPPORT_GEN_CHECKED_IN_COPY="runtime/generated/asm_support_gen.h"
+# Instead of producing an error if checked-in copy differs from the generated version,
+# overwrite the local checked-in copy instead.
+OVERWRITE_CHECKED_IN_COPY_IF_CHANGED="n"
+
+#######################
+#######################
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+ART_DIR="$( cd "$DIR/../.." && pwd )"
+ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY="$ART_DIR/runtime/generated/asm_support_gen.h"
+
+# Sanity check that we haven't moved the file around.
+# If we did, perhaps the above constant should be updated.
+if ! [[ -f "$ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY" ]]; then
+ echo "ERROR: Missing asm_support_gen.h, expected to be in '$ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY'" >&2
+ exit 1
+fi
+
+# The absolute path to cpp-define-generator is in $1
+# Generate the file as part of the build into the out location specified by $2.
+
+# Compare that the generated file matches our golden copy that's checked into git.
+# If not, it is a fatal error and the user needs to run 'generate-asm-support' to rebuild.
+
+if [[ $# -lt 2 ]]; then
+ echo "Usage: $0 [--quiet] [--presubmit] <path-to-cpp-define-generator-data-binary> <output-file>'" >&2
+ exit 1
+fi
+
+# Suppress 'chatty' messages during the build.
+# If anything is printed in a success case then
+# the main Android build can't reuse the same line for
+# showing multiple commands being executed.
+QUIET=false
+if [[ "$1" == "--quiet" ]]; then
+ QUIET=true
+ shift
+fi
+
+CPP_DEFINE_GENERATOR_TOOL="$1"
+OUTPUT_FILE="$2"
+
+function pecho() {
+ if ! $QUIET; then
+ echo "$@"
+ fi
+}
+
+# Generate the header. Print the command we're running to console for readability.
+pecho "cpp-define-generator-data > \"$OUTPUT_FILE\""
+"$CPP_DEFINE_GENERATOR_TOOL" > "$OUTPUT_FILE"
+retval="$?"
+
+if [[ $retval -ne 0 ]]; then
+ echo "verify-asm-support: FATAL: Error while running cpp-define-generator-data" >&2
+ exit $retval
+fi
+
+if ! diff "$ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY" "$OUTPUT_FILE"; then
+
+ if [[ $OVERWRITE_CHECKED_IN_COPY_IF_CHANGED == "y" ]]; then
+ cp "$OUTPUT_FILE" "$ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY"
+ echo "verify-asm-support: OK: Overwrote '$ASM_SUPPORT_GEN_CHECKED_IN_COPY' with build copy."
+ echo " Please 'git add $ASM_SUPPORT_GEN_CHECKED_IN_COPY'."
+ else
+ echo "---------------------------------------------------------------------------------------------" >&2
+ echo "verify-asm-support: ERROR: Checked-in copy of '$ASM_SUPPORT_GEN_CHECKED_IN_COPY' " >&2
+ echo " has diverged from the build copy." >&2
+ echo " Please re-run the 'generate-asm-support' command to resync the header." >&2
+ [[ -f "$OUTPUT_FILE" ]] && rm "$OUTPUT_FILE"
+ exit 1
+ fi
+fi
+
+pecho "verify-asm-support: SUCCESS. Built '$OUTPUT_FILE' which matches our checked in copy."
diff --git a/tools/dexfuzz/README b/tools/dexfuzz/README
index c1cdf1e..78f73f5 100644
--- a/tools/dexfuzz/README
+++ b/tools/dexfuzz/README
@@ -98,7 +98,7 @@
Timed Out - mutated files that timed out for one or more backends.
Current timeouts are:
Optimizing - 5 seconds
- Intepreter - 30 seconds
+ Interpreter - 30 seconds
(use --short-timeouts to set all backends to 2 seconds.)
Successful - mutated files that executed and all backends agreed on the resulting
output. NB: if all backends crashed with the same output, this would
diff --git a/tools/jfuzz/run_dex_fuzz_test.py b/tools/jfuzz/run_dex_fuzz_test.py
index 50c4f20..34a92f6 100755
--- a/tools/jfuzz/run_dex_fuzz_test.py
+++ b/tools/jfuzz/run_dex_fuzz_test.py
@@ -19,7 +19,7 @@
import shutil
import sys
-from subprocess import check_call
+from subprocess import call
from tempfile import mkdtemp
sys.path.append(os.path.dirname(os.path.dirname(
@@ -75,6 +75,9 @@
top = GetEnvVariableOrError('ANDROID_BUILD_TOP')
self._dexfuzz_env['PATH'] = (top + '/art/tools/bisection_search:' +
self._dexfuzz_env['PATH'])
+ android_root = GetEnvVariableOrError('ANDROID_HOST_OUT')
+ self._dexfuzz_env['ANDROID_ROOT'] = android_root
+ self._dexfuzz_env['LD_LIBRARY_PATH'] = android_root + '/lib'
os.chdir(self._dexfuzz_dir)
os.mkdir('divergent_programs')
os.mkdir('bisection_outputs')
@@ -119,24 +122,30 @@
def RunDexFuzz(self):
"""Starts the DexFuzz testing."""
os.chdir(self._dexfuzz_dir)
- dexfuzz_args = ['--inputs=' + self._inputs_dir, '--execute',
- '--execute-class=Test', '--repeat=' + str(self._num_tests),
- '--dump-output', '--interpreter', '--optimizing',
+ dexfuzz_args = ['--inputs=' + self._inputs_dir,
+ '--execute',
+ '--execute-class=Test',
+ '--repeat=' + str(self._num_tests),
+ '--dump-output', '--dump-verify',
+ '--interpreter', '--optimizing',
'--bisection-search']
if self._device is not None:
dexfuzz_args += ['--device=' + self._device, '--allarm']
else:
dexfuzz_args += ['--host'] # Assume host otherwise.
- check_call(['dexfuzz'] + dexfuzz_args, env=self._dexfuzz_env)
- # TODO: summarize findings.
+ cmd = ['dexfuzz'] + dexfuzz_args
+ print('**** Running ****\n\n', cmd, '\n')
+ call(cmd, env=self._dexfuzz_env)
+ print('\n**** Results (report.log) ****\n')
+ call(['tail', '-n 24', 'report.log'])
def main():
# Handle arguments.
parser = argparse.ArgumentParser()
- parser.add_argument('--num_tests', default=10000,
+ parser.add_argument('--num_tests', default=1000,
type=int, help='number of tests to run')
- parser.add_argument('--num_inputs', default=50,
+ parser.add_argument('--num_inputs', default=10,
type=int, help='number of JFuzz program to generate')
parser.add_argument('--device', help='target device serial number')
args = parser.parse_args()
diff --git a/tools/jfuzz/run_jfuzz_test_nightly.py b/tools/jfuzz/run_jfuzz_test_nightly.py
index 29595f2..a9f8365 100755
--- a/tools/jfuzz/run_jfuzz_test_nightly.py
+++ b/tools/jfuzz/run_jfuzz_test_nightly.py
@@ -26,9 +26,6 @@
from tempfile import mkdtemp
from tempfile import TemporaryFile
-# Default arguments for run_jfuzz_test.py.
-DEFAULT_ARGS = ['--num_tests=20000']
-
# run_jfuzz_test.py success string.
SUCCESS_STRING = 'success (no divergences)'
@@ -36,17 +33,22 @@
NOT_FOUND = -1
def main(argv):
+ # Set up.
cwd = os.path.dirname(os.path.realpath(__file__))
- cmd = [cwd + '/run_jfuzz_test.py'] + DEFAULT_ARGS
+ cmd = [cwd + '/run_jfuzz_test.py']
parser = argparse.ArgumentParser()
parser.add_argument('--num_proc', default=8,
type=int, help='number of processes to run')
# Unknown arguments are passed to run_jfuzz_test.py.
(args, unknown_args) = parser.parse_known_args()
+ # Run processes.
+ cmd = cmd + unknown_args
+ print('\n**** Running ****\n\n', cmd, '\n')
output_files = [TemporaryFile('wb+') for _ in range(args.num_proc)]
processes = []
- for output_file in output_files:
- processes.append(subprocess.Popen(cmd + unknown_args, stdout=output_file,
+ for i, output_file in enumerate(output_files):
+ print('Tester', i)
+ processes.append(subprocess.Popen(cmd, stdout=output_file,
stderr=subprocess.STDOUT))
try:
# Wait for processes to terminate.
@@ -56,6 +58,7 @@
for proc in processes:
proc.kill()
# Output results.
+ print('\n**** Results ****\n')
output_dirs = []
for i, output_file in enumerate(output_files):
output_file.seek(0)
@@ -65,20 +68,24 @@
directory_match = re.search(r'Directory[^:]*: ([^\n]+)\n', output_str)
if directory_match:
output_dirs.append(directory_match.group(1))
- print('Tester', i)
if output_str.find(SUCCESS_STRING) == NOT_FOUND:
- print(output_str)
+ print('Tester', i, output_str)
else:
- print(SUCCESS_STRING)
+ print('Tester', i, SUCCESS_STRING)
# Gather divergences.
global_out_dir = mkdtemp('jfuzz_nightly')
- divergence_nr = 1
+ divergence_nr = 0
for out_dir in output_dirs:
for divergence_dir in glob(out_dir + '/divergence*/'):
+ divergence_nr += 1
shutil.copytree(divergence_dir,
global_out_dir + '/divergence' + str(divergence_nr))
- divergence_nr += 1
- print('Global output directory:', global_out_dir)
+ if divergence_nr > 0:
+ print('\n!!!! Divergences !!!!', divergence_nr)
+ else:
+ print ('\nSuccess')
+ print('\nGlobal output directory:', global_out_dir)
+ print()
if __name__ == '__main__':
main(sys.argv)