Merge "ahat - An android heap dump viewer. Initial checkin."
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 566d289..1db654a 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -247,6 +247,7 @@
compiler/optimizing/graph_checker_test.cc \
compiler/optimizing/graph_test.cc \
compiler/optimizing/gvn_test.cc \
+ compiler/optimizing/induction_var_analysis_test.cc \
compiler/optimizing/licm_test.cc \
compiler/optimizing/live_interval_test.cc \
compiler/optimizing/nodes_test.cc \
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 8b56880..ce9e367 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -71,6 +71,7 @@
optimizing/graph_checker.cc \
optimizing/graph_visualizer.cc \
optimizing/gvn.cc \
+ optimizing/induction_var_analysis.cc \
optimizing/inliner.cc \
optimizing/instruction_simplifier.cc \
optimizing/intrinsics.cc \
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 8abd592..09d37f8 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -668,14 +668,16 @@
LockTemp(TargetReg(kArg6));
LockTemp(TargetReg(kArg7));
} else {
- LockTemp(TargetReg(kFArg0));
- LockTemp(TargetReg(kFArg1));
- LockTemp(TargetReg(kFArg2));
- LockTemp(TargetReg(kFArg3));
if (fpuIs32Bit_) {
+ LockTemp(TargetReg(kFArg0));
+ LockTemp(TargetReg(kFArg1));
+ LockTemp(TargetReg(kFArg2));
+ LockTemp(TargetReg(kFArg3));
LockTemp(rs_rD6_fr0);
LockTemp(rs_rD7_fr0);
} else {
+ LockTemp(TargetReg(kFArg0));
+ LockTemp(TargetReg(kFArg2));
LockTemp(rs_rD6_fr1);
LockTemp(rs_rD7_fr1);
}
@@ -694,14 +696,16 @@
FreeTemp(TargetReg(kArg6));
FreeTemp(TargetReg(kArg7));
} else {
- FreeTemp(TargetReg(kFArg0));
- FreeTemp(TargetReg(kFArg1));
- FreeTemp(TargetReg(kFArg2));
- FreeTemp(TargetReg(kFArg3));
if (fpuIs32Bit_) {
+ FreeTemp(TargetReg(kFArg0));
+ FreeTemp(TargetReg(kFArg1));
+ FreeTemp(TargetReg(kFArg2));
+ FreeTemp(TargetReg(kFArg3));
FreeTemp(rs_rD6_fr0);
FreeTemp(rs_rD7_fr0);
} else {
+ FreeTemp(TargetReg(kFArg0));
+ FreeTemp(TargetReg(kFArg2));
FreeTemp(rs_rD6_fr1);
FreeTemp(rs_rD7_fr1);
}
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 6e73ae7..3642b82 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -679,11 +679,8 @@
return nullptr;
}
- if (driver->GetVerifiedMethod(&dex_file, method_idx)->HasRuntimeThrow()) {
- return nullptr;
- }
-
DCHECK(driver->GetCompilerOptions().IsCompilationEnabled());
+ DCHECK(!driver->GetVerifiedMethod(&dex_file, method_idx)->HasRuntimeThrow());
Runtime* const runtime = Runtime::Current();
ClassLinker* const class_linker = runtime->GetClassLinker();
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 273b1628..8eb37cf 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -37,11 +37,21 @@
namespace art {
+VerifiedMethod::VerifiedMethod(uint32_t encountered_error_types,
+ bool has_runtime_throw,
+ const SafeMap<uint32_t, std::set<uint32_t>>& string_init_pc_reg_map)
+ : encountered_error_types_(encountered_error_types),
+ has_runtime_throw_(has_runtime_throw),
+ string_init_pc_reg_map_(string_init_pc_reg_map) {
+}
+
const VerifiedMethod* VerifiedMethod::Create(verifier::MethodVerifier* method_verifier,
bool compile) {
- std::unique_ptr<VerifiedMethod> verified_method(new VerifiedMethod);
- verified_method->has_verification_failures_ = method_verifier->HasFailures();
- verified_method->has_runtime_throw_ = method_verifier->HasInstructionThatWillThrow();
+ std::unique_ptr<VerifiedMethod> verified_method(
+ new VerifiedMethod(method_verifier->GetEncounteredFailureTypes(),
+ method_verifier->HasInstructionThatWillThrow(),
+ method_verifier->GetStringInitPcRegMap()));
+
if (compile) {
/* Generate a register map. */
if (!verified_method->GenerateGcMap(method_verifier)) {
@@ -66,8 +76,6 @@
verified_method->GenerateSafeCastSet(method_verifier);
}
- verified_method->SetStringInitPcRegMap(method_verifier->GetStringInitPcRegMap());
-
return verified_method.release();
}
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index f7d6d67..74fcb07 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -72,22 +72,25 @@
// Returns true if there were any errors during verification.
bool HasVerificationFailures() const {
- return has_verification_failures_;
+ return encountered_error_types_ != 0;
+ }
+
+ uint32_t GetEncounteredVerificationFailures() const {
+ return encountered_error_types_;
}
bool HasRuntimeThrow() const {
return has_runtime_throw_;
}
- void SetStringInitPcRegMap(SafeMap<uint32_t, std::set<uint32_t>>& string_init_pc_reg_map) {
- string_init_pc_reg_map_ = string_init_pc_reg_map;
- }
const SafeMap<uint32_t, std::set<uint32_t>>& GetStringInitPcRegMap() const {
return string_init_pc_reg_map_;
}
private:
- VerifiedMethod() = default;
+ VerifiedMethod(uint32_t encountered_error_types,
+ bool has_runtime_throw,
+ const SafeMap<uint32_t, std::set<uint32_t>>& string_init_pc_reg_map);
/*
* Generate the GC map for a method that has just been verified (i.e. we're doing this as part of
@@ -124,12 +127,12 @@
DequickenMap dequicken_map_;
SafeCastSet safe_cast_set_;
- bool has_verification_failures_ = false;
- bool has_runtime_throw_ = false;
+ const uint32_t encountered_error_types_;
+ const bool has_runtime_throw_;
// Copy of mapping generated by verifier of dex PCs of string init invocations
// to the set of other registers that the receiver has been copied into.
- SafeMap<uint32_t, std::set<uint32_t>> string_init_pc_reg_map_;
+ const SafeMap<uint32_t, std::set<uint32_t>> string_init_pc_reg_map_;
};
} // namespace art
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 80387f2..83f391d 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -31,7 +31,7 @@
namespace art {
inline mirror::DexCache* CompilerDriver::GetDexCache(const DexCompilationUnit* mUnit) {
- return mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile());
+ return mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile(), false);
}
inline mirror::ClassLoader* CompilerDriver::GetClassLoader(ScopedObjectAccess& soa,
@@ -87,7 +87,7 @@
}
inline mirror::DexCache* CompilerDriver::FindDexCache(const DexFile* dex_file) {
- return Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file);
+ return Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file, false);
}
inline ArtField* CompilerDriver::ResolveField(
@@ -339,7 +339,7 @@
// Sharpen a virtual call into a direct call. The method_idx is into referrer's
// dex cache, check that this resolved method is where we expect it.
CHECK_EQ(target_method->dex_file, mUnit->GetDexFile());
- DCHECK_EQ(dex_cache.Get(), mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
+ DCHECK_EQ(dex_cache.Get(), mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile(), false));
CHECK_EQ(referrer_class->GetDexCache()->GetResolvedMethod(
target_method->dex_method_index, pointer_size),
resolved_method) << PrettyMethod(resolved_method);
@@ -369,7 +369,7 @@
nullptr, kVirtual);
} else {
StackHandleScope<1> hs(soa.Self());
- auto target_dex_cache(hs.NewHandle(class_linker->FindDexCache(*devirt_target->dex_file)));
+ auto target_dex_cache(hs.NewHandle(class_linker->RegisterDexFile(*devirt_target->dex_file)));
called_method = class_linker->ResolveMethod(
*devirt_target->dex_file, devirt_target->dex_method_index, target_dex_cache,
class_loader, nullptr, kVirtual);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index fa25a17..c006e62 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -590,14 +590,18 @@
} else if ((access_flags & kAccAbstract) != 0) {
// Abstract methods don't have code.
} else {
- bool has_verified_method = driver->GetVerificationResults()
- ->GetVerifiedMethod(method_ref) != nullptr;
+ const VerifiedMethod* verified_method =
+ driver->GetVerificationResults()->GetVerifiedMethod(method_ref);
bool compile = compilation_enabled &&
// Basic checks, e.g., not <clinit>.
driver->GetVerificationResults()
->IsCandidateForCompilation(method_ref, access_flags) &&
// Did not fail to create VerifiedMethod metadata.
- has_verified_method &&
+ verified_method != nullptr &&
+ // Do not have failures that should punt to the interpreter.
+ !verified_method->HasRuntimeThrow() &&
+ (verified_method->GetEncounteredVerificationFailures() &
+ verifier::VERIFY_ERROR_FORCE_INTERPRETER) == 0 &&
// Is eligable for compilation by methods-to-compile filter.
driver->IsMethodToCompile(method_ref);
if (compile) {
@@ -620,7 +624,7 @@
method_idx,
class_loader,
dex_file,
- has_verified_method
+ (verified_method != nullptr)
? dex_to_dex_compilation_level
: optimizer::DexToDexCompilationLevel::kRequired);
}
@@ -936,7 +940,7 @@
uint16_t exception_type_idx = exception_type.first;
const DexFile* dex_file = exception_type.second;
StackHandleScope<2> hs2(self);
- Handle<mirror::DexCache> dex_cache(hs2.NewHandle(class_linker->FindDexCache(*dex_file)));
+ Handle<mirror::DexCache> dex_cache(hs2.NewHandle(class_linker->RegisterDexFile(*dex_file)));
Handle<mirror::Class> klass(hs2.NewHandle(
class_linker->ResolveType(*dex_file, exception_type_idx, dex_cache,
NullHandle<mirror::ClassLoader>())));
@@ -1170,7 +1174,8 @@
IsImageClass(dex_file.StringDataByIdx(dex_file.GetTypeId(type_idx).descriptor_idx_))) {
{
ScopedObjectAccess soa(Thread::Current());
- mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
+ mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(
+ dex_file, false);
mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
if (resolved_class == nullptr) {
// Erroneous class.
@@ -1195,9 +1200,9 @@
// We resolve all const-string strings when building for the image.
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache(
- hs.NewHandle(Runtime::Current()->GetClassLinker()->FindDexCache(dex_file)));
- Runtime::Current()->GetClassLinker()->ResolveString(dex_file, string_idx, dex_cache);
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file, false)));
+ class_linker->ResolveString(dex_file, string_idx, dex_cache);
result = true;
}
if (result) {
@@ -1222,7 +1227,7 @@
*equals_referrers_class = false;
}
ScopedObjectAccess soa(Thread::Current());
- mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
+ mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file, false);
// Get type from dex cache assuming it was populated by the verifier
mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
if (resolved_class == nullptr) {
@@ -1259,7 +1264,8 @@
const DexFile& dex_file,
uint32_t type_idx) {
ScopedObjectAccess soa(Thread::Current());
- mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
+ mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(
+ dex_file, false);
// Get type from dex cache assuming it was populated by the verifier.
mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
if (resolved_class == nullptr) {
@@ -1288,7 +1294,7 @@
uintptr_t* direct_type_ptr, bool* out_is_finalizable) {
ScopedObjectAccess soa(Thread::Current());
Runtime* runtime = Runtime::Current();
- mirror::DexCache* dex_cache = runtime->GetClassLinker()->FindDexCache(dex_file);
+ mirror::DexCache* dex_cache = runtime->GetClassLinker()->FindDexCache(dex_file, false);
mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
if (resolved_class == nullptr) {
return false;
@@ -1417,7 +1423,7 @@
{
StackHandleScope<2> hs(soa.Self());
Handle<mirror::DexCache> dex_cache_handle(
- hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile())));
+ hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile(), false)));
Handle<mirror::ClassLoader> class_loader_handle(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
resolved_field =
@@ -1467,7 +1473,7 @@
{
StackHandleScope<2> hs(soa.Self());
Handle<mirror::DexCache> dex_cache_handle(
- hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile())));
+ hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile(), false)));
Handle<mirror::ClassLoader> class_loader_handle(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
resolved_field =
@@ -1653,7 +1659,7 @@
// Try to resolve the method and compiling method's class.
StackHandleScope<3> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(
- hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile())));
+ hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile(), false)));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
uint32_t method_idx = target_method->dex_method_index;
@@ -1905,7 +1911,7 @@
StackHandleScope<2> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file, false)));
// Resolve the class.
mirror::Class* klass = class_linker->ResolveType(dex_file, class_def.class_idx_, dex_cache,
class_loader);
@@ -1998,7 +2004,7 @@
ClassLinker* class_linker = manager_->GetClassLinker();
const DexFile& dex_file = *manager_->GetDexFile();
StackHandleScope<2> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->RegisterDexFile(dex_file)));
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(manager_->GetClassLoader())));
mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);
@@ -2084,7 +2090,7 @@
* This is to ensure the class is structurally sound for compilation. An unsound class
* will be rejected by the verifier and later skipped during compilation in the compiler.
*/
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file, false)));
std::string error_msg;
if (verifier::MethodVerifier::VerifyClass(soa.Self(), &dex_file, dex_cache, class_loader,
&class_def, true, &error_msg) ==
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 93897aa..dbd3366 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -70,7 +70,6 @@
// Separate objects into multiple bins to optimize dirty memory use.
static constexpr bool kBinObjects = true;
-static constexpr bool kComputeEagerResolvedStrings = false;
static void CheckNoDexObjectsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -90,11 +89,6 @@
PruneNonImageClasses(); // Remove junk
ComputeLazyFieldsForImageClasses(); // Add useful information
- // Calling this can in theory fill in some resolved strings. However, in practice it seems to
- // never resolve any.
- if (kComputeEagerResolvedStrings) {
- ComputeEagerResolvedStrings();
- }
Thread::Current()->TransitionFromRunnableToSuspended(kNative);
}
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -302,11 +296,15 @@
void ImageWriter::PrepareDexCacheArraySlots() {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
- size_t dex_cache_count = class_linker->GetDexCacheCount();
+ Thread* const self = Thread::Current();
+ ReaderMutexLock mu(self, *class_linker->DexLock());
uint32_t size = 0u;
- for (size_t idx = 0; idx < dex_cache_count; ++idx) {
- DexCache* dex_cache = class_linker->GetDexCache(idx);
+ for (jobject weak_root : class_linker->GetDexCaches()) {
+ mirror::DexCache* dex_cache =
+ down_cast<mirror::DexCache*>(self->DecodeJObject(weak_root));
+ if (dex_cache == nullptr) {
+ continue;
+ }
const DexFile* dex_file = dex_cache->GetDexFile();
dex_cache_array_starts_.Put(dex_file, size);
DexCacheArraysLayout layout(target_ptr_size_, dex_file);
@@ -554,39 +552,6 @@
class_linker->VisitClassesWithoutClassesLock(&visitor);
}
-void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED) {
- if (!obj->GetClass()->IsStringClass()) {
- return;
- }
- mirror::String* string = obj->AsString();
- const uint16_t* utf16_string = string->GetValue();
- size_t utf16_length = static_cast<size_t>(string->GetLength());
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
- size_t dex_cache_count = class_linker->GetDexCacheCount();
- for (size_t i = 0; i < dex_cache_count; ++i) {
- DexCache* dex_cache = class_linker->GetDexCache(i);
- const DexFile& dex_file = *dex_cache->GetDexFile();
- const DexFile::StringId* string_id;
- if (UNLIKELY(utf16_length == 0)) {
- string_id = dex_file.FindStringId("");
- } else {
- string_id = dex_file.FindStringId(utf16_string, utf16_length);
- }
- if (string_id != nullptr) {
- // This string occurs in this dex file, assign the dex cache entry.
- uint32_t string_idx = dex_file.GetIndexForStringId(*string_id);
- if (dex_cache->GetResolvedString(string_idx) == nullptr) {
- dex_cache->SetResolvedString(string_idx, string);
- }
- }
- }
-}
-
-void ImageWriter::ComputeEagerResolvedStrings() {
- Runtime::Current()->GetHeap()->VisitObjects(ComputeEagerResolvedStringsCallback, this);
-}
-
bool ImageWriter::IsImageClass(Class* klass) {
if (klass == nullptr) {
return false;
@@ -631,16 +596,14 @@
// Clear references to removed classes from the DexCaches.
const ArtMethod* resolution_method = runtime->GetResolutionMethod();
- size_t dex_cache_count;
- {
- ReaderMutexLock mu(self, *class_linker->DexLock());
- dex_cache_count = class_linker->GetDexCacheCount();
- }
- for (size_t idx = 0; idx < dex_cache_count; ++idx) {
- DexCache* dex_cache;
- {
- ReaderMutexLock mu(self, *class_linker->DexLock());
- dex_cache = class_linker->GetDexCache(idx);
+
+ ScopedAssertNoThreadSuspension sa(self, __FUNCTION__);
+ ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_); // For ClassInClassTable
+ ReaderMutexLock mu2(self, *class_linker->DexLock());
+ for (jobject weak_root : class_linker->GetDexCaches()) {
+ mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(self->DecodeJObject(weak_root));
+ if (dex_cache == nullptr) {
+ continue;
}
for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
Class* klass = dex_cache->GetResolvedType(i);
@@ -762,8 +725,12 @@
ReaderMutexLock mu(self, *class_linker->DexLock());
CHECK_EQ(dex_cache_count, class_linker->GetDexCacheCount())
<< "The number of dex caches changed.";
- for (size_t i = 0; i < dex_cache_count; ++i) {
- dex_caches->Set<false>(i, class_linker->GetDexCache(i));
+ size_t i = 0;
+ for (jobject weak_root : class_linker->GetDexCaches()) {
+ mirror::DexCache* dex_cache =
+ down_cast<mirror::DexCache*>(self->DecodeJObject(weak_root));
+ dex_caches->Set<false>(i, dex_cache);
+ ++i;
}
}
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index c8aa82d..778521c 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -225,11 +225,6 @@
void ComputeLazyFieldsForImageClasses()
SHARED_REQUIRES(Locks::mutator_lock_);
- // Wire dex cache resolved strings to strings in the image to avoid runtime resolution.
- void ComputeEagerResolvedStrings() SHARED_REQUIRES(Locks::mutator_lock_);
- static void ComputeEagerResolvedStringsCallback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
// Remove unwanted classes from various roots.
void PruneNonImageClasses() SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 64e7487..0e0b224 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -691,6 +691,8 @@
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
+ // No thread suspension allowed here, since dex_cache_ may get invalidated if suspension occurs.
+ ScopedAssertNoThreadSuspension tsc(Thread::Current(), __FUNCTION__);
if (compiled_method != nullptr) { // ie. not an abstract method
size_t file_offset = file_offset_;
OutputStream* out = out_;
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 19dd944..d6b25ee 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -76,7 +76,8 @@
code_start_(nullptr),
latest_result_(nullptr),
can_use_baseline_for_string_init_(true),
- compilation_stats_(nullptr) {}
+ compilation_stats_(nullptr),
+ interpreter_metadata_(nullptr) {}
bool BuildGraph(const DexFile::CodeItem& code);
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 6568ea4..503187b 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -887,7 +887,7 @@
} else {
stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
if (current->GetType() == Primitive::kPrimLong) {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegisterHigh, id);
++i;
DCHECK_LT(i, environment_size);
}
@@ -909,7 +909,8 @@
} else {
stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
if (current->GetType() == Primitive::kPrimDouble) {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
+ stack_map_stream_.AddDexRegisterEntry(
+ DexRegisterLocation::Kind::kInFpuRegisterHigh, id);
++i;
DCHECK_LT(i, environment_size);
}
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
new file mode 100644
index 0000000..8aaec68
--- /dev/null
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -0,0 +1,479 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "induction_var_analysis.h"
+
+namespace art {
+
+/**
+ * Returns true if instruction is invariant within the given loop.
+ */
+static bool IsLoopInvariant(HLoopInformation* loop, HInstruction* instruction) {
+ HLoopInformation* other_loop = instruction->GetBlock()->GetLoopInformation();
+ if (other_loop != loop) {
+ // If instruction does not occur in same loop, it is invariant
+ // if it appears in an outer loop (including no loop at all).
+ return other_loop == nullptr || loop->IsIn(*other_loop);
+ }
+ return false;
+}
+
+/**
+ * Returns true if instruction is proper entry-phi-operation for given loop
+ * (referred to as mu-operation in Gerlek's paper).
+ */
+static bool IsEntryPhi(HLoopInformation* loop, HInstruction* instruction) {
+ return
+ instruction->IsPhi() &&
+ instruction->InputCount() == 2 &&
+ instruction->GetBlock() == loop->GetHeader();
+}
+
+//
+// Class methods.
+//
+
+HInductionVarAnalysis::HInductionVarAnalysis(HGraph* graph)
+ : HOptimization(graph, kInductionPassName),
+ global_depth_(0),
+ stack_(graph->GetArena()->Adapter()),
+ scc_(graph->GetArena()->Adapter()),
+ map_(std::less<int>(), graph->GetArena()->Adapter()),
+ cycle_(std::less<int>(), graph->GetArena()->Adapter()),
+ induction_(std::less<int>(), graph->GetArena()->Adapter()) {
+}
+
+void HInductionVarAnalysis::Run() {
+ // Detects sequence variables (generalized induction variables) during an
+ // inner-loop-first traversal of all loops using Gerlek's algorithm.
+ for (HPostOrderIterator it_graph(*graph_); !it_graph.Done(); it_graph.Advance()) {
+ HBasicBlock* graph_block = it_graph.Current();
+ if (graph_block->IsLoopHeader()) {
+ VisitLoop(graph_block->GetLoopInformation());
+ }
+ }
+}
+
+void HInductionVarAnalysis::VisitLoop(HLoopInformation* loop) {
+ // Find strongly connected components (SCCs) in the SSA graph of this loop using Tarjan's
+ // algorithm. Due to the descendant-first nature, classification happens "on-demand".
+ global_depth_ = 0;
+ CHECK(stack_.empty());
+ map_.clear();
+
+ for (HBlocksInLoopIterator it_loop(*loop); !it_loop.Done(); it_loop.Advance()) {
+ HBasicBlock* loop_block = it_loop.Current();
+ CHECK(loop_block->IsInLoop());
+ if (loop_block->GetLoopInformation() != loop) {
+ continue; // Inner loops already visited.
+ }
+ // Visit phi-operations and instructions.
+ for (HInstructionIterator it(loop_block->GetPhis()); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
+ if (!IsVisitedNode(instruction->GetId())) {
+ VisitNode(loop, instruction);
+ }
+ }
+ for (HInstructionIterator it(loop_block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
+ if (!IsVisitedNode(instruction->GetId())) {
+ VisitNode(loop, instruction);
+ }
+ }
+ }
+
+ CHECK(stack_.empty());
+ map_.clear();
+}
+
+void HInductionVarAnalysis::VisitNode(HLoopInformation* loop, HInstruction* instruction) {
+ const int id = instruction->GetId();
+ const uint32_t d1 = ++global_depth_;
+ map_.Put(id, NodeInfo(d1));
+ stack_.push_back(instruction);
+
+ // Visit all descendants.
+ uint32_t low = d1;
+ for (size_t i = 0, count = instruction->InputCount(); i < count; ++i) {
+ low = std::min(low, VisitDescendant(loop, instruction->InputAt(i)));
+ }
+
+ // Lower or found SCC?
+ if (low < d1) {
+ map_.find(id)->second.depth = low;
+ } else {
+ scc_.clear();
+ cycle_.clear();
+
+ // Pop the stack to build the SCC for classification.
+ while (!stack_.empty()) {
+ HInstruction* x = stack_.back();
+ scc_.push_back(x);
+ stack_.pop_back();
+ map_.find(x->GetId())->second.done = true;
+ if (x == instruction) {
+ break;
+ }
+ }
+
+ // Classify the SCC.
+ if (scc_.size() == 1 && !IsEntryPhi(loop, scc_[0])) {
+ ClassifyTrivial(loop, scc_[0]);
+ } else {
+ ClassifyNonTrivial(loop);
+ }
+
+ scc_.clear();
+ cycle_.clear();
+ }
+}
+
+uint32_t HInductionVarAnalysis::VisitDescendant(HLoopInformation* loop, HInstruction* instruction) {
+ // If the definition is either outside the loop (loop invariant entry value)
+ // or assigned in inner loop (inner exit value), the traversal stops.
+ HLoopInformation* otherLoop = instruction->GetBlock()->GetLoopInformation();
+ if (otherLoop != loop) {
+ return global_depth_;
+ }
+
+ // Inspect descendant node.
+ const int id = instruction->GetId();
+ if (!IsVisitedNode(id)) {
+ VisitNode(loop, instruction);
+ return map_.find(id)->second.depth;
+ } else {
+ auto it = map_.find(id);
+ return it->second.done ? global_depth_ : it->second.depth;
+ }
+}
+
+void HInductionVarAnalysis::ClassifyTrivial(HLoopInformation* loop, HInstruction* instruction) {
+ InductionInfo* info = nullptr;
+ if (instruction->IsPhi()) {
+ for (size_t i = 1, count = instruction->InputCount(); i < count; i++) {
+ info = TransferPhi(LookupInfo(loop, instruction->InputAt(0)),
+ LookupInfo(loop, instruction->InputAt(i)));
+ }
+ } else if (instruction->IsAdd()) {
+ info = TransferAddSub(LookupInfo(loop, instruction->InputAt(0)),
+ LookupInfo(loop, instruction->InputAt(1)), kAdd);
+ } else if (instruction->IsSub()) {
+ info = TransferAddSub(LookupInfo(loop, instruction->InputAt(0)),
+ LookupInfo(loop, instruction->InputAt(1)), kSub);
+ } else if (instruction->IsMul()) {
+ info = TransferMul(LookupInfo(loop, instruction->InputAt(0)),
+ LookupInfo(loop, instruction->InputAt(1)));
+ } else if (instruction->IsNeg()) {
+ info = TransferNeg(LookupInfo(loop, instruction->InputAt(0)));
+ }
+
+ // Successfully classified?
+ if (info != nullptr) {
+ AssignInfo(loop, instruction, info);
+ }
+}
+
+void HInductionVarAnalysis::ClassifyNonTrivial(HLoopInformation* loop) {
+ const size_t size = scc_.size();
+ CHECK_GE(size, 1u);
+ HInstruction* phi = scc_[size - 1];
+ if (!IsEntryPhi(loop, phi)) {
+ return;
+ }
+ HInstruction* external = phi->InputAt(0);
+ HInstruction* internal = phi->InputAt(1);
+ InductionInfo* initial = LookupInfo(loop, external);
+ if (initial == nullptr || initial->induction_class != kInvariant) {
+ return;
+ }
+
+ // Singleton entry-phi-operation may be a wrap-around induction.
+ if (size == 1) {
+ InductionInfo* update = LookupInfo(loop, internal);
+ if (update != nullptr) {
+ AssignInfo(loop, phi, NewInductionInfo(kWrapAround, kNop, initial, update, nullptr));
+ }
+ return;
+ }
+
+ // Inspect remainder of the cycle that resides in scc_. The cycle_ mapping assigns
+ // temporary meaning to its nodes.
+ cycle_.Overwrite(phi->GetId(), nullptr);
+ for (size_t i = 0; i < size - 1; i++) {
+ HInstruction* operation = scc_[i];
+ InductionInfo* update = nullptr;
+ if (operation->IsPhi()) {
+ update = TransferCycleOverPhi(operation);
+ } else if (operation->IsAdd()) {
+ update = TransferCycleOverAddSub(loop, operation->InputAt(0), operation->InputAt(1), kAdd, true);
+ } else if (operation->IsSub()) {
+ update = TransferCycleOverAddSub(loop, operation->InputAt(0), operation->InputAt(1), kSub, true);
+ }
+ if (update == nullptr) {
+ return;
+ }
+ cycle_.Overwrite(operation->GetId(), update);
+ }
+
+ // Success if the internal link received accumulated nonzero update.
+ auto it = cycle_.find(internal->GetId());
+ if (it != cycle_.end() && it->second != nullptr) {
+ // Classify header phi and feed the cycle "on-demand".
+ AssignInfo(loop, phi, NewInductionInfo(kLinear, kNop, it->second, initial, nullptr));
+ for (size_t i = 0; i < size - 1; i++) {
+ ClassifyTrivial(loop, scc_[i]);
+ }
+ }
+}
+
+HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferPhi(InductionInfo* a,
+ InductionInfo* b) {
+ // Transfer over a phi: if both inputs are identical, result is input.
+ if (InductionEqual(a, b)) {
+ return a;
+ }
+ return nullptr;
+}
+
+HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferAddSub(InductionInfo* a,
+ InductionInfo* b,
+ InductionOp op) {
+ // Transfer over an addition or subtraction: invariant or linear
+ // inputs combine into new invariant or linear result.
+ if (a != nullptr && b != nullptr) {
+ if (a->induction_class == kInvariant && b->induction_class == kInvariant) {
+ return NewInductionInfo(kInvariant, op, a, b, nullptr);
+ } else if (a->induction_class == kLinear && b->induction_class == kInvariant) {
+ return NewInductionInfo(
+ kLinear,
+ kNop,
+ a->op_a,
+ NewInductionInfo(kInvariant, op, a->op_b, b, nullptr),
+ nullptr);
+ } else if (a->induction_class == kInvariant && b->induction_class == kLinear) {
+ InductionInfo* ba = b->op_a;
+ if (op == kSub) { // negation required
+ ba = NewInductionInfo(kInvariant, kNeg, nullptr, ba, nullptr);
+ }
+ return NewInductionInfo(
+ kLinear,
+ kNop,
+ ba,
+ NewInductionInfo(kInvariant, op, a, b->op_b, nullptr),
+ nullptr);
+ } else if (a->induction_class == kLinear && b->induction_class == kLinear) {
+ return NewInductionInfo(
+ kLinear,
+ kNop,
+ NewInductionInfo(kInvariant, op, a->op_a, b->op_a, nullptr),
+ NewInductionInfo(kInvariant, op, a->op_b, b->op_b, nullptr),
+ nullptr);
+ }
+ }
+ return nullptr;
+}
+
+HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferMul(InductionInfo* a,
+ InductionInfo* b) {
+ // Transfer over a multiplication: invariant or linear
+ // inputs combine into new invariant or linear result.
+ // Two linear inputs would become quadratic.
+ if (a != nullptr && b != nullptr) {
+ if (a->induction_class == kInvariant && b->induction_class == kInvariant) {
+ return NewInductionInfo(kInvariant, kMul, a, b, nullptr);
+ } else if (a->induction_class == kLinear && b->induction_class == kInvariant) {
+ return NewInductionInfo(
+ kLinear,
+ kNop,
+ NewInductionInfo(kInvariant, kMul, a->op_a, b, nullptr),
+ NewInductionInfo(kInvariant, kMul, a->op_b, b, nullptr),
+ nullptr);
+ } else if (a->induction_class == kInvariant && b->induction_class == kLinear) {
+ return NewInductionInfo(
+ kLinear,
+ kNop,
+ NewInductionInfo(kInvariant, kMul, a, b->op_a, nullptr),
+ NewInductionInfo(kInvariant, kMul, a, b->op_b, nullptr),
+ nullptr);
+ }
+ }
+ return nullptr;
+}
+
+HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferNeg(InductionInfo* a) {
+ // Transfer over a unary negation: invariant or linear input
+ // yields a similar, but negated result.
+ if (a != nullptr) {
+ if (a->induction_class == kInvariant) {
+ return NewInductionInfo(kInvariant, kNeg, nullptr, a, nullptr);
+ } else if (a->induction_class == kLinear) {
+ return NewInductionInfo(
+ kLinear,
+ kNop,
+ NewInductionInfo(kInvariant, kNeg, nullptr, a->op_a, nullptr),
+ NewInductionInfo(kInvariant, kNeg, nullptr, a->op_b, nullptr),
+ nullptr);
+ }
+ }
+ return nullptr;
+}
+
+HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferCycleOverPhi(HInstruction* phi) {
+ // Transfer within a cycle over a phi: only identical inputs
+ // can be combined into that input as result.
+ const size_t count = phi->InputCount();
+ CHECK_GT(count, 0u);
+ auto ita = cycle_.find(phi->InputAt(0)->GetId());
+ if (ita != cycle_.end()) {
+ InductionInfo* a = ita->second;
+ for (size_t i = 1; i < count; i++) {
+ auto itb = cycle_.find(phi->InputAt(i)->GetId());
+ if (itb == cycle_.end() || !InductionEqual(a, itb->second)) {
+ return nullptr;
+ }
+ }
+ return a;
+ }
+ return nullptr;
+}
+
+HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferCycleOverAddSub(
+ HLoopInformation* loop,
+ HInstruction* x,
+ HInstruction* y,
+ InductionOp op,
+ bool first) {
+ // Transfer within a cycle over an addition or subtraction: adding or
+ // subtracting an invariant value adds to the stride of the induction,
+ // starting with the phi value denoted by the unusual nullptr value.
+ auto it = cycle_.find(x->GetId());
+ if (it != cycle_.end()) {
+ InductionInfo* a = it->second;
+ InductionInfo* b = LookupInfo(loop, y);
+ if (b != nullptr && b->induction_class == kInvariant) {
+ if (a == nullptr) {
+ if (op == kSub) { // negation required
+ return NewInductionInfo(kInvariant, kNeg, nullptr, b, nullptr);
+ }
+ return b;
+ } else if (a->induction_class == kInvariant) {
+ return NewInductionInfo(kInvariant, op, a, b, nullptr);
+ }
+ }
+ }
+ // On failure, try alternatives.
+ if (op == kAdd) {
+ // Try the other way around for an addition.
+ if (first) {
+ return TransferCycleOverAddSub(loop, y, x, op, false);
+ }
+ }
+ return nullptr;
+}
+
+void HInductionVarAnalysis::PutInfo(int loop_id, int id, InductionInfo* info) {
+ auto it = induction_.find(loop_id);
+ if (it == induction_.end()) {
+ it = induction_.Put(
+ loop_id, ArenaSafeMap<int, InductionInfo*>(std::less<int>(), graph_->GetArena()->Adapter()));
+ }
+ it->second.Overwrite(id, info);
+}
+
+HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::GetInfo(int loop_id, int id) {
+ auto it = induction_.find(loop_id);
+ if (it != induction_.end()) {
+ auto loop_it = it->second.find(id);
+ if (loop_it != it->second.end()) {
+ return loop_it->second;
+ }
+ }
+ return nullptr;
+}
+
+void HInductionVarAnalysis::AssignInfo(HLoopInformation* loop,
+ HInstruction* instruction,
+ InductionInfo* info) {
+ const int loop_id = loop->GetHeader()->GetBlockId();
+ const int id = instruction->GetId();
+ PutInfo(loop_id, id, info);
+}
+
+HInductionVarAnalysis::InductionInfo*
+HInductionVarAnalysis::LookupInfo(HLoopInformation* loop,
+ HInstruction* instruction) {
+ const int loop_id = loop->GetHeader()->GetBlockId();
+ const int id = instruction->GetId();
+ InductionInfo* info = GetInfo(loop_id, id);
+ if (info == nullptr && IsLoopInvariant(loop, instruction)) {
+ info = NewInductionInfo(kInvariant, kFetch, nullptr, nullptr, instruction);
+ PutInfo(loop_id, id, info);
+ }
+ return info;
+}
+
+bool HInductionVarAnalysis::InductionEqual(InductionInfo* info1,
+ InductionInfo* info2) {
+ // Test structural equality only, without accounting for simplifications.
+ if (info1 != nullptr && info2 != nullptr) {
+ return
+ info1->induction_class == info2->induction_class &&
+ info1->operation == info2->operation &&
+ info1->fetch == info2->fetch &&
+ InductionEqual(info1->op_a, info2->op_a) &&
+ InductionEqual(info1->op_b, info2->op_b);
+ }
+ // Otherwise only two nullptrs are considered equal.
+ return info1 == info2;
+}
+
+std::string HInductionVarAnalysis::InductionToString(InductionInfo* info) {
+ if (info != nullptr) {
+ if (info->induction_class == kInvariant) {
+ std::string inv = "(";
+ inv += InductionToString(info->op_a);
+ switch (info->operation) {
+ case kNop: inv += " ? "; break;
+ case kAdd: inv += " + "; break;
+ case kSub:
+ case kNeg: inv += " - "; break;
+ case kMul: inv += " * "; break;
+ case kDiv: inv += " / "; break;
+ case kFetch:
+ CHECK(info->fetch != nullptr);
+ inv += std::to_string(info->fetch->GetId()) + ":" + info->fetch->DebugName();
+ break;
+ }
+ inv += InductionToString(info->op_b);
+ return inv + ")";
+ } else {
+ CHECK(info->operation == kNop);
+ if (info->induction_class == kLinear) {
+ return "(" + InductionToString(info->op_a) + " * i + " +
+ InductionToString(info->op_b) + ")";
+ } else if (info->induction_class == kWrapAround) {
+ return "wrap(" + InductionToString(info->op_a) + ", " +
+ InductionToString(info->op_b) + ")";
+ } else if (info->induction_class == kPeriodic) {
+ return "periodic(" + InductionToString(info->op_a) + ", " +
+ InductionToString(info->op_b) + ")";
+ }
+ }
+ }
+ return "";
+}
+
+} // namespace art
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
new file mode 100644
index 0000000..09a0a38
--- /dev/null
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INDUCTION_VAR_ANALYSIS_H_
+#define ART_COMPILER_OPTIMIZING_INDUCTION_VAR_ANALYSIS_H_
+
+#include <string>
+
+#include "nodes.h"
+#include "optimization.h"
+
+namespace art {
+
+/**
+ * Induction variable analysis.
+ *
+ * Based on the paper by M. Gerlek et al.
+ * "Beyond Induction Variables: Detecting and Classifying Sequences Using a Demand-Driven SSA Form"
+ * (ACM Transactions on Programming Languages and Systems, Volume 17 Issue 1, Jan. 1995).
+ */
+class HInductionVarAnalysis : public HOptimization {
+ public:
+ explicit HInductionVarAnalysis(HGraph* graph);
+
+ // TODO: design public API useful in later phases
+
+ /**
+ * Returns string representation of induction found for the instruction
+ * in the given loop (for testing and debugging only).
+ */
+ std::string InductionToString(HLoopInformation* loop, HInstruction* instruction) {
+ return InductionToString(LookupInfo(loop, instruction));
+ }
+
+ void Run() OVERRIDE;
+
+ private:
+ static constexpr const char* kInductionPassName = "induction_var_analysis";
+
+ struct NodeInfo {
+ explicit NodeInfo(uint32_t d) : depth(d), done(false) {}
+ uint32_t depth;
+ bool done;
+ };
+
+ enum InductionClass {
+ kNone,
+ kInvariant,
+ kLinear,
+ kWrapAround,
+ kPeriodic,
+ kMonotonic
+ };
+
+ enum InductionOp {
+ kNop, // no-operation: a true induction
+ kAdd,
+ kSub,
+ kNeg,
+ kMul,
+ kDiv,
+ kFetch
+ };
+
+ /**
+ * Defines a detected induction as:
+ * (1) invariant:
+ * operation: a + b, a - b, -b, a * b, a / b
+ * or
+ * fetch: fetch from HIR
+ * (2) linear:
+ * nop: a * i + b
+ * (3) wrap-around
+ * nop: a, then defined by b
+ * (4) periodic
+ * nop: a, then defined by b (repeated when exhausted)
+ * (5) monotonic
+ * // TODO: determine representation
+ */
+ struct InductionInfo : public ArenaObject<kArenaAllocMisc> {
+ InductionInfo(InductionClass ic,
+ InductionOp op,
+ InductionInfo* a,
+ InductionInfo* b,
+ HInstruction* f)
+ : induction_class(ic),
+ operation(op),
+ op_a(a),
+ op_b(b),
+ fetch(f) {}
+ InductionClass induction_class;
+ InductionOp operation;
+ InductionInfo* op_a;
+ InductionInfo* op_b;
+ HInstruction* fetch;
+ };
+
+ inline bool IsVisitedNode(int id) const {
+ return map_.find(id) != map_.end();
+ }
+
+ inline InductionInfo* NewInductionInfo(
+ InductionClass c,
+ InductionOp op,
+ InductionInfo* a,
+ InductionInfo* b,
+ HInstruction* i) {
+ return new (graph_->GetArena()) InductionInfo(c, op, a, b, i);
+ }
+
+ // Methods for analysis.
+ void VisitLoop(HLoopInformation* loop);
+ void VisitNode(HLoopInformation* loop, HInstruction* instruction);
+ uint32_t VisitDescendant(HLoopInformation* loop, HInstruction* instruction);
+ void ClassifyTrivial(HLoopInformation* loop, HInstruction* instruction);
+ void ClassifyNonTrivial(HLoopInformation* loop);
+
+ // Transfer operations.
+ InductionInfo* TransferPhi(InductionInfo* a, InductionInfo* b);
+ InductionInfo* TransferAddSub(InductionInfo* a, InductionInfo* b, InductionOp op);
+ InductionInfo* TransferMul(InductionInfo* a, InductionInfo* b);
+ InductionInfo* TransferNeg(InductionInfo* a);
+ InductionInfo* TransferCycleOverPhi(HInstruction* phi);
+ InductionInfo* TransferCycleOverAddSub(HLoopInformation* loop,
+ HInstruction* x,
+ HInstruction* y,
+ InductionOp op,
+ bool first);
+
+ // Assign and lookup.
+ void PutInfo(int loop_id, int id, InductionInfo* info);
+ InductionInfo* GetInfo(int loop_id, int id);
+ void AssignInfo(HLoopInformation* loop, HInstruction* instruction, InductionInfo* info);
+ InductionInfo* LookupInfo(HLoopInformation* loop, HInstruction* instruction);
+ bool InductionEqual(InductionInfo* info1, InductionInfo* info2);
+ std::string InductionToString(InductionInfo* info);
+
+ // Bookkeeping during and after analysis.
+ // TODO: fine tune data structures, only keep relevant data
+
+ uint32_t global_depth_;
+
+ ArenaVector<HInstruction*> stack_;
+ ArenaVector<HInstruction*> scc_;
+
+ // Mappings of instruction id to node and induction information.
+ ArenaSafeMap<int, NodeInfo> map_;
+ ArenaSafeMap<int, InductionInfo*> cycle_;
+
+ // Mapping from loop id to mapping of instruction id to induction information.
+ ArenaSafeMap<int, ArenaSafeMap<int, InductionInfo*>> induction_;
+
+ DISALLOW_COPY_AND_ASSIGN(HInductionVarAnalysis);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_INDUCTION_VAR_ANALYSIS_H_
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
new file mode 100644
index 0000000..2093e33
--- /dev/null
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -0,0 +1,514 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <regex>
+
+#include "base/arena_allocator.h"
+#include "builder.h"
+#include "gtest/gtest.h"
+#include "induction_var_analysis.h"
+#include "nodes.h"
+#include "optimizing_unit_test.h"
+
+namespace art {
+
+/**
+ * Fixture class for the InductionVarAnalysis tests.
+ */
+class InductionVarAnalysisTest : public testing::Test {
+ public:
+ InductionVarAnalysisTest() : pool_(), allocator_(&pool_) {
+ graph_ = CreateGraph(&allocator_);
+ }
+
+ ~InductionVarAnalysisTest() { }
+
+ // Builds single for-loop at depth d.
+ void BuildForLoop(int d, int n) {
+ ASSERT_LT(d, n);
+ loop_preheader_[d] = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(loop_preheader_[d]);
+ loop_header_[d] = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(loop_header_[d]);
+ loop_preheader_[d]->AddSuccessor(loop_header_[d]);
+ if (d < (n - 1)) {
+ BuildForLoop(d + 1, n);
+ }
+ loop_body_[d] = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(loop_body_[d]);
+ loop_body_[d]->AddSuccessor(loop_header_[d]);
+ if (d < (n - 1)) {
+ loop_header_[d]->AddSuccessor(loop_preheader_[d + 1]);
+ loop_header_[d + 1]->AddSuccessor(loop_body_[d]);
+ } else {
+ loop_header_[d]->AddSuccessor(loop_body_[d]);
+ }
+ }
+
+ // Builds a n-nested loop in CFG where each loop at depth 0 <= d < n
+ // is defined as "for (int i_d = 0; i_d < 100; i_d++)". Tests can further
+ // populate the loop with instructions to set up interesting scenarios.
+ void BuildLoopNest(int n) {
+ ASSERT_LE(n, 10);
+ graph_->SetNumberOfVRegs(n + 2);
+
+ // Build basic blocks with entry, nested loop, exit.
+ entry_ = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(entry_);
+ BuildForLoop(0, n);
+ exit_ = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(exit_);
+ entry_->AddSuccessor(loop_preheader_[0]);
+ loop_header_[0]->AddSuccessor(exit_);
+ graph_->SetEntryBlock(entry_);
+ graph_->SetExitBlock(exit_);
+
+ // Provide entry and exit instructions.
+ // 0 : parameter
+ // 1 : constant 0
+ // 2 : constant 1
+ // 3 : constant 100
+ parameter_ = new (&allocator_)
+ HParameterValue(0, Primitive::kPrimNot, true);
+ entry_->AddInstruction(parameter_);
+ constant0_ = new (&allocator_) HConstant(Primitive::kPrimInt);
+ entry_->AddInstruction(constant0_);
+ constant1_ = new (&allocator_) HConstant(Primitive::kPrimInt);
+ entry_->AddInstruction(constant1_);
+ constant100_ = new (&allocator_) HConstant(Primitive::kPrimInt);
+ entry_->AddInstruction(constant100_);
+ exit_->AddInstruction(new (&allocator_) HExit());
+ induc_ = new (&allocator_) HLocal(n);
+ entry_->AddInstruction(induc_);
+ entry_->AddInstruction(new (&allocator_) HStoreLocal(induc_, constant0_));
+ tmp_ = new (&allocator_) HLocal(n + 1);
+ entry_->AddInstruction(tmp_);
+ entry_->AddInstruction(new (&allocator_) HStoreLocal(tmp_, constant100_));
+
+ // Provide loop instructions.
+ for (int d = 0; d < n; d++) {
+ basic_[d] = new (&allocator_) HLocal(d);
+ entry_->AddInstruction(basic_[d]);
+ loop_preheader_[d]->AddInstruction(
+ new (&allocator_) HStoreLocal(basic_[d], constant0_));
+ HInstruction* load = new (&allocator_)
+ HLoadLocal(basic_[d], Primitive::kPrimInt);
+ loop_header_[d]->AddInstruction(load);
+ HInstruction* compare = new (&allocator_)
+ HGreaterThanOrEqual(load, constant100_);
+ loop_header_[d]->AddInstruction(compare);
+ loop_header_[d]->AddInstruction(new (&allocator_) HIf(compare));
+ load = new (&allocator_) HLoadLocal(basic_[d], Primitive::kPrimInt);
+ loop_body_[d]->AddInstruction(load);
+ increment_[d] = new (&allocator_)
+ HAdd(Primitive::kPrimInt, load, constant1_);
+ loop_body_[d]->AddInstruction(increment_[d]);
+ loop_body_[d]->AddInstruction(
+ new (&allocator_) HStoreLocal(basic_[d], increment_[d]));
+ loop_body_[d]->AddInstruction(new (&allocator_) HGoto());
+ }
+ }
+
+ // Builds if-statement at depth d.
+ void BuildIf(int d, HBasicBlock** ifT, HBasicBlock** ifF) {
+ HBasicBlock* cond = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* ifTrue = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* ifFalse = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(cond);
+ graph_->AddBlock(ifTrue);
+ graph_->AddBlock(ifFalse);
+ // Conditional split.
+ loop_header_[d]->ReplaceSuccessor(loop_body_[d], cond);
+ cond->AddSuccessor(ifTrue);
+ cond->AddSuccessor(ifFalse);
+ ifTrue->AddSuccessor(loop_body_[d]);
+ ifFalse->AddSuccessor(loop_body_[d]);
+ cond->AddInstruction(new (&allocator_) HIf(parameter_));
+ *ifT = ifTrue;
+ *ifF = ifFalse;
+ }
+
+ // Inserts instruction right before increment at depth d.
+ HInstruction* InsertInstruction(HInstruction* instruction, int d) {
+ loop_body_[d]->InsertInstructionBefore(instruction, increment_[d]);
+ return instruction;
+ }
+
+ // Inserts local load at depth d.
+ HInstruction* InsertLocalLoad(HLocal* local, int d) {
+ return InsertInstruction(
+ new (&allocator_) HLoadLocal(local, Primitive::kPrimInt), d);
+ }
+
+ // Inserts local store at depth d.
+ HInstruction* InsertLocalStore(HLocal* local, HInstruction* rhs, int d) {
+ return InsertInstruction(new (&allocator_) HStoreLocal(local, rhs), d);
+ }
+
+ // Inserts an array store with given local as subscript at depth d to
+ // enable tests to inspect the computed induction at that point easily.
+ HInstruction* InsertArrayStore(HLocal* subscript, int d) {
+ HInstruction* load = InsertInstruction(
+ new (&allocator_) HLoadLocal(subscript, Primitive::kPrimInt), d);
+ return InsertInstruction(new (&allocator_) HArraySet(
+ parameter_, load, constant0_, Primitive::kPrimInt, 0), d);
+ }
+
+ // Returns loop information of loop at depth d.
+ HLoopInformation* GetLoopInfo(int d) {
+ return loop_body_[d]->GetLoopInformation();
+ }
+
+ // Performs InductionVarAnalysis (after proper set up).
+ void PerformInductionVarAnalysis() {
+ ASSERT_TRUE(graph_->TryBuildingSsa());
+ iva_ = new (&allocator_) HInductionVarAnalysis(graph_);
+ iva_->Run();
+ }
+
+ // General building fields.
+ ArenaPool pool_;
+ ArenaAllocator allocator_;
+ HGraph* graph_;
+ HInductionVarAnalysis* iva_;
+
+ // Fixed basic blocks and instructions.
+ HBasicBlock* entry_;
+ HBasicBlock* exit_;
+ HInstruction* parameter_; // "this"
+ HInstruction* constant0_;
+ HInstruction* constant1_;
+ HInstruction* constant100_;
+ HLocal* induc_; // "vreg_n", the "k"
+ HLocal* tmp_; // "vreg_n+1"
+
+ // Loop specifics.
+ HBasicBlock* loop_preheader_[10];
+ HBasicBlock* loop_header_[10];
+ HBasicBlock* loop_body_[10];
+ HInstruction* increment_[10];
+ HLocal* basic_[10]; // "vreg_d", the "i_d"
+};
+
+//
+// The actual InductionVarAnalysis tests.
+//
+
+TEST_F(InductionVarAnalysisTest, ProperLoopSetup) {
+ // Setup:
+ // for (int i_0 = 0; i_0 < 100; i_0++) {
+ // ..
+ // for (int i_9 = 0; i_9 < 100; i_9++) {
+ // }
+ // ..
+ // }
+ BuildLoopNest(10);
+ ASSERT_TRUE(graph_->TryBuildingSsa());
+ ASSERT_EQ(entry_->GetLoopInformation(), nullptr);
+ for (int d = 0; d < 10; d++) {
+ ASSERT_EQ(loop_preheader_[d]->GetLoopInformation(),
+ (d == 0) ? nullptr
+ : loop_header_[d - 1]->GetLoopInformation());
+ ASSERT_NE(loop_header_[d]->GetLoopInformation(), nullptr);
+ ASSERT_NE(loop_body_[d]->GetLoopInformation(), nullptr);
+ ASSERT_EQ(loop_header_[d]->GetLoopInformation(),
+ loop_body_[d]->GetLoopInformation());
+ }
+ ASSERT_EQ(exit_->GetLoopInformation(), nullptr);
+}
+
+TEST_F(InductionVarAnalysisTest, FindBasicInductionVar) {
+ // Setup:
+ // for (int i = 0; i < 100; i++) {
+ // a[i] = 0;
+ // }
+ BuildLoopNest(1);
+ HInstruction* store = InsertArrayStore(basic_[0], 0);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ(
+ "((2:Constant) * i + (1:Constant))",
+ iva_->InductionToString(GetLoopInfo(0), store->InputAt(1)).c_str());
+ EXPECT_STREQ(
+ "((2:Constant) * i + ((1:Constant) + (2:Constant)))",
+ iva_->InductionToString(GetLoopInfo(0), increment_[0]).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, FindDerivedInductionVarAdd) {
+ // Setup:
+ // for (int i = 0; i < 100; i++) {
+ // k = 100 + i;
+ // a[k] = 0;
+ // }
+ BuildLoopNest(1);
+ HInstruction* add = InsertInstruction(
+ new (&allocator_) HAdd(
+ Primitive::kPrimInt, constant100_, InsertLocalLoad(basic_[0], 0)), 0);
+ InsertLocalStore(induc_, add, 0);
+ HInstruction* store = InsertArrayStore(induc_, 0);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ(
+ "((2:Constant) * i + ((3:Constant) + (1:Constant)))",
+ iva_->InductionToString(GetLoopInfo(0), store->InputAt(1)).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, FindDerivedInductionVarSub) {
+ // Setup:
+ // for (int i = 0; i < 100; i++) {
+ // k = 100 - i;
+ // a[k] = 0;
+ // }
+ BuildLoopNest(1);
+ HInstruction* sub = InsertInstruction(
+ new (&allocator_) HSub(
+ Primitive::kPrimInt, constant100_, InsertLocalLoad(basic_[0], 0)), 0);
+ InsertLocalStore(induc_, sub, 0);
+ HInstruction* store = InsertArrayStore(induc_, 0);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ(
+ "(( - (2:Constant)) * i + ((3:Constant) - (1:Constant)))",
+ iva_->InductionToString(GetLoopInfo(0), store->InputAt(1)).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, FindDerivedInductionVarMul) {
+ // Setup:
+ // for (int i = 0; i < 100; i++) {
+ // k = 100 * i;
+ // a[k] = 0;
+ // }
+ BuildLoopNest(1);
+ HInstruction* mul = InsertInstruction(
+ new (&allocator_) HMul(
+ Primitive::kPrimInt, constant100_, InsertLocalLoad(basic_[0], 0)), 0);
+ InsertLocalStore(induc_, mul, 0);
+ HInstruction* store = InsertArrayStore(induc_, 0);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ(
+ "(((3:Constant) * (2:Constant)) * i + ((3:Constant) * (1:Constant)))",
+ iva_->InductionToString(GetLoopInfo(0), store->InputAt(1)).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, FindDerivedInductionVarNeg) {
+ // Setup:
+ // for (int i = 0; i < 100; i++) {
+ // k = - i;
+ // a[k] = 0;
+ // }
+ BuildLoopNest(1);
+ HInstruction* neg = InsertInstruction(
+ new (&allocator_) HNeg(
+ Primitive::kPrimInt, InsertLocalLoad(basic_[0], 0)), 0);
+ InsertLocalStore(induc_, neg, 0);
+ HInstruction* store = InsertArrayStore(induc_, 0);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ(
+ "(( - (2:Constant)) * i + ( - (1:Constant)))",
+ iva_->InductionToString(GetLoopInfo(0), store->InputAt(1)).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, FindChainInduction) {
+ // Setup:
+ // k = 0;
+ // for (int i = 0; i < 100; i++) {
+ // k = k + 100;
+ // a[k] = 0;
+ // k = k - 1;
+ // a[k] = 0;
+ // }
+ BuildLoopNest(1);
+ HInstruction* add = InsertInstruction(
+ new (&allocator_) HAdd(
+ Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant100_), 0);
+ InsertLocalStore(induc_, add, 0);
+ HInstruction* store1 = InsertArrayStore(induc_, 0);
+ HInstruction* sub = InsertInstruction(
+ new (&allocator_) HSub(
+ Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant1_), 0);
+ InsertLocalStore(induc_, sub, 0);
+ HInstruction* store2 = InsertArrayStore(induc_, 0);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ(
+ "(((3:Constant) - (2:Constant)) * i + ((1:Constant) + (3:Constant)))",
+ iva_->InductionToString(GetLoopInfo(0), store1->InputAt(1)).c_str());
+ EXPECT_STREQ(
+ "(((3:Constant) - (2:Constant)) * i + "
+ "(((1:Constant) + (3:Constant)) - (2:Constant)))",
+ iva_->InductionToString(GetLoopInfo(0), store2->InputAt(1)).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, FindTwoWayBasicInduction) {
+ // Setup:
+ // k = 0;
+ // for (int i = 0; i < 100; i++) {
+ // if () k = k + 1;
+ // else k = k + 1;
+ // a[k] = 0;
+ // }
+ BuildLoopNest(1);
+ HBasicBlock* ifTrue;
+ HBasicBlock* ifFalse;
+ BuildIf(0, &ifTrue, &ifFalse);
+ // True-branch.
+ HInstruction* load1 = new (&allocator_)
+ HLoadLocal(induc_, Primitive::kPrimInt);
+ ifTrue->AddInstruction(load1);
+ HInstruction* inc1 = new (&allocator_)
+ HAdd(Primitive::kPrimInt, load1, constant1_);
+ ifTrue->AddInstruction(inc1);
+ ifTrue->AddInstruction(new (&allocator_) HStoreLocal(induc_, inc1));
+ // False-branch.
+ HInstruction* load2 = new (&allocator_)
+ HLoadLocal(induc_, Primitive::kPrimInt);
+ ifFalse->AddInstruction(load2);
+ HInstruction* inc2 = new (&allocator_)
+ HAdd(Primitive::kPrimInt, load2, constant1_);
+ ifFalse->AddInstruction(inc2);
+ ifFalse->AddInstruction(new (&allocator_) HStoreLocal(induc_, inc2));
+ // Merge over a phi.
+ HInstruction* store = InsertArrayStore(induc_, 0);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ(
+ "((2:Constant) * i + ((1:Constant) + (2:Constant)))",
+ iva_->InductionToString(GetLoopInfo(0), store->InputAt(1)).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, FindTwoWayDerivedInduction) {
+ // Setup:
+ // for (int i = 0; i < 100; i++) {
+ // if () k = i + 1;
+ // else k = i + 1;
+ // a[k] = 0;
+ // }
+ BuildLoopNest(1);
+ HBasicBlock* ifTrue;
+ HBasicBlock* ifFalse;
+ BuildIf(0, &ifTrue, &ifFalse);
+ // True-branch.
+ HInstruction* load1 = new (&allocator_)
+ HLoadLocal(basic_[0], Primitive::kPrimInt);
+ ifTrue->AddInstruction(load1);
+ HInstruction* inc1 = new (&allocator_)
+ HAdd(Primitive::kPrimInt, load1, constant1_);
+ ifTrue->AddInstruction(inc1);
+ ifTrue->AddInstruction(new (&allocator_) HStoreLocal(induc_, inc1));
+ // False-branch.
+ HInstruction* load2 = new (&allocator_)
+ HLoadLocal(basic_[0], Primitive::kPrimInt);
+ ifFalse->AddInstruction(load2);
+ HInstruction* inc2 = new (&allocator_)
+ HAdd(Primitive::kPrimInt, load2, constant1_);
+ ifFalse->AddInstruction(inc2);
+ ifFalse->AddInstruction(new (&allocator_) HStoreLocal(induc_, inc2));
+ // Merge over a phi.
+ HInstruction* store = InsertArrayStore(induc_, 0);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ(
+ "((2:Constant) * i + ((1:Constant) + (2:Constant)))",
+ iva_->InductionToString(GetLoopInfo(0), store->InputAt(1)).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, FindFirstOrderWrapAroundInduction) {
+ // Setup:
+ // k = 0;
+ // for (int i = 0; i < 100; i++) {
+ // a[k] = 0;
+ // k = 100 - i;
+ // }
+ BuildLoopNest(1);
+ HInstruction* store = InsertArrayStore(induc_, 0);
+ HInstruction* sub = InsertInstruction(
+ new (&allocator_) HSub(
+ Primitive::kPrimInt, constant100_, InsertLocalLoad(basic_[0], 0)), 0);
+ InsertLocalStore(induc_, sub, 0);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ(
+ "wrap((1:Constant), "
+ "(( - (2:Constant)) * i + ((3:Constant) - (1:Constant))))",
+ iva_->InductionToString(GetLoopInfo(0), store->InputAt(1)).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, FindSecondOrderWrapAroundInduction) {
+ // Setup:
+ // k = 0;
+ // t = 100;
+ // for (int i = 0; i < 100; i++) {
+ // a[k] = 0;
+ // k = t;
+ // t = 100 - i;
+ // }
+ BuildLoopNest(1);
+ HInstruction* store = InsertArrayStore(induc_, 0);
+ InsertLocalStore(induc_, InsertLocalLoad(tmp_, 0), 0);
+ HInstruction* sub = InsertInstruction(
+ new (&allocator_) HSub(
+ Primitive::kPrimInt, constant100_, InsertLocalLoad(basic_[0], 0)), 0);
+ InsertLocalStore(tmp_, sub, 0);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ(
+ "wrap((1:Constant), wrap((3:Constant), "
+ "(( - (2:Constant)) * i + ((3:Constant) - (1:Constant)))))",
+ iva_->InductionToString(GetLoopInfo(0), store->InputAt(1)).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, FindDeepLoopInduction) {
+ // Setup:
+ // k = 0;
+ // for (int i_0 = 0; i_0 < 100; i_0++) {
+ // ..
+ // for (int i_9 = 0; i_9 < 100; i_9++) {
+ // k = 1 + k;
+ // a[k] = 0;
+ // }
+ // ..
+ // }
+ BuildLoopNest(10);
+ HInstruction* inc = InsertInstruction(
+ new (&allocator_) HAdd(
+ Primitive::kPrimInt, constant1_, InsertLocalLoad(induc_, 9)), 9);
+ InsertLocalStore(induc_, inc, 9);
+ HInstruction* store = InsertArrayStore(induc_, 9);
+ PerformInductionVarAnalysis();
+
+ // Match exact number of constants, but be less strict on phi number,
+ // since that depends on the SSA building phase.
+ std::regex r("\\(\\(2:Constant\\) \\* i \\+ "
+ "\\(\\(2:Constant\\) \\+ \\(\\d+:Phi\\)\\)\\)");
+
+ for (int d = 0; d < 10; d++) {
+ if (d == 9) {
+ EXPECT_TRUE(std::regex_match(
+ iva_->InductionToString(GetLoopInfo(d), store->InputAt(1)), r));
+ } else {
+ EXPECT_STREQ(
+ "",
+ iva_->InductionToString(GetLoopInfo(d), store->InputAt(1)).c_str());
+ }
+ EXPECT_STREQ(
+ "((2:Constant) * i + ((1:Constant) + (2:Constant)))",
+ iva_->InductionToString(GetLoopInfo(d), increment_[d]).c_str());
+ }
+}
+
+} // namespace art
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index feaaaf4..075ec1e 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -16,16 +16,12 @@
#include "intrinsics.h"
-#include "art_method.h"
-#include "class_linker.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "driver/compiler_driver.h"
#include "invoke_type.h"
-#include "mirror/dex_cache-inl.h"
#include "nodes.h"
#include "quick/inline_method_analyser.h"
-#include "scoped_thread_state_change.h"
#include "utils.h"
namespace art {
@@ -368,34 +364,17 @@
if (inst->IsInvoke()) {
HInvoke* invoke = inst->AsInvoke();
InlineMethod method;
- const DexFile& dex_file = invoke->GetDexFile();
- DexFileMethodInliner* inliner = driver_->GetMethodInlinerMap()->GetMethodInliner(&dex_file);
+ DexFileMethodInliner* inliner =
+ driver_->GetMethodInlinerMap()->GetMethodInliner(&invoke->GetDexFile());
DCHECK(inliner != nullptr);
if (inliner->IsIntrinsic(invoke->GetDexMethodIndex(), &method)) {
Intrinsics intrinsic = GetIntrinsic(method, graph_->GetInstructionSet());
if (intrinsic != Intrinsics::kNone) {
if (!CheckInvokeType(intrinsic, invoke)) {
- // We might be in a situation where we have inlined a method that calls an intrinsic,
- // but that method is in a different dex file on which we do not have a
- // verified_method that would have helped the compiler driver sharpen the call.
- // We can still ensure the invoke types match by checking whether the called method
- // is final or is in a final class.
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- {
- ScopedObjectAccess soa(Thread::Current());
- ArtMethod* art_method = class_linker->FindDexCache(dex_file)->GetResolvedMethod(
- invoke->GetDexMethodIndex(), class_linker->GetImagePointerSize());
- DCHECK(art_method != nullptr);
- if (art_method->IsFinal() || art_method->GetDeclaringClass()->IsFinal()) {
- invoke->SetIntrinsic(intrinsic, NeedsEnvironmentOrCache(intrinsic));
- } else {
- LOG(WARNING) << "Found an intrinsic with unexpected invoke type: "
- << intrinsic << " for "
- << PrettyMethod(invoke->GetDexMethodIndex(), invoke->GetDexFile())
- << invoke->DebugName();
- }
- }
+ LOG(WARNING) << "Found an intrinsic with unexpected invoke type: "
+ << intrinsic << " for "
+ << PrettyMethod(invoke->GetDexMethodIndex(), invoke->GetDexFile());
} else {
invoke->SetIntrinsic(intrinsic, NeedsEnvironmentOrCache(intrinsic));
}
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index f6bbace..2a76991 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -795,8 +795,8 @@
const DexFile& dex_file) const {
CompilerDriver* compiler_driver = GetCompilerDriver();
CompiledMethod* method = nullptr;
- if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file) &&
- !compiler_driver->GetVerifiedMethod(&dex_file, method_idx)->HasRuntimeThrow()) {
+ DCHECK(!compiler_driver->GetVerifiedMethod(&dex_file, method_idx)->HasRuntimeThrow());
+ if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)) {
method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
method_idx, jclass_loader, dex_file);
} else {
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 97b9972..824f28e 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -362,7 +362,7 @@
if (kIsDebugBuild) {
ScopedObjectAccess soa(Thread::Current());
ClassLinker* cl = Runtime::Current()->GetClassLinker();
- mirror::DexCache* dex_cache = cl->FindDexCache(instr->AsInvoke()->GetDexFile());
+ mirror::DexCache* dex_cache = cl->FindDexCache(instr->AsInvoke()->GetDexFile(), false);
ArtMethod* method = dex_cache->GetResolvedMethod(
instr->AsInvoke()->GetDexMethodIndex(), cl->GetImagePointerSize());
DCHECK(method != nullptr);
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 33207d9..c4a3b28 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -143,6 +143,22 @@
stream.AddDexRegisterEntry(Kind::kInFpuRegister, 3); // Short location.
stream.EndStackMapEntry();
+ ArenaBitVector sp_mask3(&arena, 0, true);
+ sp_mask3.SetBit(1);
+ sp_mask3.SetBit(5);
+ stream.BeginStackMapEntry(2, 192, 0xAB, &sp_mask3, number_of_dex_registers, 0);
+ stream.AddDexRegisterEntry(Kind::kInRegister, 6); // Short location.
+ stream.AddDexRegisterEntry(Kind::kInRegisterHigh, 8); // Short location.
+ stream.EndStackMapEntry();
+
+ ArenaBitVector sp_mask4(&arena, 0, true);
+ sp_mask4.SetBit(6);
+ sp_mask4.SetBit(7);
+ stream.BeginStackMapEntry(3, 256, 0xCD, &sp_mask4, number_of_dex_registers, 0);
+ stream.AddDexRegisterEntry(Kind::kInFpuRegister, 3); // Short location, same in stack map 2.
+ stream.AddDexRegisterEntry(Kind::kInFpuRegisterHigh, 1); // Short location.
+ stream.EndStackMapEntry();
+
size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
@@ -151,15 +167,15 @@
CodeInfo code_info(region);
StackMapEncoding encoding = code_info.ExtractEncoding();
ASSERT_EQ(2u, encoding.NumberOfBytesForStackMask());
- ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
+ ASSERT_EQ(4u, code_info.GetNumberOfStackMaps());
uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
- ASSERT_EQ(4u, number_of_location_catalog_entries);
+ ASSERT_EQ(7u, number_of_location_catalog_entries);
DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
// The Dex register location catalog contains:
- // - three 1-byte short Dex register locations, and
+ // - six 1-byte short Dex register locations, and
// - one 5-byte large Dex register location.
- size_t expected_location_catalog_size = 3u * 1u + 5u;
+ size_t expected_location_catalog_size = 6u * 1u + 5u;
ASSERT_EQ(expected_location_catalog_size, location_catalog.Size());
// First stack map.
@@ -278,6 +294,116 @@
ASSERT_FALSE(stack_map.HasInlineInfo(encoding));
}
+
+ // Third stack map.
+ {
+ StackMap stack_map = code_info.GetStackMapAt(2, encoding);
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(2u, encoding)));
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(192u, encoding)));
+ ASSERT_EQ(2u, stack_map.GetDexPc(encoding));
+ ASSERT_EQ(192u, stack_map.GetNativePcOffset(encoding));
+ ASSERT_EQ(0xABu, stack_map.GetRegisterMask(encoding));
+
+ MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
+ ASSERT_TRUE(SameBits(stack_mask, sp_mask3));
+
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding));
+ DexRegisterMap dex_register_map =
+ code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+ ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
+ ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+ ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
+ // The Dex register map contains:
+ // - one 1-byte live bit mask, and
+ // - one 1-byte set of location catalog entry indices composed of two 2-bit values.
+ size_t expected_dex_register_map_size = 1u + 1u;
+ ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
+
+ ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationKind(
+ 0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kInRegisterHigh, dex_register_map.GetLocationKind(
+ 1, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationInternalKind(
+ 0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kInRegisterHigh, dex_register_map.GetLocationInternalKind(
+ 1, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(6, dex_register_map.GetMachineRegister(
+ 0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(8, dex_register_map.GetMachineRegister(
+ 1, number_of_dex_registers, code_info, encoding));
+
+ size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
+ 0, number_of_dex_registers, number_of_location_catalog_entries);
+ size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(
+ 1, number_of_dex_registers, number_of_location_catalog_entries);
+ ASSERT_EQ(4u, index0);
+ ASSERT_EQ(5u, index1);
+ DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0);
+ DexRegisterLocation location1 = location_catalog.GetDexRegisterLocation(index1);
+ ASSERT_EQ(Kind::kInRegister, location0.GetKind());
+ ASSERT_EQ(Kind::kInRegisterHigh, location1.GetKind());
+ ASSERT_EQ(Kind::kInRegister, location0.GetInternalKind());
+ ASSERT_EQ(Kind::kInRegisterHigh, location1.GetInternalKind());
+ ASSERT_EQ(6, location0.GetValue());
+ ASSERT_EQ(8, location1.GetValue());
+
+ ASSERT_FALSE(stack_map.HasInlineInfo(encoding));
+ }
+
+ // Fourth stack map.
+ {
+ StackMap stack_map = code_info.GetStackMapAt(3, encoding);
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(3u, encoding)));
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(256u, encoding)));
+ ASSERT_EQ(3u, stack_map.GetDexPc(encoding));
+ ASSERT_EQ(256u, stack_map.GetNativePcOffset(encoding));
+ ASSERT_EQ(0xCDu, stack_map.GetRegisterMask(encoding));
+
+ MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
+ ASSERT_TRUE(SameBits(stack_mask, sp_mask4));
+
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding));
+ DexRegisterMap dex_register_map =
+ code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+ ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
+ ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+ ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
+ // The Dex register map contains:
+ // - one 1-byte live bit mask, and
+ // - one 1-byte set of location catalog entry indices composed of two 2-bit values.
+ size_t expected_dex_register_map_size = 1u + 1u;
+ ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
+
+ ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationKind(
+ 0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kInFpuRegisterHigh, dex_register_map.GetLocationKind(
+ 1, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationInternalKind(
+ 0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kInFpuRegisterHigh, dex_register_map.GetLocationInternalKind(
+ 1, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(3, dex_register_map.GetMachineRegister(
+ 0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(1, dex_register_map.GetMachineRegister(
+ 1, number_of_dex_registers, code_info, encoding));
+
+ size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
+ 0, number_of_dex_registers, number_of_location_catalog_entries);
+ size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(
+ 1, number_of_dex_registers, number_of_location_catalog_entries);
+ ASSERT_EQ(3u, index0); // Shared with second stack map.
+ ASSERT_EQ(6u, index1);
+ DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0);
+ DexRegisterLocation location1 = location_catalog.GetDexRegisterLocation(index1);
+ ASSERT_EQ(Kind::kInFpuRegister, location0.GetKind());
+ ASSERT_EQ(Kind::kInFpuRegisterHigh, location1.GetKind());
+ ASSERT_EQ(Kind::kInFpuRegister, location0.GetInternalKind());
+ ASSERT_EQ(Kind::kInFpuRegisterHigh, location1.GetInternalKind());
+ ASSERT_EQ(3, location0.GetValue());
+ ASSERT_EQ(1, location1.GetValue());
+
+ ASSERT_FALSE(stack_map.HasInlineInfo(encoding));
+ }
}
TEST(StackMapTest, TestNonLiveDexRegisters) {
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index b499ddd..4e918e9 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -3299,17 +3299,17 @@
Register tmp_reg = kNoRegister;
if (!Address::CanHoldStoreOffsetThumb(type, offset)) {
CHECK_NE(base, IP);
- if (reg != IP &&
- (type != kStoreWordPair || reg + 1 != IP)) {
+ if ((reg != IP) &&
+ ((type != kStoreWordPair) || (reg + 1 != IP))) {
tmp_reg = IP;
} else {
// Be careful not to use IP twice (for `reg` (or `reg` + 1 in
- // the case of a word-pair store)) and to build the Address
- // object used by the store instruction(s) below). Instead,
- // save R5 on the stack (or R6 if R5 is not available), use it
- // as secondary temporary register, and restore it after the
- // store instruction has been emitted.
- tmp_reg = base != R5 ? R5 : R6;
+ // the case of a word-pair store) and `base`) to build the
+ // Address object used by the store instruction(s) below.
+ // Instead, save R5 on the stack (or R6 if R5 is already used by
+ // `base`), use it as secondary temporary register, and restore
+ // it after the store instruction has been emitted.
+ tmp_reg = (base != R5) ? R5 : R6;
Push(tmp_reg);
if (base == SP) {
offset += kRegisterSize;
@@ -3338,8 +3338,8 @@
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
- if (tmp_reg != kNoRegister && tmp_reg != IP) {
- DCHECK(tmp_reg == R5 || tmp_reg == R6);
+ if ((tmp_reg != kNoRegister) && (tmp_reg != IP)) {
+ CHECK((tmp_reg == R5) || (tmp_reg == R6));
Pop(tmp_reg);
}
}
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 99736e9..07cf88c 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1207,6 +1207,14 @@
oat_file_.reset();
}
+ void Shutdown() {
+ ScopedObjectAccess soa(Thread::Current());
+ for (jobject dex_cache : dex_caches_) {
+ soa.Env()->DeleteLocalRef(dex_cache);
+ }
+ dex_caches_.clear();
+ }
+
// Set up the environment for compilation. Includes starting the runtime and loading/opening the
// boot class path.
bool Setup() {
@@ -1320,8 +1328,9 @@
compiled_methods_.reset(nullptr); // By default compile everything.
}
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
if (boot_image_option_.empty()) {
- dex_files_ = Runtime::Current()->GetClassLinker()->GetBootClassPath();
+ dex_files_ = class_linker->GetBootClassPath();
} else {
if (dex_filenames_.empty()) {
ATRACE_BEGIN("Opening zip archive from file descriptor");
@@ -1374,11 +1383,15 @@
}
}
}
- // Ensure opened dex files are writable for dex-to-dex transformations.
+ // Ensure opened dex files are writable for dex-to-dex transformations. Also ensure that
+ // the dex caches stay live since we don't want class unloading to occur during compilation.
for (const auto& dex_file : dex_files_) {
if (!dex_file->EnableWrite()) {
PLOG(ERROR) << "Failed to make .dex file writeable '" << dex_file->GetLocation() << "'\n";
}
+ ScopedObjectAccess soa(self);
+ dex_caches_.push_back(soa.AddLocalReference<jobject>(
+ class_linker->RegisterDexFile(*dex_file)));
}
// If we use a swap file, ensure we are above the threshold to make it necessary.
@@ -1423,6 +1436,7 @@
// Handle and ClassLoader creation needs to come after Runtime::Create
jobject class_loader = nullptr;
Thread* self = Thread::Current();
+
if (!boot_image_option_.empty()) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
OpenClassPathFiles(runtime_->GetClassPathString(), dex_files_, &class_path_files_);
@@ -1957,6 +1971,7 @@
bool is_host_;
std::string android_root_;
std::vector<const DexFile*> dex_files_;
+ std::vector<jobject> dex_caches_;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
std::unique_ptr<CompilerDriver> driver_;
std::vector<std::string> verbose_methods_;
@@ -2107,11 +2122,15 @@
return EXIT_FAILURE;
}
+ bool result;
if (dex2oat.IsImage()) {
- return CompileImage(dex2oat);
+ result = CompileImage(dex2oat);
} else {
- return CompileApp(dex2oat);
+ result = CompileApp(dex2oat);
}
+
+ dex2oat.Shutdown();
+ return result;
}
} // namespace art
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 44b78ff..1950d56 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -812,11 +812,15 @@
DumpDexCode(vios->Stream(), dex_file, code_item);
}
+ std::unique_ptr<StackHandleScope<1>> hs;
std::unique_ptr<verifier::MethodVerifier> verifier;
if (Runtime::Current() != nullptr) {
+ // We need to have the handle scope stay live until after the verifier since the verifier has
+ // a handle to the dex cache from hs.
+ hs.reset(new StackHandleScope<1>(Thread::Current()));
vios->Stream() << "VERIFIER TYPE ANALYSIS:\n";
ScopedIndentation indent2(vios);
- verifier.reset(DumpVerifier(vios,
+ verifier.reset(DumpVerifier(vios, hs.get(),
dex_method_idx, &dex_file, class_def, code_item,
method_access_flags));
}
@@ -1389,6 +1393,7 @@
}
verifier::MethodVerifier* DumpVerifier(VariableIndentationOutputStream* vios,
+ StackHandleScope<1>* hs,
uint32_t dex_method_idx,
const DexFile* dex_file,
const DexFile::ClassDef& class_def,
@@ -1396,9 +1401,8 @@
uint32_t method_access_flags) {
if ((method_access_flags & kAccNative) == 0) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(
- hs.NewHandle(Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file)));
+ hs->NewHandle(Runtime::Current()->GetClassLinker()->RegisterDexFile(*dex_file)));
DCHECK(options_.class_loader_ != nullptr);
return verifier::MethodVerifier::VerifyMethodAndDump(
soa.Self(), vios, dex_method_idx, dex_file, dex_cache, *options_.class_loader_,
@@ -1599,10 +1603,13 @@
dex_cache_arrays_.clear();
{
ReaderMutexLock mu(self, *class_linker->DexLock());
- for (size_t i = 0; i < class_linker->GetDexCacheCount(); ++i) {
- auto* dex_cache = class_linker->GetDexCache(i);
- dex_cache_arrays_.insert(dex_cache->GetResolvedFields());
- dex_cache_arrays_.insert(dex_cache->GetResolvedMethods());
+ for (jobject weak_root : class_linker->GetDexCaches()) {
+ mirror::DexCache* dex_cache =
+ down_cast<mirror::DexCache*>(self->DecodeJObject(weak_root));
+ if (dex_cache != nullptr) {
+ dex_cache_arrays_.insert(dex_cache->GetResolvedFields());
+ dex_cache_arrays_.insert(dex_cache->GetResolvedMethods());
+ }
}
}
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index f6d954f..d6396c1 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -609,7 +609,7 @@
.cfi_rel_offset lr, 20
sub sp, #8 @ push padding
.cfi_adjust_cfa_offset 8
- @ mov r0, r0 @ pass ref in r0 (no-op for now since parameter ref is unused)
+ @ mov r0, \rRef @ pass ref in r0 (no-op for now since parameter ref is unused)
.ifnc \rObj, r1
mov r1, \rObj @ pass rObj
.endif
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 8ba3d43..bfef0fa 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1146,7 +1146,7 @@
.cfi_rel_offset x4, 32
.cfi_rel_offset x30, 40
- // mov x0, x0 // pass ref in x0 (no-op for now since parameter ref is unused)
+ // mov x0, \xRef // pass ref in x0 (no-op for now since parameter ref is unused)
.ifnc \xObj, x1
mov x1, \xObj // pass xObj
.endif
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 8bc75e5..cb49cf5 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -869,7 +869,7 @@
sw $a0, 0($sp)
.cfi_rel_offset 4, 0
- # move $a0, $a0 # pass ref in a0 (no-op for now since parameter ref is unused)
+ # move $a0, \rRef # pass ref in a0 (no-op for now since parameter ref is unused)
.ifnc \rObj, $a1
move $a1, \rObj # pass rObj
.endif
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index c30e6ca..4bc049c 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -922,7 +922,7 @@
sd $a0, 0($sp)
.cfi_rel_offset 4, 0
- # move $a0, $a0 # pass ref in a0 (no-op for now since parameter ref is unused)
+ # move $a0, \rRef # pass ref in a0 (no-op for now since parameter ref is unused)
.ifnc \rObj, $a1
move $a1, \rObj # pass rObj
.endif
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 1da5a2f..9b2d59d 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1200,9 +1200,9 @@
READ_BARRIER edx, MIRROR_OBJECT_CLASS_OFFSET, eax, false
cmpl %eax, %ebx
POP eax // restore eax from the push in the beginning of READ_BARRIER macro
+ // This asymmetric push/pop saves a push of eax and maintains stack alignment.
#elif defined(USE_HEAP_POISONING)
PUSH eax // save eax
- // Cannot call READ_BARRIER macro here, because the above push messes up stack alignment.
movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax
UNPOISON_HEAP_REF eax
cmpl %eax, %ebx
@@ -1225,15 +1225,22 @@
PUSH eax // save arguments
PUSH ecx
PUSH edx
+#if defined(USE_READ_BARRIER)
+ subl LITERAL(4), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(4)
+ READ_BARRIER edx, MIRROR_OBJECT_CLASS_OFFSET, eax, true
+ subl LITERAL(4), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(4)
+ PUSH eax // pass arg2 - type of the value to be stored
+#elif defined(USE_HEAP_POISONING)
subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
-#ifdef USE_HEAP_POISONING
- // This load does not need read barrier, since edx is unchanged and there's no GC safe point
- // from last read of MIRROR_OBJECT_CLASS_OFFSET(%edx).
- movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax // pass arg2 - type of the value to be stored
+ movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax
UNPOISON_HEAP_REF eax
- PUSH eax
+ PUSH eax // pass arg2 - type of the value to be stored
#else
+ subl LITERAL(8), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(8)
pushl MIRROR_OBJECT_CLASS_OFFSET(%edx) // pass arg2 - type of the value to be stored
CFI_ADJUST_CFA_OFFSET(4)
#endif
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index f4c9488..88270d9 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -918,6 +918,13 @@
// RDI: uint32_t type_idx, RSI: ArtMethod*
// RDX, RCX, R8, R9: free. RAX: return val.
// TODO: Add read barrier when this function is used.
+ // Note this function can/should implement read barrier fast path only
+ // (no read barrier slow path) because this is the fast path of tlab allocation.
+ // We can fall back to the allocation slow path to do the read barrier slow path.
+#if defined(USE_READ_BARRIER)
+ int3
+ int3
+#endif
// Might need a special macro since rsi and edx is 32b/64b mismatched.
movl ART_METHOD_DEX_CACHE_TYPES_OFFSET(%rsi), %edx // Load dex cache resolved types array
UNPOISON_HEAP_REF edx
@@ -1165,7 +1172,7 @@
END_MACRO
/*
- * Macro to insert read barrier, used in art_quick_aput_obj and art_quick_alloc_object_tlab.
+ * Macro to insert read barrier, used in art_quick_aput_obj.
* obj_reg and dest_reg{32|64} are registers, offset is a defined literal such as
* MIRROR_OBJECT_CLASS_OFFSET. dest_reg needs two versions to handle the mismatch between
* 64b PUSH/POP and 32b argument.
@@ -1182,8 +1189,8 @@
PUSH rcx
SETUP_FP_CALLEE_SAVE_FRAME
// Outgoing argument set up
- // movl %edi, %edi // pass ref, no-op for now since parameter ref is unused
- // // movq %rdi, %rdi
+ // movl REG_VAR(ref_reg32), %edi // pass ref, no-op for now since parameter ref is unused
+ // // movq REG_VAR(ref_reg64), %rdi
movl REG_VAR(obj_reg), %esi // pass obj_reg
// movq REG_VAR(obj_reg), %rsi
movl MACRO_LITERAL((RAW_VAR(offset))), %edx // pass offset, double parentheses are necessary
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 35acd42..084c88e 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -141,10 +141,10 @@
#define MIRROR_CLASS_ACCESS_FLAGS_OFFSET (36 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_ACCESS_FLAGS_OFFSET,
art::mirror::Class::AccessFlagsOffset().Int32Value())
-#define MIRROR_CLASS_OBJECT_SIZE_OFFSET (96 + MIRROR_OBJECT_HEADER_SIZE)
+#define MIRROR_CLASS_OBJECT_SIZE_OFFSET (100 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_OBJECT_SIZE_OFFSET,
art::mirror::Class::ObjectSizeOffset().Int32Value())
-#define MIRROR_CLASS_STATUS_OFFSET (108 + MIRROR_OBJECT_HEADER_SIZE)
+#define MIRROR_CLASS_STATUS_OFFSET (112 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_STATUS_OFFSET,
art::mirror::Class::StatusOffset().Int32Value())
@@ -153,7 +153,7 @@
static_cast<uint32_t>(art::mirror::Class::kStatusInitialized))
#define ACCESS_FLAGS_CLASS_IS_FINALIZABLE 0x80000000
ADD_TEST_EQ(static_cast<uint32_t>(ACCESS_FLAGS_CLASS_IS_FINALIZABLE),
- static_cast<uint32_t>(kAccClassIsFinalizable))
+ static_cast<uint32_t>(art::kAccClassIsFinalizable))
// Array offsets.
#define MIRROR_ARRAY_LENGTH_OFFSET MIRROR_OBJECT_HEADER_SIZE
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 848c904..5f2caef 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -64,6 +64,7 @@
kJdwpSocketLock,
kRegionSpaceRegionLock,
kTransactionLogLock,
+ kJniWeakGlobalsLock,
kReferenceQueueSoftReferencesLock,
kReferenceQueuePhantomReferencesLock,
kReferenceQueueFinalizerReferencesLock,
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index 3155b51..7965cd7 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -87,9 +87,11 @@
CHECK(stack_mask.LoadBit(location.GetValue() / kFrameSlotSize));
break;
case DexRegisterLocation::Kind::kInRegister:
+ case DexRegisterLocation::Kind::kInRegisterHigh:
CHECK_NE(register_mask & (1 << location.GetValue()), 0u);
break;
case DexRegisterLocation::Kind::kInFpuRegister:
+ case DexRegisterLocation::Kind::kInFpuRegisterHigh:
// In Fpu register, should not be a reference.
CHECK(false);
break;
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 11901b3..d2dbff6 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -195,12 +195,6 @@
return klass;
}
-inline mirror::DexCache* ClassLinker::GetDexCache(size_t idx) {
- dex_lock_.AssertSharedHeld(Thread::Current());
- DCHECK(idx < dex_caches_.size());
- return dex_caches_[idx].Read();
-}
-
} // namespace art
#endif // ART_RUNTIME_CLASS_LINKER_INL_H_
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index c179c64..3b505e6 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -273,7 +273,6 @@
array_iftable_(nullptr),
find_array_class_cache_next_victim_(0),
init_done_(false),
- log_new_dex_caches_roots_(false),
log_new_class_table_roots_(false),
intern_table_(intern_table),
quick_resolution_trampoline_(nullptr),
@@ -332,6 +331,12 @@
java_lang_Class->SetSuperClass(java_lang_Object.Get());
mirror::Class::SetStatus(java_lang_Object, mirror::Class::kStatusLoaded, self);
+ java_lang_Object->SetObjectSize(sizeof(mirror::Object));
+ runtime->SetSentinel(heap->AllocObject<true>(self,
+ java_lang_Object.Get(),
+ java_lang_Object->GetObjectSize(),
+ VoidFunctor()));
+
// Object[] next to hold class roots.
Handle<mirror::Class> object_array_class(hs.NewHandle(
AllocClass(self, java_lang_Class.Get(),
@@ -358,9 +363,9 @@
// Setup String.
Handle<mirror::Class> java_lang_String(hs.NewHandle(
AllocClass(self, java_lang_Class.Get(), mirror::String::ClassSize(image_pointer_size_))));
+ java_lang_String->SetStringClass();
mirror::String::SetClass(java_lang_String.Get());
mirror::Class::SetStatus(java_lang_String, mirror::Class::kStatusResolved, self);
- java_lang_String->SetStringClass();
// Setup java.lang.ref.Reference.
Handle<mirror::Class> java_lang_ref_Reference(hs.NewHandle(
@@ -570,16 +575,17 @@
CHECK_EQ(java_lang_ref_Reference->GetClassSize(),
mirror::Reference::ClassSize(image_pointer_size_));
class_root = FindSystemClass(self, "Ljava/lang/ref/FinalizerReference;");
- class_root->SetAccessFlags(class_root->GetAccessFlags() |
- kAccClassIsReference | kAccClassIsFinalizerReference);
+ CHECK_EQ(class_root->GetClassFlags(), mirror::kClassFlagNormal);
+ class_root->SetClassFlags(class_root->GetClassFlags() | mirror::kClassFlagFinalizerReference);
class_root = FindSystemClass(self, "Ljava/lang/ref/PhantomReference;");
- class_root->SetAccessFlags(class_root->GetAccessFlags() | kAccClassIsReference |
- kAccClassIsPhantomReference);
+ CHECK_EQ(class_root->GetClassFlags(), mirror::kClassFlagNormal);
+ class_root->SetClassFlags(class_root->GetClassFlags() | mirror::kClassFlagPhantomReference);
class_root = FindSystemClass(self, "Ljava/lang/ref/SoftReference;");
- class_root->SetAccessFlags(class_root->GetAccessFlags() | kAccClassIsReference);
+ CHECK_EQ(class_root->GetClassFlags(), mirror::kClassFlagNormal);
+ class_root->SetClassFlags(class_root->GetClassFlags() | mirror::kClassFlagSoftReference);
class_root = FindSystemClass(self, "Ljava/lang/ref/WeakReference;");
- class_root->SetAccessFlags(class_root->GetAccessFlags() | kAccClassIsReference |
- kAccClassIsWeakReference);
+ CHECK_EQ(class_root->GetClassFlags(), mirror::kClassFlagNormal);
+ class_root->SetClassFlags(class_root->GetClassFlags() | mirror::kClassFlagWeakReference);
// Setup the ClassLoader, verifying the object_size_.
class_root = FindSystemClass(self, "Ljava/lang/ClassLoader;");
@@ -1142,11 +1148,11 @@
quick_imt_conflict_trampoline_ = oat_file.GetOatHeader().GetQuickImtConflictTrampoline();
quick_generic_jni_trampoline_ = oat_file.GetOatHeader().GetQuickGenericJniTrampoline();
quick_to_interpreter_bridge_trampoline_ = oat_file.GetOatHeader().GetQuickToInterpreterBridge();
+ StackHandleScope<2> hs(self);
mirror::Object* dex_caches_object = space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
- mirror::ObjectArray<mirror::DexCache>* dex_caches =
- dex_caches_object->AsObjectArray<mirror::DexCache>();
+ Handle<mirror::ObjectArray<mirror::DexCache>> dex_caches(
+ hs.NewHandle(dex_caches_object->AsObjectArray<mirror::DexCache>()));
- StackHandleScope<1> hs(self);
Handle<mirror::ObjectArray<mirror::Class>> class_roots(hs.NewHandle(
space->GetImageHeader().GetImageRoot(ImageHeader::kClassRoots)->
AsObjectArray<mirror::Class>()));
@@ -1156,6 +1162,13 @@
// as being Strings or not
mirror::String::SetClass(GetClassRoot(kJavaLangString));
+ mirror::Class* java_lang_Object = GetClassRoot(kJavaLangObject);
+ java_lang_Object->SetObjectSize(sizeof(mirror::Object));
+ Runtime::Current()->SetSentinel(Runtime::Current()->GetHeap()->AllocObject<true>(self,
+ java_lang_Object,
+ java_lang_Object->GetObjectSize(),
+ VoidFunctor()));
+
CHECK_EQ(oat_file.GetOatHeader().GetDexFileCount(),
static_cast<uint32_t>(dex_caches->GetLength()));
for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
@@ -1249,7 +1262,6 @@
}
bool ClassLinker::ClassInClassTable(mirror::Class* klass) {
- ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
ClassTable* const class_table = ClassTableForClassLoader(klass->GetClassLoader());
return class_table != nullptr && class_table->Contains(klass);
}
@@ -1306,27 +1318,6 @@
// mapped image.
void ClassLinker::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
class_roots_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
- Thread* const self = Thread::Current();
- {
- ReaderMutexLock mu(self, dex_lock_);
- if ((flags & kVisitRootFlagAllRoots) != 0) {
- for (GcRoot<mirror::DexCache>& dex_cache : dex_caches_) {
- dex_cache.VisitRoot(visitor, RootInfo(kRootVMInternal));
- }
- } else if ((flags & kVisitRootFlagNewRoots) != 0) {
- for (size_t index : new_dex_cache_roots_) {
- dex_caches_[index].VisitRoot(visitor, RootInfo(kRootVMInternal));
- }
- }
- if ((flags & kVisitRootFlagClearRootLog) != 0) {
- new_dex_cache_roots_.clear();
- }
- if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
- log_new_dex_caches_roots_ = true;
- } else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
- log_new_dex_caches_roots_ = false;
- }
- }
VisitClassRoots(visitor, flags);
array_iftable_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
for (GcRoot<mirror::Class>& root : find_array_class_cache_) {
@@ -1701,7 +1692,6 @@
long_array->GetWithoutChecks(j)));
const DexFile::ClassDef* dex_class_def = cp_dex_file->FindClassDef(descriptor, hash);
if (dex_class_def != nullptr) {
- RegisterDexFile(*cp_dex_file);
mirror::Class* klass = DefineClass(self, descriptor, hash, class_loader,
*cp_dex_file, *dex_class_def);
if (klass == nullptr) {
@@ -1847,7 +1837,7 @@
CHECK(self->IsExceptionPending()); // Expect an OOME.
return nullptr;
}
- klass->SetDexCache(FindDexCache(dex_file));
+ klass->SetDexCache(RegisterDexFile(dex_file));
SetupClass(dex_file, dex_class_def, klass, class_loader.Get());
@@ -2481,58 +2471,52 @@
RegisterDexFile(dex_file, dex_cache);
}
-bool ClassLinker::IsDexFileRegisteredLocked(const DexFile& dex_file) {
- dex_lock_.AssertSharedHeld(Thread::Current());
- for (GcRoot<mirror::DexCache>& root : dex_caches_) {
- mirror::DexCache* dex_cache = root.Read();
- if (dex_cache->GetDexFile() == &dex_file) {
- return true;
- }
- }
- return false;
-}
-
-bool ClassLinker::IsDexFileRegistered(const DexFile& dex_file) {
- ReaderMutexLock mu(Thread::Current(), dex_lock_);
- return IsDexFileRegisteredLocked(dex_file);
-}
-
void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache) {
- dex_lock_.AssertExclusiveHeld(Thread::Current());
+ Thread* const self = Thread::Current();
+ dex_lock_.AssertExclusiveHeld(self);
CHECK(dex_cache.Get() != nullptr) << dex_file.GetLocation();
CHECK(dex_cache->GetLocation()->Equals(dex_file.GetLocation()))
<< dex_cache->GetLocation()->ToModifiedUtf8() << " " << dex_file.GetLocation();
- dex_caches_.push_back(GcRoot<mirror::DexCache>(dex_cache.Get()));
- dex_cache->SetDexFile(&dex_file);
- if (log_new_dex_caches_roots_) {
- // TODO: This is not safe if we can remove dex caches.
- new_dex_cache_roots_.push_back(dex_caches_.size() - 1);
+ // Clean up pass to remove null dex caches.
+ // Null dex caches can occur due to class unloading and we are lazily removing null entries.
+ JavaVMExt* const vm = self->GetJniEnv()->vm;
+ for (auto it = dex_caches_.begin(); it != dex_caches_.end();) {
+ mirror::Object* dex_cache_root = self->DecodeJObject(*it);
+ if (dex_cache_root == nullptr) {
+ vm->DeleteWeakGlobalRef(self, *it);
+ it = dex_caches_.erase(it);
+ } else {
+ ++it;
+ }
}
+ dex_caches_.push_back(vm->AddWeakGlobalRef(self, dex_cache.Get()));
+ dex_cache->SetDexFile(&dex_file);
}
-void ClassLinker::RegisterDexFile(const DexFile& dex_file) {
+mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file) {
Thread* self = Thread::Current();
{
ReaderMutexLock mu(self, dex_lock_);
- if (IsDexFileRegisteredLocked(dex_file)) {
- return;
+ mirror::DexCache* dex_cache = FindDexCacheLocked(dex_file, true);
+ if (dex_cache != nullptr) {
+ return dex_cache;
}
}
// Don't alloc while holding the lock, since allocation may need to
// suspend all threads and another thread may need the dex_lock_ to
// get to a suspend point.
StackHandleScope<1> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(AllocDexCache(self, dex_file)));
- CHECK(dex_cache.Get() != nullptr) << "Failed to allocate dex cache for "
- << dex_file.GetLocation();
- {
- WriterMutexLock mu(self, dex_lock_);
- if (IsDexFileRegisteredLocked(dex_file)) {
- return;
- }
- RegisterDexFileLocked(dex_file, dex_cache);
+ Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(AllocDexCache(self, dex_file)));
+ CHECK(h_dex_cache.Get() != nullptr) << "Failed to allocate dex cache for "
+ << dex_file.GetLocation();
+ WriterMutexLock mu(self, dex_lock_);
+ mirror::DexCache* dex_cache = FindDexCacheLocked(dex_file, true);
+ if (dex_cache != nullptr) {
+ return dex_cache;
}
+ RegisterDexFileLocked(dex_file, h_dex_cache);
+ return h_dex_cache.Get();
}
void ClassLinker::RegisterDexFile(const DexFile& dex_file,
@@ -2541,36 +2525,44 @@
RegisterDexFileLocked(dex_file, dex_cache);
}
-mirror::DexCache* ClassLinker::FindDexCache(const DexFile& dex_file) {
- ReaderMutexLock mu(Thread::Current(), dex_lock_);
+mirror::DexCache* ClassLinker::FindDexCache(const DexFile& dex_file, bool allow_failure) {
+ Thread* const self = Thread::Current();
+ ReaderMutexLock mu(self, dex_lock_);
+ return FindDexCacheLocked(dex_file, allow_failure);
+}
+
+mirror::DexCache* ClassLinker::FindDexCacheLocked(const DexFile& dex_file, bool allow_failure) {
+ Thread* const self = Thread::Current();
// Search assuming unique-ness of dex file.
- for (size_t i = 0; i != dex_caches_.size(); ++i) {
- mirror::DexCache* dex_cache = GetDexCache(i);
- if (dex_cache->GetDexFile() == &dex_file) {
+ for (jobject weak_root : dex_caches_) {
+ mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(self->DecodeJObject(weak_root));
+ if (dex_cache != nullptr && dex_cache->GetDexFile() == &dex_file) {
return dex_cache;
}
}
- // Search matching by location name.
+ if (allow_failure) {
+ return nullptr;
+ }
std::string location(dex_file.GetLocation());
- for (size_t i = 0; i != dex_caches_.size(); ++i) {
- mirror::DexCache* dex_cache = GetDexCache(i);
- if (dex_cache->GetDexFile()->GetLocation() == location) {
- return dex_cache;
- }
- }
// Failure, dump diagnostic and abort.
- for (size_t i = 0; i != dex_caches_.size(); ++i) {
- mirror::DexCache* dex_cache = GetDexCache(i);
- LOG(ERROR) << "Registered dex file " << i << " = " << dex_cache->GetDexFile()->GetLocation();
+ for (jobject weak_root : dex_caches_) {
+ mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(self->DecodeJObject(weak_root));
+ if (dex_cache != nullptr) {
+ LOG(ERROR) << "Registered dex file " << dex_cache->GetDexFile()->GetLocation();
+ }
}
LOG(FATAL) << "Failed to find DexCache for DexFile " << location;
UNREACHABLE();
}
void ClassLinker::FixupDexCaches(ArtMethod* resolution_method) {
- ReaderMutexLock mu(Thread::Current(), dex_lock_);
- for (auto& dex_cache : dex_caches_) {
- dex_cache.Read()->Fixup(resolution_method, image_pointer_size_);
+ Thread* const self = Thread::Current();
+ ReaderMutexLock mu(self, dex_lock_);
+ for (jobject weak_root : dex_caches_) {
+ mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(self->DecodeJObject(weak_root));
+ if (dex_cache != nullptr) {
+ dex_cache->Fixup(resolution_method, image_pointer_size_);
+ }
}
}
@@ -2701,6 +2693,11 @@
new_class->SetVTable(java_lang_Object->GetVTable());
new_class->SetPrimitiveType(Primitive::kPrimNot);
new_class->SetClassLoader(component_type->GetClassLoader());
+ if (component_type->IsPrimitive()) {
+ new_class->SetClassFlags(mirror::kClassFlagNoReferenceFields);
+ } else {
+ new_class->SetClassFlags(mirror::kClassFlagObjectArray);
+ }
mirror::Class::SetStatus(new_class, mirror::Class::kStatusLoaded, self);
{
ArtMethod* imt[mirror::Class::kImtSize];
@@ -3401,11 +3398,13 @@
DCHECK(proxy_class->IsProxyClass());
DCHECK(proxy_method->IsProxyMethod());
{
- ReaderMutexLock mu(Thread::Current(), dex_lock_);
+ Thread* const self = Thread::Current();
+ ReaderMutexLock mu(self, dex_lock_);
// Locate the dex cache of the original interface/Object
- for (const GcRoot<mirror::DexCache>& root : dex_caches_) {
- auto* dex_cache = root.Read();
- if (proxy_method->HasSameDexCacheResolvedTypes(dex_cache->GetResolvedTypes())) {
+ for (jobject weak_root : dex_caches_) {
+ mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(self->DecodeJObject(weak_root));
+ if (dex_cache != nullptr &&
+ proxy_method->HasSameDexCacheResolvedTypes(dex_cache->GetResolvedTypes())) {
ArtMethod* resolved_method = dex_cache->GetResolvedMethod(
proxy_method->GetDexMethodIndex(), image_pointer_size_);
CHECK(resolved_method != nullptr);
@@ -4385,9 +4384,10 @@
}
// Inherit reference flags (if any) from the superclass.
- int reference_flags = (super->GetAccessFlags() & kAccReferenceFlagsMask);
+ uint32_t reference_flags = (super->GetClassFlags() & mirror::kClassFlagReference);
if (reference_flags != 0) {
- klass->SetAccessFlags(klass->GetAccessFlags() | reference_flags);
+ CHECK_EQ(klass->GetClassFlags(), 0u);
+ klass->SetClassFlags(klass->GetClassFlags() | reference_flags);
}
// Disallow custom direct subclasses of java.lang.ref.Reference.
if (init_done_ && super == GetClassRoot(kJavaLangRefReference)) {
@@ -5227,6 +5227,31 @@
*class_size = size;
} else {
klass->SetNumReferenceInstanceFields(num_reference_fields);
+ mirror::Class* super_class = klass->GetSuperClass();
+ if (num_reference_fields == 0 || super_class == nullptr) {
+ // object has one reference field, klass, but we ignore it since we always visit the class.
+ // super_class is null iff the class is java.lang.Object.
+ if (super_class == nullptr ||
+ (super_class->GetClassFlags() & mirror::kClassFlagNoReferenceFields) != 0) {
+ klass->SetClassFlags(klass->GetClassFlags() | mirror::kClassFlagNoReferenceFields);
+ }
+ }
+ if (kIsDebugBuild) {
+ DCHECK_EQ(super_class == nullptr, klass->DescriptorEquals("Ljava/lang/Object;"));
+ size_t total_reference_instance_fields = 0;
+ mirror::Class* cur_super = klass.Get();
+ while (cur_super != nullptr) {
+ total_reference_instance_fields += cur_super->NumReferenceInstanceFieldsDuringLinking();
+ cur_super = cur_super->GetSuperClass();
+ }
+ if (super_class == nullptr) {
+ CHECK_EQ(total_reference_instance_fields, 1u) << PrettyDescriptor(klass.Get());
+ } else {
+ // Check that there is at least num_reference_fields other than Object.class.
+ CHECK_GE(total_reference_instance_fields, 1u + num_reference_fields)
+ << PrettyClass(klass.Get());
+ }
+ }
if (!klass->IsVariableSize()) {
std::string temp;
DCHECK_GE(size, sizeof(mirror::Object)) << klass->GetDescriptor(&temp);
@@ -5528,6 +5553,36 @@
}
}
+ArtMethod* ClassLinker::ResolveMethodWithoutInvokeType(const DexFile& dex_file,
+ uint32_t method_idx,
+ Handle<mirror::DexCache> dex_cache,
+ Handle<mirror::ClassLoader> class_loader) {
+ ArtMethod* resolved = dex_cache->GetResolvedMethod(method_idx, image_pointer_size_);
+ if (resolved != nullptr && !resolved->IsRuntimeMethod()) {
+ DCHECK(resolved->GetDeclaringClassUnchecked() != nullptr) << resolved->GetDexMethodIndex();
+ return resolved;
+ }
+ // Fail, get the declaring class.
+ const DexFile::MethodId& method_id = dex_file.GetMethodId(method_idx);
+ mirror::Class* klass = ResolveType(dex_file, method_id.class_idx_, dex_cache, class_loader);
+ if (klass == nullptr) {
+ Thread::Current()->AssertPendingException();
+ return nullptr;
+ }
+ if (klass->IsInterface()) {
+ LOG(FATAL) << "ResolveAmbiguousMethod: unexpected method in interface: " << PrettyClass(klass);
+ return nullptr;
+ }
+
+ // Search both direct and virtual methods
+ resolved = klass->FindDirectMethod(dex_cache.Get(), method_idx, image_pointer_size_);
+ if (resolved == nullptr) {
+ resolved = klass->FindVirtualMethod(dex_cache.Get(), method_idx, image_pointer_size_);
+ }
+
+ return resolved;
+}
+
ArtField* ClassLinker::ResolveField(const DexFile& dex_file, uint32_t field_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, bool is_static) {
@@ -5816,11 +5871,6 @@
// We could move the jobject to the callers, but all call-sites do this...
ScopedObjectAccessUnchecked soa(self);
- // Register the dex files.
- for (const DexFile* dex_file : dex_files) {
- RegisterDexFile(*dex_file);
- }
-
// For now, create a libcore-level DexFile for each ART DexFile. This "explodes" multidex.
StackHandleScope<10> hs(self);
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 7243a25..cc56e8b 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -229,6 +229,12 @@
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* ResolveMethod(Thread* self, uint32_t method_idx, ArtMethod* referrer, InvokeType type)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_, !Roles::uninterruptible_);
+ ArtMethod* ResolveMethodWithoutInvokeType(const DexFile& dex_file,
+ uint32_t method_idx,
+ Handle<mirror::DexCache> dex_cache,
+ Handle<mirror::ClassLoader> class_loader)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_, !Roles::uninterruptible_);
ArtField* GetResolvedField(uint32_t field_idx, mirror::Class* field_declaring_class)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -272,7 +278,7 @@
void RunRootClinits() SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
- void RegisterDexFile(const DexFile& dex_file)
+ mirror::DexCache* RegisterDexFile(const DexFile& dex_file)
REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
void RegisterDexFile(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
@@ -303,9 +309,7 @@
void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::DexCache* FindDexCache(const DexFile& dex_file)
- REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsDexFileRegistered(const DexFile& dex_file)
+ mirror::DexCache* FindDexCache(const DexFile& dex_file, bool allow_failure = false)
REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
void FixupDexCaches(ArtMethod* resolution_method)
REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
@@ -465,7 +469,7 @@
// Used by image writer for checking.
bool ClassInClassTable(mirror::Class* klass)
- REQUIRES(!Locks::classlinker_classes_lock_)
+ REQUIRES(Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* CreateRuntimeMethod();
@@ -555,8 +559,9 @@
void RegisterDexFileLocked(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
REQUIRES(dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsDexFileRegisteredLocked(const DexFile& dex_file)
- SHARED_REQUIRES(dex_lock_, Locks::mutator_lock_);
+ mirror::DexCache* FindDexCacheLocked(const DexFile& dex_file, bool allow_failure)
+ REQUIRES(dex_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool InitializeClass(Thread* self, Handle<mirror::Class> klass, bool can_run_clinit,
bool can_init_parents)
@@ -625,7 +630,9 @@
size_t GetDexCacheCount() SHARED_REQUIRES(Locks::mutator_lock_, dex_lock_) {
return dex_caches_.size();
}
- mirror::DexCache* GetDexCache(size_t idx) SHARED_REQUIRES(Locks::mutator_lock_, dex_lock_);
+ const std::list<jobject>& GetDexCaches() SHARED_REQUIRES(Locks::mutator_lock_, dex_lock_) {
+ return dex_caches_;
+ }
const OatFile* FindOpenedOatFileFromOatLocation(const std::string& oat_location)
REQUIRES(!dex_lock_);
@@ -696,8 +703,9 @@
std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
mutable ReaderWriterMutex dex_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- std::vector<size_t> new_dex_cache_roots_ GUARDED_BY(dex_lock_);
- std::vector<GcRoot<mirror::DexCache>> dex_caches_ GUARDED_BY(dex_lock_);
+ // JNI weak globals to allow dex caches to get unloaded. We lazily delete weak globals when we
+ // register new dex files.
+ std::list<jobject> dex_caches_ GUARDED_BY(dex_lock_);
std::vector<const OatFile*> oat_files_ GUARDED_BY(dex_lock_);
// This contains the class laoders which have class tables. It is populated by
@@ -730,7 +738,6 @@
size_t find_array_class_cache_next_victim_;
bool init_done_;
- bool log_new_dex_caches_roots_ GUARDED_BY(dex_lock_);
bool log_new_class_table_roots_ GUARDED_BY(Locks::classlinker_classes_lock_);
InternTable* intern_table_;
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 3c84d8f..0d1c875 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -488,6 +488,7 @@
struct ClassOffsets : public CheckOffsets<mirror::Class> {
ClassOffsets() : CheckOffsets<mirror::Class>(false, "Ljava/lang/Class;") {
addOffset(OFFSETOF_MEMBER(mirror::Class, access_flags_), "accessFlags");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, class_flags_), "classFlags");
addOffset(OFFSETOF_MEMBER(mirror::Class, class_loader_), "classLoader");
addOffset(OFFSETOF_MEMBER(mirror::Class, class_size_), "classSize");
addOffset(OFFSETOF_MEMBER(mirror::Class, clinit_thread_id_), "clinitThreadId");
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 5f9e413..56c5d1a 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -551,7 +551,8 @@
}
Thread* self = Thread::Current();
- jobject class_loader = Runtime::Current()->GetClassLinker()->CreatePathClassLoader(self, class_path);
+ jobject class_loader = Runtime::Current()->GetClassLinker()->CreatePathClassLoader(self,
+ class_path);
self->SetClassLoaderOverride(class_loader);
return class_loader;
}
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 52590a5..213f25d 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -32,13 +32,16 @@
#include "base/hash_map.h"
#include "base/logging.h"
#include "base/stringprintf.h"
-#include "class_linker.h"
+#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "dex_file_verifier.h"
#include "globals.h"
#include "leb128.h"
+#include "mirror/field.h"
+#include "mirror/method.h"
#include "mirror/string.h"
#include "os.h"
+#include "reflection.h"
#include "safe_map.h"
#include "handle_scope-inl.h"
#include "thread.h"
@@ -1044,6 +1047,918 @@
}
}
+// Read a signed integer. "zwidth" is the zero-based byte count.
+static int32_t ReadSignedInt(const uint8_t* ptr, int zwidth) {
+ int32_t val = 0;
+ for (int i = zwidth; i >= 0; --i) {
+ val = ((uint32_t)val >> 8) | (((int32_t)*ptr++) << 24);
+ }
+ val >>= (3 - zwidth) * 8;
+ return val;
+}
+
+// Read an unsigned integer. "zwidth" is the zero-based byte count,
+// "fill_on_right" indicates which side we want to zero-fill from.
+static uint32_t ReadUnsignedInt(const uint8_t* ptr, int zwidth, bool fill_on_right) {
+ uint32_t val = 0;
+ for (int i = zwidth; i >= 0; --i) {
+ val = (val >> 8) | (((uint32_t)*ptr++) << 24);
+ }
+ if (!fill_on_right) {
+ val >>= (3 - zwidth) * 8;
+ }
+ return val;
+}
+
+// Read a signed long. "zwidth" is the zero-based byte count.
+static int64_t ReadSignedLong(const uint8_t* ptr, int zwidth) {
+ int64_t val = 0;
+ for (int i = zwidth; i >= 0; --i) {
+ val = ((uint64_t)val >> 8) | (((int64_t)*ptr++) << 56);
+ }
+ val >>= (7 - zwidth) * 8;
+ return val;
+}
+
+// Read an unsigned long. "zwidth" is the zero-based byte count,
+// "fill_on_right" indicates which side we want to zero-fill from.
+static uint64_t ReadUnsignedLong(const uint8_t* ptr, int zwidth, bool fill_on_right) {
+ uint64_t val = 0;
+ for (int i = zwidth; i >= 0; --i) {
+ val = (val >> 8) | (((uint64_t)*ptr++) << 56);
+ }
+ if (!fill_on_right) {
+ val >>= (7 - zwidth) * 8;
+ }
+ return val;
+}
+
+const DexFile::AnnotationSetItem* DexFile::FindAnnotationSetForField(ArtField* field) const {
+ mirror::Class* klass = field->GetDeclaringClass();
+ const AnnotationsDirectoryItem* annotations_dir = GetAnnotationsDirectory(*klass->GetClassDef());
+ if (annotations_dir == nullptr) {
+ return nullptr;
+ }
+ const FieldAnnotationsItem* field_annotations = GetFieldAnnotations(annotations_dir);
+ if (field_annotations == nullptr) {
+ return nullptr;
+ }
+ uint32_t field_index = field->GetDexFieldIndex();
+ uint32_t field_count = annotations_dir->fields_size_;
+ for (uint32_t i = 0; i < field_count; ++i) {
+ if (field_annotations[i].field_idx_ == field_index) {
+ return GetFieldAnnotationSetItem(field_annotations[i]);
+ }
+ }
+ return nullptr;
+}
+
+mirror::Object* DexFile::GetAnnotationForField(ArtField* field,
+ Handle<mirror::Class> annotation_class) const {
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
+ if (annotation_set == nullptr) {
+ return nullptr;
+ }
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::Class> field_class(hs.NewHandle(field->GetDeclaringClass()));
+ return GetAnnotationObjectFromAnnotationSet(
+ field_class, annotation_set, kDexVisibilityRuntime, annotation_class);
+}
+
+mirror::ObjectArray<mirror::Object>* DexFile::GetAnnotationsForField(ArtField* field) const {
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::Class> field_class(hs.NewHandle(field->GetDeclaringClass()));
+ return ProcessAnnotationSet(field_class, annotation_set, kDexVisibilityRuntime);
+}
+
+mirror::ObjectArray<mirror::Object>* DexFile::GetSignatureAnnotationForField(ArtField* field)
+ const {
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
+ if (annotation_set == nullptr) {
+ return nullptr;
+ }
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::Class> field_class(hs.NewHandle(field->GetDeclaringClass()));
+ return GetSignatureValue(field_class, annotation_set);
+}
+
+bool DexFile::IsFieldAnnotationPresent(ArtField* field, Handle<mirror::Class> annotation_class)
+ const {
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
+ if (annotation_set == nullptr) {
+ return false;
+ }
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::Class> field_class(hs.NewHandle(field->GetDeclaringClass()));
+ const AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
+ field_class, annotation_set, kDexVisibilityRuntime, annotation_class);
+ return annotation_item != nullptr;
+}
+
+const DexFile::AnnotationSetItem* DexFile::FindAnnotationSetForMethod(ArtMethod* method) const {
+ mirror::Class* klass = method->GetDeclaringClass();
+ const AnnotationsDirectoryItem* annotations_dir = GetAnnotationsDirectory(*klass->GetClassDef());
+ if (annotations_dir == nullptr) {
+ return nullptr;
+ }
+ const MethodAnnotationsItem* method_annotations = GetMethodAnnotations(annotations_dir);
+ if (method_annotations == nullptr) {
+ return nullptr;
+ }
+ uint32_t method_index = method->GetDexMethodIndex();
+ uint32_t method_count = annotations_dir->methods_size_;
+ for (uint32_t i = 0; i < method_count; ++i) {
+ if (method_annotations[i].method_idx_ == method_index) {
+ return GetMethodAnnotationSetItem(method_annotations[i]);
+ }
+ }
+ return nullptr;
+}
+
+const DexFile::ParameterAnnotationsItem* DexFile::FindAnnotationsItemForMethod(ArtMethod* method)
+ const {
+ mirror::Class* klass = method->GetDeclaringClass();
+ const AnnotationsDirectoryItem* annotations_dir = GetAnnotationsDirectory(*klass->GetClassDef());
+ if (annotations_dir == nullptr) {
+ return nullptr;
+ }
+ const ParameterAnnotationsItem* parameter_annotations = GetParameterAnnotations(annotations_dir);
+ if (parameter_annotations == nullptr) {
+ return nullptr;
+ }
+ uint32_t method_index = method->GetDexMethodIndex();
+ uint32_t parameter_count = annotations_dir->parameters_size_;
+ for (uint32_t i = 0; i < parameter_count; ++i) {
+ if (parameter_annotations[i].method_idx_ == method_index) {
+ return &parameter_annotations[i];
+ }
+ }
+ return nullptr;
+}
+
+mirror::Object* DexFile::GetAnnotationDefaultValue(ArtMethod* method) const {
+ mirror::Class* klass = method->GetDeclaringClass();
+ const AnnotationsDirectoryItem* annotations_dir = GetAnnotationsDirectory(*klass->GetClassDef());
+ if (annotations_dir == nullptr) {
+ return nullptr;
+ }
+ const AnnotationSetItem* annotation_set = GetClassAnnotationSet(annotations_dir);
+ if (annotation_set == nullptr) {
+ return nullptr;
+ }
+ const AnnotationItem* annotation_item = SearchAnnotationSet(annotation_set,
+ "Ldalvik/annotation/AnnotationDefault;", kDexVisibilitySystem);
+ if (annotation_item == nullptr) {
+ return nullptr;
+ }
+ const uint8_t* annotation = SearchEncodedAnnotation(annotation_item->annotation_, "value");
+ if (annotation == nullptr) {
+ return nullptr;
+ }
+ uint8_t header_byte = *(annotation++);
+ if ((header_byte & kDexAnnotationValueTypeMask) != kDexAnnotationAnnotation) {
+ return nullptr;
+ }
+ annotation = SearchEncodedAnnotation(annotation, method->GetName());
+ if (annotation == nullptr) {
+ return nullptr;
+ }
+ AnnotationValue annotation_value;
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::Class> h_klass(hs.NewHandle(klass));
+ Handle<mirror::Class> return_type(hs.NewHandle(method->GetReturnType()));
+ if (!ProcessAnnotationValue(h_klass, &annotation, &annotation_value, return_type, kAllObjects)) {
+ return nullptr;
+ }
+ return annotation_value.value_.GetL();
+}
+
+mirror::Object* DexFile::GetAnnotationForMethod(ArtMethod* method,
+ Handle<mirror::Class> annotation_class) const {
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
+ if (annotation_set == nullptr) {
+ return nullptr;
+ }
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass()));
+ return GetAnnotationObjectFromAnnotationSet(method_class, annotation_set,
+ kDexVisibilityRuntime, annotation_class);
+}
+
+mirror::ObjectArray<mirror::Object>* DexFile::GetAnnotationsForMethod(ArtMethod* method) const {
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass()));
+ return ProcessAnnotationSet(method_class, annotation_set, kDexVisibilityRuntime);
+}
+
+mirror::ObjectArray<mirror::Object>* DexFile::GetExceptionTypesForMethod(ArtMethod* method) const {
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
+ if (annotation_set == nullptr) {
+ return nullptr;
+ }
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass()));
+ return GetThrowsValue(method_class, annotation_set);
+}
+
+mirror::ObjectArray<mirror::Object>* DexFile::GetParameterAnnotations(ArtMethod* method) const {
+ const ParameterAnnotationsItem* parameter_annotations = FindAnnotationsItemForMethod(method);
+ if (parameter_annotations == nullptr) {
+ return nullptr;
+ }
+ const AnnotationSetRefList* set_ref_list =
+ GetParameterAnnotationSetRefList(parameter_annotations);
+ if (set_ref_list == nullptr) {
+ return nullptr;
+ }
+ uint32_t size = set_ref_list->size_;
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass()));
+ return ProcessAnnotationSetRefList(method_class, set_ref_list, size);
+}
+
+bool DexFile::IsMethodAnnotationPresent(ArtMethod* method, Handle<mirror::Class> annotation_class)
+ const {
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
+ if (annotation_set == nullptr) {
+ return false;
+ }
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass()));
+ const AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
+ method_class, annotation_set, kDexVisibilityRuntime, annotation_class);
+ return (annotation_item != nullptr);
+}
+
+const DexFile::AnnotationSetItem* DexFile::FindAnnotationSetForClass(Handle<mirror::Class> klass)
+ const {
+ const AnnotationsDirectoryItem* annotations_dir = GetAnnotationsDirectory(*klass->GetClassDef());
+ if (annotations_dir == nullptr) {
+ return nullptr;
+ }
+ return GetClassAnnotationSet(annotations_dir);
+}
+
+mirror::Object* DexFile::GetAnnotationForClass(Handle<mirror::Class> klass,
+ Handle<mirror::Class> annotation_class) const {
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+ if (annotation_set == nullptr) {
+ return nullptr;
+ }
+ return GetAnnotationObjectFromAnnotationSet(klass, annotation_set, kDexVisibilityRuntime,
+ annotation_class);
+}
+
+mirror::ObjectArray<mirror::Object>* DexFile::GetAnnotationsForClass(Handle<mirror::Class> klass)
+ const {
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+ return ProcessAnnotationSet(klass, annotation_set, kDexVisibilityRuntime);
+}
+
+bool DexFile::IsClassAnnotationPresent(Handle<mirror::Class> klass,
+ Handle<mirror::Class> annotation_class) const {
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+ if (annotation_set == nullptr) {
+ return false;
+ }
+ const AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
+ klass, annotation_set, kDexVisibilityRuntime, annotation_class);
+ return (annotation_item != nullptr);
+}
+
+mirror::Object* DexFile::CreateAnnotationMember(Handle<mirror::Class> klass,
+ Handle<mirror::Class> annotation_class, const uint8_t** annotation) const {
+ Thread* self = Thread::Current();
+ ScopedObjectAccessUnchecked soa(self);
+ StackHandleScope<5> hs(self);
+ uint32_t element_name_index = DecodeUnsignedLeb128(annotation);
+ const char* name = StringDataByIdx(element_name_index);
+ Handle<mirror::String> string_name(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(self, name)));
+
+ ArtMethod* annotation_method =
+ annotation_class->FindDeclaredVirtualMethodByName(name, sizeof(void*));
+ if (annotation_method == nullptr) {
+ return nullptr;
+ }
+ Handle<mirror::Class> method_return(hs.NewHandle(annotation_method->GetReturnType()));
+
+ AnnotationValue annotation_value;
+ if (!ProcessAnnotationValue(klass, annotation, &annotation_value, method_return, kAllObjects)) {
+ return nullptr;
+ }
+ Handle<mirror::Object> value_object(hs.NewHandle(annotation_value.value_.GetL()));
+
+ mirror::Class* annotation_member_class =
+ WellKnownClasses::ToClass(WellKnownClasses::libcore_reflect_AnnotationMember);
+ Handle<mirror::Object> new_member(hs.NewHandle(annotation_member_class->AllocObject(self)));
+ Handle<mirror::Method> method_object(
+ hs.NewHandle(mirror::Method::CreateFromArtMethod(self, annotation_method)));
+
+ if (new_member.Get() == nullptr || string_name.Get() == nullptr ||
+ method_object.Get() == nullptr || method_return.Get() == nullptr) {
+ LOG(ERROR) << StringPrintf("Failed creating annotation element (m=%p n=%p a=%p r=%p",
+ new_member.Get(), string_name.Get(), method_object.Get(), method_return.Get());
+ return nullptr;
+ }
+
+ JValue result;
+ ArtMethod* annotation_member_init =
+ soa.DecodeMethod(WellKnownClasses::libcore_reflect_AnnotationMember_init);
+ uint32_t args[5] = { static_cast<uint32_t>(reinterpret_cast<uintptr_t>(new_member.Get())),
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(string_name.Get())),
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(value_object.Get())),
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(method_return.Get())),
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(method_object.Get()))
+ };
+ annotation_member_init->Invoke(self, args, sizeof(args), &result, "VLLLL");
+ if (self->IsExceptionPending()) {
+ LOG(INFO) << "Exception in AnnotationMember.<init>";
+ return nullptr;
+ }
+
+ return new_member.Get();
+}
+
+const DexFile::AnnotationItem* DexFile::GetAnnotationItemFromAnnotationSet(
+ Handle<mirror::Class> klass, const AnnotationSetItem* annotation_set, uint32_t visibility,
+ Handle<mirror::Class> annotation_class) const {
+ for (uint32_t i = 0; i < annotation_set->size_; ++i) {
+ const AnnotationItem* annotation_item = GetAnnotationItem(annotation_set, i);
+ if (annotation_item->visibility_ != visibility) {
+ continue;
+ }
+ const uint8_t* annotation = annotation_item->annotation_;
+ uint32_t type_index = DecodeUnsignedLeb128(&annotation);
+ mirror::Class* resolved_class = Runtime::Current()->GetClassLinker()->ResolveType(
+ klass->GetDexFile(), type_index, klass.Get());
+ if (resolved_class == nullptr) {
+ std::string temp;
+ LOG(WARNING) << StringPrintf("Unable to resolve %s annotation class %d",
+ klass->GetDescriptor(&temp), type_index);
+ CHECK(Thread::Current()->IsExceptionPending());
+ Thread::Current()->ClearException();
+ continue;
+ }
+ if (resolved_class == annotation_class.Get()) {
+ return annotation_item;
+ }
+ }
+
+ return nullptr;
+}
+
+mirror::Object* DexFile::GetAnnotationObjectFromAnnotationSet(Handle<mirror::Class> klass,
+ const AnnotationSetItem* annotation_set, uint32_t visibility,
+ Handle<mirror::Class> annotation_class) const {
+ const AnnotationItem* annotation_item =
+ GetAnnotationItemFromAnnotationSet(klass, annotation_set, visibility, annotation_class);
+ if (annotation_item == nullptr) {
+ return nullptr;
+ }
+ const uint8_t* annotation = annotation_item->annotation_;
+ return ProcessEncodedAnnotation(klass, &annotation);
+}
+
+mirror::Object* DexFile::GetAnnotationValue(Handle<mirror::Class> klass,
+ const AnnotationItem* annotation_item, const char* annotation_name,
+ Handle<mirror::Class> array_class, uint32_t expected_type) const {
+ const uint8_t* annotation =
+ SearchEncodedAnnotation(annotation_item->annotation_, annotation_name);
+ if (annotation == nullptr) {
+ return nullptr;
+ }
+ AnnotationValue annotation_value;
+ if (!ProcessAnnotationValue(klass, &annotation, &annotation_value, array_class, kAllObjects)) {
+ return nullptr;
+ }
+ if (annotation_value.type_ != expected_type) {
+ return nullptr;
+ }
+ return annotation_value.value_.GetL();
+}
+
+mirror::ObjectArray<mirror::Object>* DexFile::GetSignatureValue(Handle<mirror::Class> klass,
+ const AnnotationSetItem* annotation_set) const {
+ StackHandleScope<1> hs(Thread::Current());
+ const AnnotationItem* annotation_item =
+ SearchAnnotationSet(annotation_set, "Ldalvik/annotation/Signature;", kDexVisibilitySystem);
+ if (annotation_item == nullptr) {
+ return nullptr;
+ }
+ mirror::Class* string_class = mirror::String::GetJavaLangString();
+ Handle<mirror::Class> string_array_class(hs.NewHandle(
+ Runtime::Current()->GetClassLinker()->FindArrayClass(Thread::Current(), &string_class)));
+ mirror::Object* obj =
+ GetAnnotationValue(klass, annotation_item, "value", string_array_class, kDexAnnotationArray);
+ if (obj == nullptr) {
+ return nullptr;
+ }
+ return obj->AsObjectArray<mirror::Object>();
+}
+
+mirror::ObjectArray<mirror::Object>* DexFile::GetThrowsValue(Handle<mirror::Class> klass,
+ const AnnotationSetItem* annotation_set) const {
+ StackHandleScope<1> hs(Thread::Current());
+ const AnnotationItem* annotation_item =
+ SearchAnnotationSet(annotation_set, "Ldalvik/annotation/Throws;", kDexVisibilitySystem);
+ if (annotation_item == nullptr) {
+ return nullptr;
+ }
+ mirror::Class* class_class = mirror::Class::GetJavaLangClass();
+ Handle<mirror::Class> class_array_class(hs.NewHandle(
+ Runtime::Current()->GetClassLinker()->FindArrayClass(Thread::Current(), &class_class)));
+ mirror::Object* obj =
+ GetAnnotationValue(klass, annotation_item, "value", class_array_class, kDexAnnotationArray);
+ if (obj == nullptr) {
+ return nullptr;
+ }
+ return obj->AsObjectArray<mirror::Object>();
+}
+
+mirror::ObjectArray<mirror::Object>* DexFile::ProcessAnnotationSet(Handle<mirror::Class> klass,
+ const AnnotationSetItem* annotation_set, uint32_t visibility) const {
+ Thread* self = Thread::Current();
+ ScopedObjectAccessUnchecked soa(self);
+ StackHandleScope<2> hs(self);
+ Handle<mirror::Class> annotation_array_class(hs.NewHandle(
+ soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array)));
+ if (annotation_set == nullptr) {
+ return mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_array_class.Get(), 0);
+ }
+
+ uint32_t size = annotation_set->size_;
+ Handle<mirror::ObjectArray<mirror::Object>> result(hs.NewHandle(
+ mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_array_class.Get(), size)));
+ if (result.Get() == nullptr) {
+ return nullptr;
+ }
+
+ uint32_t dest_index = 0;
+ for (uint32_t i = 0; i < size; ++i) {
+ const AnnotationItem* annotation_item = GetAnnotationItem(annotation_set, i);
+ if (annotation_item->visibility_ != visibility) {
+ continue;
+ }
+ const uint8_t* annotation = annotation_item->annotation_;
+ mirror::Object* annotation_obj = ProcessEncodedAnnotation(klass, &annotation);
+ if (annotation_obj != nullptr) {
+ result->SetWithoutChecks<false>(dest_index, annotation_obj);
+ ++dest_index;
+ }
+ }
+
+ if (dest_index == size) {
+ return result.Get();
+ }
+
+ mirror::ObjectArray<mirror::Object>* trimmed_result =
+ mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_array_class.Get(), dest_index);
+ for (uint32_t i = 0; i < dest_index; ++i) {
+ mirror::Object* obj = result->GetWithoutChecks(i);
+ trimmed_result->SetWithoutChecks<false>(i, obj);
+ }
+
+ return trimmed_result;
+}
+
+mirror::ObjectArray<mirror::Object>* DexFile::ProcessAnnotationSetRefList(
+ Handle<mirror::Class> klass, const AnnotationSetRefList* set_ref_list, uint32_t size) const {
+ Thread* self = Thread::Current();
+ ScopedObjectAccessUnchecked soa(self);
+ StackHandleScope<1> hs(self);
+ mirror::Class* annotation_array_class =
+ soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array);
+ mirror::Class* annotation_array_array_class =
+ Runtime::Current()->GetClassLinker()->FindArrayClass(self, &annotation_array_class);
+ Handle<mirror::ObjectArray<mirror::Object>> annotation_array_array(hs.NewHandle(
+ mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_array_array_class, size)));
+ if (annotation_array_array.Get() == nullptr) {
+ LOG(ERROR) << "Annotation set ref array allocation failed";
+ return nullptr;
+ }
+ for (uint32_t index = 0; index < size; ++index) {
+ const AnnotationSetRefItem* set_ref_item = &set_ref_list->list_[index];
+ const AnnotationSetItem* set_item = GetSetRefItemItem(set_ref_item);
+ mirror::Object* annotation_set = ProcessAnnotationSet(klass, set_item, kDexVisibilityRuntime);
+ if (annotation_set == nullptr) {
+ return nullptr;
+ }
+ annotation_array_array->SetWithoutChecks<false>(index, annotation_set);
+ }
+ return annotation_array_array.Get();
+}
+
+bool DexFile::ProcessAnnotationValue(Handle<mirror::Class> klass, const uint8_t** annotation_ptr,
+ AnnotationValue* annotation_value, Handle<mirror::Class> array_class,
+ DexFile::AnnotationResultStyle result_style) const {
+ Thread* self = Thread::Current();
+ mirror::Object* element_object = nullptr;
+ bool set_object = false;
+ Primitive::Type primitive_type = Primitive::kPrimVoid;
+ const uint8_t* annotation = *annotation_ptr;
+ uint8_t header_byte = *(annotation++);
+ uint8_t value_type = header_byte & kDexAnnotationValueTypeMask;
+ uint8_t value_arg = header_byte >> kDexAnnotationValueArgShift;
+ int32_t width = value_arg + 1;
+ annotation_value->type_ = value_type;
+
+ switch (value_type) {
+ case kDexAnnotationByte:
+ annotation_value->value_.SetB(static_cast<int8_t>(ReadSignedInt(annotation, value_arg)));
+ primitive_type = Primitive::kPrimByte;
+ break;
+ case kDexAnnotationShort:
+ annotation_value->value_.SetS(static_cast<int16_t>(ReadSignedInt(annotation, value_arg)));
+ primitive_type = Primitive::kPrimShort;
+ break;
+ case kDexAnnotationChar:
+ annotation_value->value_.SetC(static_cast<uint16_t>(ReadUnsignedInt(annotation, value_arg,
+ false)));
+ primitive_type = Primitive::kPrimChar;
+ break;
+ case kDexAnnotationInt:
+ annotation_value->value_.SetI(ReadSignedInt(annotation, value_arg));
+ primitive_type = Primitive::kPrimInt;
+ break;
+ case kDexAnnotationLong:
+ annotation_value->value_.SetJ(ReadSignedLong(annotation, value_arg));
+ primitive_type = Primitive::kPrimLong;
+ break;
+ case kDexAnnotationFloat:
+ annotation_value->value_.SetI(ReadUnsignedInt(annotation, value_arg, true));
+ primitive_type = Primitive::kPrimFloat;
+ break;
+ case kDexAnnotationDouble:
+ annotation_value->value_.SetJ(ReadUnsignedLong(annotation, value_arg, true));
+ primitive_type = Primitive::kPrimDouble;
+ break;
+ case kDexAnnotationBoolean:
+ annotation_value->value_.SetZ(value_arg != 0);
+ primitive_type = Primitive::kPrimBoolean;
+ width = 0;
+ break;
+ case kDexAnnotationString: {
+ uint32_t index = ReadUnsignedInt(annotation, value_arg, false);
+ if (result_style == kAllRaw) {
+ annotation_value->value_.SetI(index);
+ } else {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache()));
+ element_object = Runtime::Current()->GetClassLinker()->ResolveString(
+ klass->GetDexFile(), index, dex_cache);
+ set_object = true;
+ if (element_object == nullptr) {
+ return false;
+ }
+ }
+ break;
+ }
+ case kDexAnnotationType: {
+ uint32_t index = ReadUnsignedInt(annotation, value_arg, false);
+ if (result_style == kAllRaw) {
+ annotation_value->value_.SetI(index);
+ } else {
+ element_object = Runtime::Current()->GetClassLinker()->ResolveType(
+ klass->GetDexFile(), index, klass.Get());
+ set_object = true;
+ if (element_object == nullptr) {
+ self->ClearException();
+ const char* msg = StringByTypeIdx(index);
+ self->ThrowNewException("Ljava/lang/TypeNotPresentException;", msg);
+ }
+ }
+ break;
+ }
+ case kDexAnnotationMethod: {
+ uint32_t index = ReadUnsignedInt(annotation, value_arg, false);
+ if (result_style == kAllRaw) {
+ annotation_value->value_.SetI(index);
+ } else {
+ StackHandleScope<2> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
+ ArtMethod* method = Runtime::Current()->GetClassLinker()->ResolveMethodWithoutInvokeType(
+ klass->GetDexFile(), index, dex_cache, class_loader);
+ if (method == nullptr) {
+ return false;
+ }
+ set_object = true;
+ if (method->IsConstructor()) {
+ element_object = mirror::Constructor::CreateFromArtMethod(self, method);
+ } else {
+ element_object = mirror::Method::CreateFromArtMethod(self, method);
+ }
+ if (element_object == nullptr) {
+ return false;
+ }
+ }
+ break;
+ }
+ case kDexAnnotationField: {
+ uint32_t index = ReadUnsignedInt(annotation, value_arg, false);
+ if (result_style == kAllRaw) {
+ annotation_value->value_.SetI(index);
+ } else {
+ StackHandleScope<2> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
+ ArtField* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS(
+ klass->GetDexFile(), index, dex_cache, class_loader);
+ if (field == nullptr) {
+ return false;
+ }
+ set_object = true;
+ element_object = mirror::Field::CreateFromArtField(self, field, true);
+ if (element_object == nullptr) {
+ return false;
+ }
+ }
+ break;
+ }
+ case kDexAnnotationEnum: {
+ uint32_t index = ReadUnsignedInt(annotation, value_arg, false);
+ if (result_style == kAllRaw) {
+ annotation_value->value_.SetI(index);
+ } else {
+ StackHandleScope<3> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
+      ArtField* enum_field = Runtime::Current()->GetClassLinker()->ResolveField(
+          klass->GetDexFile(), index, dex_cache, class_loader, true);
+      if (enum_field == nullptr) {
+        return false;
+      } else {
+        Handle<mirror::Class> field_class(hs.NewHandle(enum_field->GetDeclaringClass()));
+ Runtime::Current()->GetClassLinker()->EnsureInitialized(self, field_class, true, true);
+ element_object = enum_field->GetObject(field_class.Get());
+ set_object = true;
+ }
+ }
+ break;
+ }
+ case kDexAnnotationArray:
+ if (result_style == kAllRaw || array_class.Get() == nullptr) {
+ return false;
+ } else {
+ ScopedObjectAccessUnchecked soa(self);
+ StackHandleScope<2> hs(self);
+ uint32_t size = DecodeUnsignedLeb128(&annotation);
+ Handle<mirror::Class> component_type(hs.NewHandle(array_class->GetComponentType()));
+ Handle<mirror::Array> new_array(hs.NewHandle(mirror::Array::Alloc<true>(
+ self, array_class.Get(), size, array_class->GetComponentSizeShift(),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator())));
+ if (new_array.Get() == nullptr) {
+ LOG(ERROR) << "Annotation element array allocation failed with size " << size;
+ return false;
+ }
+ AnnotationValue new_annotation_value;
+ for (uint32_t i = 0; i < size; ++i) {
+ if (!ProcessAnnotationValue(klass, &annotation, &new_annotation_value, component_type,
+ kPrimitivesOrObjects)) {
+ return false;
+ }
+ if (!component_type->IsPrimitive()) {
+ mirror::Object* obj = new_annotation_value.value_.GetL();
+ new_array->AsObjectArray<mirror::Object>()->SetWithoutChecks<false>(i, obj);
+ } else {
+ switch (new_annotation_value.type_) {
+ case kDexAnnotationByte:
+ new_array->AsByteArray()->SetWithoutChecks<false>(
+ i, new_annotation_value.value_.GetB());
+ break;
+ case kDexAnnotationShort:
+ new_array->AsShortArray()->SetWithoutChecks<false>(
+ i, new_annotation_value.value_.GetS());
+ break;
+ case kDexAnnotationChar:
+ new_array->AsCharArray()->SetWithoutChecks<false>(
+ i, new_annotation_value.value_.GetC());
+ break;
+ case kDexAnnotationInt:
+ new_array->AsIntArray()->SetWithoutChecks<false>(
+ i, new_annotation_value.value_.GetI());
+ break;
+ case kDexAnnotationLong:
+ new_array->AsLongArray()->SetWithoutChecks<false>(
+ i, new_annotation_value.value_.GetJ());
+ break;
+ case kDexAnnotationFloat:
+ new_array->AsFloatArray()->SetWithoutChecks<false>(
+ i, new_annotation_value.value_.GetF());
+ break;
+ case kDexAnnotationDouble:
+ new_array->AsDoubleArray()->SetWithoutChecks<false>(
+ i, new_annotation_value.value_.GetD());
+ break;
+ case kDexAnnotationBoolean:
+ new_array->AsBooleanArray()->SetWithoutChecks<false>(
+ i, new_annotation_value.value_.GetZ());
+ break;
+ default:
+ LOG(FATAL) << "Found invalid annotation value type while building annotation array";
+ return false;
+ }
+ }
+ }
+ element_object = new_array.Get();
+ set_object = true;
+ width = 0;
+ }
+ break;
+ case kDexAnnotationAnnotation:
+ if (result_style == kAllRaw) {
+ return false;
+ }
+ element_object = ProcessEncodedAnnotation(klass, &annotation);
+ if (element_object == nullptr) {
+ return false;
+ }
+ set_object = true;
+ width = 0;
+ break;
+ case kDexAnnotationNull:
+ if (result_style == kAllRaw) {
+ annotation_value->value_.SetI(0);
+ } else {
+ CHECK(element_object == nullptr);
+ set_object = true;
+ }
+ width = 0;
+ break;
+ default:
+ LOG(ERROR) << StringPrintf("Bad annotation element value type 0x%02x", value_type);
+ return false;
+ }
+
+ annotation += width;
+ *annotation_ptr = annotation;
+
+ if (result_style == kAllObjects && primitive_type != Primitive::kPrimVoid) {
+ element_object = BoxPrimitive(primitive_type, annotation_value->value_);
+ set_object = true;
+ }
+
+ if (set_object) {
+ annotation_value->value_.SetL(element_object);
+ }
+
+ return true;
+}
+
+mirror::Object* DexFile::ProcessEncodedAnnotation(Handle<mirror::Class> klass,
+ const uint8_t** annotation) const {
+ uint32_t type_index = DecodeUnsignedLeb128(annotation);
+ uint32_t size = DecodeUnsignedLeb128(annotation);
+
+ Thread* self = Thread::Current();
+ ScopedObjectAccessUnchecked soa(self);
+ StackHandleScope<2> hs(self);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Handle<mirror::Class> annotation_class(hs.NewHandle(
+ class_linker->ResolveType(klass->GetDexFile(), type_index, klass.Get())));
+ if (annotation_class.Get() == nullptr) {
+ LOG(INFO) << "Unable to resolve " << PrettyClass(klass.Get()) << " annotation class "
+ << type_index;
+ DCHECK(Thread::Current()->IsExceptionPending());
+ Thread::Current()->ClearException();
+ return nullptr;
+ }
+
+ mirror::Class* annotation_member_class =
+ soa.Decode<mirror::Class*>(WellKnownClasses::libcore_reflect_AnnotationMember);
+ mirror::Class* annotation_member_array_class =
+ class_linker->FindArrayClass(self, &annotation_member_class);
+ mirror::ObjectArray<mirror::Object>* element_array = nullptr;
+
+ if (size > 0) {
+ element_array =
+ mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_member_array_class, size);
+ if (element_array == nullptr) {
+ LOG(ERROR) << "Failed to allocate annotation member array (" << size << " elements)";
+ return nullptr;
+ }
+ }
+
+ Handle<mirror::ObjectArray<mirror::Object>> h_element_array(hs.NewHandle(element_array));
+ for (uint32_t i = 0; i < size; ++i) {
+ mirror::Object* new_member = CreateAnnotationMember(klass, annotation_class, annotation);
+ if (new_member == nullptr) {
+ return nullptr;
+ }
+ h_element_array->SetWithoutChecks<false>(i, new_member);
+ }
+
+ JValue result;
+ ArtMethod* create_annotation_method =
+ soa.DecodeMethod(WellKnownClasses::libcore_reflect_AnnotationFactory_createAnnotation);
+ uint32_t args[2] = { static_cast<uint32_t>(reinterpret_cast<uintptr_t>(annotation_class.Get())),
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(h_element_array.Get())) };
+ create_annotation_method->Invoke(self, args, sizeof(args), &result, "LLL");
+ if (self->IsExceptionPending()) {
+ LOG(INFO) << "Exception in AnnotationFactory.createAnnotation";
+ return nullptr;
+ }
+
+ return result.GetL();
+}
+
+const DexFile::AnnotationItem* DexFile::SearchAnnotationSet(const AnnotationSetItem* annotation_set,
+ const char* descriptor, uint32_t visibility) const {
+ const AnnotationItem* result = nullptr;
+ for (uint32_t i = 0; i < annotation_set->size_; ++i) {
+ const AnnotationItem* annotation_item = GetAnnotationItem(annotation_set, i);
+ if (annotation_item->visibility_ != visibility) {
+ continue;
+ }
+ const uint8_t* annotation = annotation_item->annotation_;
+ uint32_t type_index = DecodeUnsignedLeb128(&annotation);
+
+ if (strcmp(descriptor, StringByTypeIdx(type_index)) == 0) {
+ result = annotation_item;
+ break;
+ }
+ }
+ return result;
+}
+
+const uint8_t* DexFile::SearchEncodedAnnotation(const uint8_t* annotation, const char* name) const {
+ DecodeUnsignedLeb128(&annotation); // unused type_index
+ uint32_t size = DecodeUnsignedLeb128(&annotation);
+
+ while (size != 0) {
+ uint32_t element_name_index = DecodeUnsignedLeb128(&annotation);
+ const char* element_name = GetStringData(GetStringId(element_name_index));
+ if (strcmp(name, element_name) == 0) {
+ return annotation;
+ }
+ SkipAnnotationValue(&annotation);
+ size--;
+ }
+ return nullptr;
+}
+
+bool DexFile::SkipAnnotationValue(const uint8_t** annotation_ptr) const {
+ const uint8_t* annotation = *annotation_ptr;
+ uint8_t header_byte = *(annotation++);
+ uint8_t value_type = header_byte & kDexAnnotationValueTypeMask;
+ uint8_t value_arg = header_byte >> kDexAnnotationValueArgShift;
+ int32_t width = value_arg + 1;
+
+ switch (value_type) {
+ case kDexAnnotationByte:
+ case kDexAnnotationShort:
+ case kDexAnnotationChar:
+ case kDexAnnotationInt:
+ case kDexAnnotationLong:
+ case kDexAnnotationFloat:
+ case kDexAnnotationDouble:
+ case kDexAnnotationString:
+ case kDexAnnotationType:
+ case kDexAnnotationMethod:
+ case kDexAnnotationField:
+ case kDexAnnotationEnum:
+ break;
+ case kDexAnnotationArray:
+ {
+ uint32_t size = DecodeUnsignedLeb128(&annotation);
+ while (size--) {
+ if (!SkipAnnotationValue(&annotation)) {
+ return false;
+ }
+ }
+ width = 0;
+ break;
+ }
+ case kDexAnnotationAnnotation:
+ {
+ DecodeUnsignedLeb128(&annotation); // unused type_index
+ uint32_t size = DecodeUnsignedLeb128(&annotation);
+ while (size--) {
+ DecodeUnsignedLeb128(&annotation); // unused element_name_index
+ if (!SkipAnnotationValue(&annotation)) {
+ return false;
+ }
+ }
+ width = 0;
+ break;
+ }
+ case kDexAnnotationBoolean:
+ case kDexAnnotationNull:
+ width = 0;
+ break;
+ default:
+ LOG(FATAL) << StringPrintf("Bad annotation element value byte 0x%02x", value_type);
+ return false;
+ }
+
+ annotation += width;
+ *annotation_ptr = annotation;
+ return true;
+}
+
std::ostream& operator<<(std::ostream& os, const DexFile& dex_file) {
os << StringPrintf("[DexFile: %s dex-checksum=%08x location-checksum=%08x %p-%p]",
dex_file.GetLocation().c_str(),
@@ -1127,60 +2042,6 @@
}
}
-// Read a signed integer. "zwidth" is the zero-based byte count.
-static int32_t ReadSignedInt(const uint8_t* ptr, int zwidth) {
- int32_t val = 0;
- for (int i = zwidth; i >= 0; --i) {
- val = ((uint32_t)val >> 8) | (((int32_t)*ptr++) << 24);
- }
- val >>= (3 - zwidth) * 8;
- return val;
-}
-
-// Read an unsigned integer. "zwidth" is the zero-based byte count,
-// "fill_on_right" indicates which side we want to zero-fill from.
-static uint32_t ReadUnsignedInt(const uint8_t* ptr, int zwidth, bool fill_on_right) {
- uint32_t val = 0;
- if (!fill_on_right) {
- for (int i = zwidth; i >= 0; --i) {
- val = (val >> 8) | (((uint32_t)*ptr++) << 24);
- }
- val >>= (3 - zwidth) * 8;
- } else {
- for (int i = zwidth; i >= 0; --i) {
- val = (val >> 8) | (((uint32_t)*ptr++) << 24);
- }
- }
- return val;
-}
-
-// Read a signed long. "zwidth" is the zero-based byte count.
-static int64_t ReadSignedLong(const uint8_t* ptr, int zwidth) {
- int64_t val = 0;
- for (int i = zwidth; i >= 0; --i) {
- val = ((uint64_t)val >> 8) | (((int64_t)*ptr++) << 56);
- }
- val >>= (7 - zwidth) * 8;
- return val;
-}
-
-// Read an unsigned long. "zwidth" is the zero-based byte count,
-// "fill_on_right" indicates which side we want to zero-fill from.
-static uint64_t ReadUnsignedLong(const uint8_t* ptr, int zwidth, bool fill_on_right) {
- uint64_t val = 0;
- if (!fill_on_right) {
- for (int i = zwidth; i >= 0; --i) {
- val = (val >> 8) | (((uint64_t)*ptr++) << 56);
- }
- val >>= (7 - zwidth) * 8;
- } else {
- for (int i = zwidth; i >= 0; --i) {
- val = (val >> 8) | (((uint64_t)*ptr++) << 56);
- }
- }
- return val;
-}
-
EncodedStaticFieldValueIterator::EncodedStaticFieldValueIterator(
const DexFile& dex_file, Handle<mirror::DexCache>* dex_cache,
Handle<mirror::ClassLoader>* class_loader, ClassLinker* linker,
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index fc805f8..8928321 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -28,6 +28,8 @@
#include "globals.h"
#include "invoke_type.h"
#include "jni.h"
+#include "jvalue.h"
+#include "mirror/object_array.h"
#include "modifiers.h"
#include "utf.h"
@@ -384,6 +386,17 @@
DISALLOW_COPY_AND_ASSIGN(AnnotationItem);
};
+ struct AnnotationValue {
+ JValue value_;
+ uint8_t type_;
+ };
+
+ enum AnnotationResultStyle { // private
+ kAllObjects,
+ kPrimitivesOrObjects,
+ kAllRaw
+ };
+
// Returns the checksum of a file for comparison with GetLocationChecksum().
// For .dex files, this is the header checksum.
// For zip files, this is the classes.dex zip entry CRC32 checksum.
@@ -817,6 +830,187 @@
static bool LineNumForPcCb(void* context, uint32_t address, uint32_t line_num);
+ const AnnotationsDirectoryItem* GetAnnotationsDirectory(const ClassDef& class_def) const {
+ if (class_def.annotations_off_ == 0) {
+ return nullptr;
+ } else {
+ return reinterpret_cast<const AnnotationsDirectoryItem*>(begin_ + class_def.annotations_off_);
+ }
+ }
+
+ const AnnotationSetItem* GetClassAnnotationSet(const AnnotationsDirectoryItem* anno_dir) const {
+ if (anno_dir->class_annotations_off_ == 0) {
+ return nullptr;
+ } else {
+ return reinterpret_cast<const AnnotationSetItem*>(begin_ + anno_dir->class_annotations_off_);
+ }
+ }
+
+ const FieldAnnotationsItem* GetFieldAnnotations(const AnnotationsDirectoryItem* anno_dir) const {
+ if (anno_dir->fields_size_ == 0) {
+ return nullptr;
+ } else {
+ return reinterpret_cast<const FieldAnnotationsItem*>(&anno_dir[1]);
+ }
+ }
+
+ const MethodAnnotationsItem* GetMethodAnnotations(const AnnotationsDirectoryItem* anno_dir)
+ const {
+ if (anno_dir->methods_size_ == 0) {
+ return nullptr;
+ } else {
+ // Skip past the header and field annotations.
+ const uint8_t* addr = reinterpret_cast<const uint8_t*>(&anno_dir[1]);
+ addr += anno_dir->fields_size_ * sizeof(FieldAnnotationsItem);
+ return reinterpret_cast<const MethodAnnotationsItem*>(addr);
+ }
+ }
+
+ const ParameterAnnotationsItem* GetParameterAnnotations(const AnnotationsDirectoryItem* anno_dir)
+ const {
+ if (anno_dir->parameters_size_ == 0) {
+ return nullptr;
+ } else {
+ // Skip past the header, field annotations, and method annotations.
+ const uint8_t* addr = reinterpret_cast<const uint8_t*>(&anno_dir[1]);
+ addr += anno_dir->fields_size_ * sizeof(FieldAnnotationsItem);
+ addr += anno_dir->methods_size_ * sizeof(MethodAnnotationsItem);
+ return reinterpret_cast<const ParameterAnnotationsItem*>(addr);
+ }
+ }
+
+ const AnnotationSetItem* GetFieldAnnotationSetItem(const FieldAnnotationsItem& anno_item) const {
+ uint32_t offset = anno_item.annotations_off_;
+ if (offset == 0) {
+ return nullptr;
+ } else {
+ return reinterpret_cast<const AnnotationSetItem*>(begin_ + offset);
+ }
+ }
+
+ const AnnotationSetItem* GetMethodAnnotationSetItem(const MethodAnnotationsItem& anno_item)
+ const {
+ uint32_t offset = anno_item.annotations_off_;
+ if (offset == 0) {
+ return nullptr;
+ } else {
+ return reinterpret_cast<const AnnotationSetItem*>(begin_ + offset);
+ }
+ }
+
+ const AnnotationSetRefList* GetParameterAnnotationSetRefList(
+ const ParameterAnnotationsItem* anno_item) const {
+ uint32_t offset = anno_item->annotations_off_;
+ if (offset == 0) {
+ return nullptr;
+ }
+ return reinterpret_cast<const AnnotationSetRefList*>(begin_ + offset);
+ }
+
+ const AnnotationItem* GetAnnotationItem(const AnnotationSetItem* set_item, uint32_t index) const {
+    DCHECK_LT(index, set_item->size_);
+ uint32_t offset = set_item->entries_[index];
+ if (offset == 0) {
+ return nullptr;
+ } else {
+ return reinterpret_cast<const AnnotationItem*>(begin_ + offset);
+ }
+ }
+
+ const AnnotationSetItem* GetSetRefItemItem(const AnnotationSetRefItem* anno_item) const {
+ uint32_t offset = anno_item->annotations_off_;
+ if (offset == 0) {
+ return nullptr;
+ }
+ return reinterpret_cast<const AnnotationSetItem*>(begin_ + offset);
+ }
+
+ const AnnotationSetItem* FindAnnotationSetForField(ArtField* field) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* GetAnnotationForField(ArtField* field, Handle<mirror::Class> annotation_class)
+ const SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::ObjectArray<mirror::Object>* GetAnnotationsForField(ArtField* field) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::ObjectArray<mirror::Object>* GetSignatureAnnotationForField(ArtField* field) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsFieldAnnotationPresent(ArtField* field, Handle<mirror::Class> annotation_class) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ const AnnotationSetItem* FindAnnotationSetForMethod(ArtMethod* method) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ const ParameterAnnotationsItem* FindAnnotationsItemForMethod(ArtMethod* method) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* GetAnnotationDefaultValue(ArtMethod* method) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* GetAnnotationForMethod(ArtMethod* method, Handle<mirror::Class> annotation_class)
+ const SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::ObjectArray<mirror::Object>* GetAnnotationsForMethod(ArtMethod* method) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::ObjectArray<mirror::Object>* GetExceptionTypesForMethod(ArtMethod* method) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::ObjectArray<mirror::Object>* GetParameterAnnotations(ArtMethod* method) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsMethodAnnotationPresent(ArtMethod* method, Handle<mirror::Class> annotation_class) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ const AnnotationSetItem* FindAnnotationSetForClass(Handle<mirror::Class> klass) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass,
+ Handle<mirror::Class> annotation_class) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::ObjectArray<mirror::Object>* GetAnnotationsForClass(Handle<mirror::Class> klass) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsClassAnnotationPresent(Handle<mirror::Class> klass, Handle<mirror::Class> annotation_class)
+ const SHARED_REQUIRES(Locks::mutator_lock_);
+
+ mirror::Object* CreateAnnotationMember(Handle<mirror::Class> klass,
+ Handle<mirror::Class> annotation_class,
+ const uint8_t** annotation) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ const AnnotationItem* GetAnnotationItemFromAnnotationSet(Handle<mirror::Class> klass,
+ const AnnotationSetItem* annotation_set,
+ uint32_t visibility,
+ Handle<mirror::Class> annotation_class)
+ const SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* GetAnnotationObjectFromAnnotationSet(Handle<mirror::Class> klass,
+ const AnnotationSetItem* annotation_set,
+ uint32_t visibility,
+ Handle<mirror::Class> annotation_class) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* GetAnnotationValue(Handle<mirror::Class> klass,
+ const AnnotationItem* annotation_item,
+ const char* annotation_name,
+ Handle<mirror::Class> array_class,
+ uint32_t expected_type) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::ObjectArray<mirror::Object>* GetSignatureValue(Handle<mirror::Class> klass,
+ const AnnotationSetItem* annotation_set)
+ const SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::ObjectArray<mirror::Object>* GetThrowsValue(Handle<mirror::Class> klass,
+ const AnnotationSetItem* annotation_set) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::ObjectArray<mirror::Object>* ProcessAnnotationSet(Handle<mirror::Class> klass,
+ const AnnotationSetItem* annotation_set,
+ uint32_t visibility) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::ObjectArray<mirror::Object>* ProcessAnnotationSetRefList(Handle<mirror::Class> klass,
+ const AnnotationSetRefList* set_ref_list, uint32_t size) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ bool ProcessAnnotationValue(Handle<mirror::Class> klass, const uint8_t** annotation_ptr,
+ AnnotationValue* annotation_value, Handle<mirror::Class> return_class,
+ DexFile::AnnotationResultStyle result_style) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* ProcessEncodedAnnotation(Handle<mirror::Class> klass,
+ const uint8_t** annotation) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ const AnnotationItem* SearchAnnotationSet(const AnnotationSetItem* annotation_set,
+ const char* descriptor, uint32_t visibility) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ const uint8_t* SearchEncodedAnnotation(const uint8_t* annotation, const char* name) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ bool SkipAnnotationValue(const uint8_t** annotation_ptr) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
// Debug info opcodes and constants
enum {
DBG_END_SEQUENCE = 0x00,
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 0a1d806..7361d34 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -558,7 +558,7 @@
}
// TODO: Currently the read barrier does not have a fast path. Ideally the slow path should only
-// take one parameter "ref", which is generated by the fast path.
+// take one parameter "ref", which is given by the fast path.
extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref ATTRIBUTE_UNUSED,
mirror::Object* obj, uint32_t offset) {
DCHECK(kUseReadBarrier);
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index a3cc831..56edcc9 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -35,10 +35,17 @@
obj->VisitReferences(visitor, ref_visitor);
if (kCountScannedTypes) {
mirror::Class* klass = obj->GetClass<kVerifyNone>();
- if (UNLIKELY(klass == mirror::Class::GetJavaLangClass())) {
+ uint32_t class_flags = klass->GetClassFlags();
+ if ((class_flags & mirror::kClassFlagNoReferenceFields) != 0) {
+ ++no_reference_class_count_;
+ } else if (class_flags == mirror::kClassFlagNormal) {
+ ++normal_count_;
+ } else if (class_flags == mirror::kClassFlagObjectArray) {
+ ++object_array_count_;
+ } else if (class_flags == mirror::kClassFlagClass) {
++class_count_;
- } else if (UNLIKELY(klass->IsArrayClass<kVerifyNone>())) {
- ++array_count_;
+ } else if ((class_flags & mirror::kClassFlagReference) != 0) {
+ ++reference_count_;
} else {
++other_count_;
}
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index b0a8a5b..7ddc7cc 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -70,7 +70,6 @@
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
-static constexpr bool kCountJavaLangRefs = false;
static constexpr bool kCountMarkedObjects = false;
// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
@@ -114,15 +113,17 @@
mark_stack_ = heap_->GetMarkStack();
DCHECK(mark_stack_ != nullptr);
immune_region_.Reset();
+ no_reference_class_count_.StoreRelaxed(0);
+ normal_count_.StoreRelaxed(0);
class_count_.StoreRelaxed(0);
- array_count_.StoreRelaxed(0);
+ object_array_count_.StoreRelaxed(0);
other_count_.StoreRelaxed(0);
+ reference_count_.StoreRelaxed(0);
large_object_test_.StoreRelaxed(0);
large_object_mark_.StoreRelaxed(0);
overhead_time_ .StoreRelaxed(0);
work_chunks_created_.StoreRelaxed(0);
work_chunks_deleted_.StoreRelaxed(0);
- reference_count_.StoreRelaxed(0);
mark_null_count_.StoreRelaxed(0);
mark_immune_count_.StoreRelaxed(0);
mark_fastpath_count_.StoreRelaxed(0);
@@ -1265,9 +1266,6 @@
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
- if (kCountJavaLangRefs) {
- ++reference_count_;
- }
heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this);
}
@@ -1386,8 +1384,14 @@
void MarkSweep::FinishPhase() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
if (kCountScannedTypes) {
- VLOG(gc) << "MarkSweep scanned classes=" << class_count_.LoadRelaxed()
- << " arrays=" << array_count_.LoadRelaxed() << " other=" << other_count_.LoadRelaxed();
+ VLOG(gc)
+ << "MarkSweep scanned"
+ << " no reference objects=" << no_reference_class_count_.LoadRelaxed()
+ << " normal objects=" << normal_count_.LoadRelaxed()
+ << " classes=" << class_count_.LoadRelaxed()
+ << " object arrays=" << object_array_count_.LoadRelaxed()
+ << " references=" << reference_count_.LoadRelaxed()
+ << " other=" << other_count_.LoadRelaxed();
}
if (kCountTasks) {
VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed();
@@ -1399,9 +1403,6 @@
VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed()
<< " marked " << large_object_mark_.LoadRelaxed();
}
- if (kCountJavaLangRefs) {
- VLOG(gc) << "References scanned " << reference_count_.LoadRelaxed();
- }
if (kCountMarkedObjects) {
VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed()
<< " immune=" << mark_immune_count_.LoadRelaxed()
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 8bd1dc7..371bba5 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -245,7 +245,7 @@
void RevokeAllThreadLocalBuffers();
// Whether or not we count how many of each type of object were scanned.
- static const bool kCountScannedTypes = false;
+ static constexpr bool kCountScannedTypes = false;
// Current space, we check this space first to avoid searching for the appropriate space for an
// object.
@@ -260,18 +260,23 @@
// Parallel finger.
AtomicInteger atomic_finger_;
+
+ AtomicInteger no_reference_class_count_;
+ AtomicInteger normal_count_;
// Number of classes scanned, if kCountScannedTypes.
AtomicInteger class_count_;
- // Number of arrays scanned, if kCountScannedTypes.
- AtomicInteger array_count_;
+ // Number of object arrays scanned, if kCountScannedTypes.
+ AtomicInteger object_array_count_;
// Number of non-class/arrays scanned, if kCountScannedTypes.
AtomicInteger other_count_;
+ // Number of java.lang.ref.Reference instances.
+ AtomicInteger reference_count_;
+
AtomicInteger large_object_test_;
AtomicInteger large_object_mark_;
AtomicInteger overhead_time_;
AtomicInteger work_chunks_created_;
AtomicInteger work_chunks_deleted_;
- AtomicInteger reference_count_;
AtomicInteger mark_null_count_;
AtomicInteger mark_immune_count_;
AtomicInteger mark_fastpath_count_;
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index a355d40..ed63ed0 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -89,14 +89,24 @@
SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
: GarbageCollector(heap,
name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
+ mark_stack_(nullptr),
+ is_large_object_space_immune_(false),
to_space_(nullptr),
+ to_space_live_bitmap_(nullptr),
from_space_(nullptr),
+ mark_bitmap_(nullptr),
+ self_(nullptr),
generational_(generational),
last_gc_to_space_end_(nullptr),
bytes_promoted_(0),
bytes_promoted_since_last_whole_heap_collection_(0),
large_object_bytes_allocated_at_last_whole_heap_collection_(0),
collect_from_space_only_(generational),
+ promo_dest_space_(nullptr),
+ fallback_space_(nullptr),
+ bytes_moved_(0U),
+ objects_moved_(0U),
+ saved_bytes_(0U),
collector_name_(name_),
swap_semi_spaces_(true) {
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index d7f918b..b8c4478 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3707,7 +3707,7 @@
void Heap::CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
- (c->IsVariableSize() || c->GetObjectSize() == byte_count));
+ (c->IsVariableSize() || c->GetObjectSize() == byte_count)) << c->GetClassFlags();
CHECK_GE(byte_count, sizeof(mirror::Object));
}
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index a9a236f..ee6b020 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -421,6 +421,7 @@
fd_(fd),
direct_to_ddms_(direct_to_ddms),
start_ns_(NanoTime()),
+ output_(nullptr),
current_heap_(HPROF_HEAP_DEFAULT),
objects_in_segment_(0),
next_string_id_(0x400000),
diff --git a/runtime/image.cc b/runtime/image.cc
index 2586959..8df17c6 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '9', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '0', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/image.h b/runtime/image.h
index 1a0d8fd..e2d59f9 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -78,7 +78,10 @@
// header of image files written by ImageWriter, read and validated by Space.
class PACKED(4) ImageHeader {
public:
- ImageHeader() : compile_pic_(0) {}
+ ImageHeader()
+ : image_begin_(0U), image_size_(0U), oat_checksum_(0U), oat_file_begin_(0U),
+ oat_data_begin_(0U), oat_data_end_(0U), oat_file_end_(0U), patch_delta_(0),
+ image_roots_(0U), pointer_size_(0U), compile_pic_(0) {}
ImageHeader(uint32_t image_begin,
uint32_t image_size_,
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 9d41018..2fd0517 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -373,7 +373,7 @@
globals_(gGlobalsInitial, gGlobalsMax, kGlobal),
libraries_(new Libraries),
unchecked_functions_(&gJniInvokeInterface),
- weak_globals_lock_("JNI weak global reference table lock"),
+ weak_globals_lock_("JNI weak global reference table lock", kJniWeakGlobalsLock),
weak_globals_(kWeakGlobalsInitial, kWeakGlobalsMax, kWeakGlobal),
allow_new_weak_globals_(true),
weak_globals_add_condition_("weak globals add condition", weak_globals_lock_) {
diff --git a/runtime/jdwp/jdwp_socket.cc b/runtime/jdwp/jdwp_socket.cc
index cbdde24..4fb6df1 100644
--- a/runtime/jdwp/jdwp_socket.cc
+++ b/runtime/jdwp/jdwp_socket.cc
@@ -46,9 +46,11 @@
uint16_t listenPort;
int listenSock; /* listen for connection from debugger */
- explicit JdwpSocketState(JdwpState* state) : JdwpNetStateBase(state) {
- listenPort = 0;
- listenSock = -1;
+ explicit JdwpSocketState(JdwpState* state)
+ : JdwpNetStateBase(state),
+ listenPort(0U),
+ listenSock(-1),
+ remote_port_(0U) {
}
virtual bool Accept();
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index cd678f6..b2c6e4d 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -547,6 +547,7 @@
inline String* Class::GetName() {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Class, name_));
}
+
inline void Class::SetName(String* name) {
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, name_), name);
@@ -784,9 +785,17 @@
inline void Class::SetAccessFlags(uint32_t new_access_flags) {
// Called inside a transaction when setting pre-verified flag during boot image compilation.
if (Runtime::Current()->IsActiveTransaction()) {
- SetField32<true>(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), new_access_flags);
+ SetField32<true>(AccessFlagsOffset(), new_access_flags);
} else {
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), new_access_flags);
+ SetField32<false>(AccessFlagsOffset(), new_access_flags);
+ }
+}
+
+inline void Class::SetClassFlags(uint32_t new_flags) {
+ if (Runtime::Current()->IsActiveTransaction()) {
+ SetField32<true>(OFFSET_OF_OBJECT_MEMBER(Class, class_flags_), new_flags);
+ } else {
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, class_flags_), new_flags);
}
}
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 055b3e5..2ac44fc 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -44,6 +44,7 @@
<< java_lang_Class_.Read()
<< " " << java_lang_Class;
CHECK(java_lang_Class != nullptr);
+ java_lang_Class->SetClassFlags(mirror::kClassFlagClass);
java_lang_Class_ = GcRoot<Class>(java_lang_Class);
}
@@ -77,6 +78,12 @@
CHECK_NE(h_this->GetStatus(), kStatusError)
<< "Attempt to set as erroneous an already erroneous class "
<< PrettyClass(h_this.Get());
+ if (VLOG_IS_ON(class_linker)) {
+ LOG(ERROR) << "Setting " << PrettyDescriptor(h_this.Get()) << " to erroneous.";
+ if (self->IsExceptionPending()) {
+ LOG(ERROR) << "Exception: " << self->GetException()->Dump();
+ }
+ }
// Stash current exception.
StackHandleScope<1> hs(self);
@@ -504,6 +511,16 @@
return nullptr;
}
+ArtMethod* Class::FindDeclaredVirtualMethodByName(const StringPiece& name, size_t pointer_size) {
+ for (auto& method : GetVirtualMethods(pointer_size)) {
+ ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size);
+ if (name == np_method->GetName()) {
+ return &method;
+ }
+ }
+ return nullptr;
+}
+
ArtMethod* Class::FindVirtualMethod(
const StringPiece& name, const StringPiece& signature, size_t pointer_size) {
for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 3f375be..1420e5b 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -19,6 +19,7 @@
#include "base/iteration_range.h"
#include "dex_file.h"
+#include "class_flags.h"
#include "gc_root.h"
#include "gc/allocator_type.h"
#include "invoke_type.h"
@@ -201,6 +202,12 @@
return OFFSET_OF_OBJECT_MEMBER(Class, access_flags_);
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE uint32_t GetClassFlags() SHARED_REQUIRES(Locks::mutator_lock_) {
+ return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_flags_));
+ }
+ void SetClassFlags(uint32_t new_flags) SHARED_REQUIRES(Locks::mutator_lock_);
+
void SetAccessFlags(uint32_t new_access_flags) SHARED_REQUIRES(Locks::mutator_lock_);
// Returns true if the class is an interface.
@@ -228,21 +235,19 @@
}
ALWAYS_INLINE bool IsStringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return (GetField32(AccessFlagsOffset()) & kAccClassIsStringClass) != 0;
+ return (GetClassFlags() & kClassFlagString) != 0;
}
ALWAYS_INLINE void SetStringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
- SetAccessFlags(flags | kAccClassIsStringClass);
+ SetClassFlags(kClassFlagString | kClassFlagNoReferenceFields);
}
ALWAYS_INLINE bool IsClassLoaderClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return (GetField32(AccessFlagsOffset()) & kAccClassIsClassLoaderClass) != 0;
+ return GetClassFlags() == kClassFlagClassLoader;
}
ALWAYS_INLINE void SetClassLoaderClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
- SetAccessFlags(flags | kAccClassIsClassLoaderClass);
+ SetClassFlags(kClassFlagClassLoader);
}
// Returns true if the class is abstract.
@@ -272,27 +277,27 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsTypeOfReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return (GetAccessFlags<kVerifyFlags>() & kAccClassIsReference) != 0;
+ return (GetClassFlags<kVerifyFlags>() & kClassFlagReference) != 0;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsWeakReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return (GetAccessFlags<kVerifyFlags>() & kAccClassIsWeakReference) != 0;
+ return GetClassFlags<kVerifyFlags>() == kClassFlagWeakReference;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsSoftReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return (GetAccessFlags<kVerifyFlags>() & kAccReferenceFlagsMask) == kAccClassIsReference;
+ return GetClassFlags<kVerifyFlags>() == kClassFlagSoftReference;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsFinalizerReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return (GetAccessFlags<kVerifyFlags>() & kAccClassIsFinalizerReference) != 0;
+ return GetClassFlags<kVerifyFlags>() == kClassFlagFinalizerReference;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsPhantomReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return (GetAccessFlags<kVerifyFlags>() & kAccClassIsPhantomReference) != 0;
+ return GetClassFlags<kVerifyFlags>() == kClassFlagPhantomReference;
}
// Can references of this type be assigned to by things of another type? For non-array types
@@ -826,6 +831,9 @@
size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* FindDeclaredVirtualMethodByName(const StringPiece& name, size_t pointer_size)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
ArtMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& signature,
size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -862,7 +870,8 @@
uint32_t NumInstanceFields() SHARED_REQUIRES(Locks::mutator_lock_);
ArtField* GetInstanceField(uint32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
- // Returns the number of instance fields containing reference types.
+ // Returns the number of instance fields containing reference types. Does not count fields in any
+ // super classes.
uint32_t NumReferenceInstanceFields() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(IsResolved() || IsErroneous());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_));
@@ -1225,6 +1234,9 @@
// length-prefixed array.
uint64_t virtual_methods_;
+ // Class flags to help speed up visiting object references.
+ uint32_t class_flags_;
+
// Total size of the Class instance; used when allocating storage on gc heap.
// See also object_size_.
uint32_t class_size_;
diff --git a/runtime/mirror/class_flags.h b/runtime/mirror/class_flags.h
new file mode 100644
index 0000000..eb2e2eb
--- /dev/null
+++ b/runtime/mirror/class_flags.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_CLASS_FLAGS_H_
+#define ART_RUNTIME_MIRROR_CLASS_FLAGS_H_
+
+#include <stdint.h>
+
+namespace art {
+namespace mirror {
+
+// Normal instance with at least one ref field other than the class.
+static constexpr uint32_t kClassFlagNormal = 0x00000000;
+
+// Only normal objects which have no reference fields, e.g. string or primitive array or normal
+// class instance with no fields other than klass.
+static constexpr uint32_t kClassFlagNoReferenceFields = 0x00000001;
+
+// Class is java.lang.String.class.
+static constexpr uint32_t kClassFlagString = 0x00000004;
+
+// Class is an object array class.
+static constexpr uint32_t kClassFlagObjectArray = 0x00000008;
+
+// Class is java.lang.Class.class.
+static constexpr uint32_t kClassFlagClass = 0x00000010;
+
+// Class is ClassLoader or one of its subclasses.
+static constexpr uint32_t kClassFlagClassLoader = 0x00000020;
+
+// Class is a soft/weak/phantom class.
+static constexpr uint32_t kClassFlagSoftReference = 0x00000040;
+
+// Class is a weak reference class.
+static constexpr uint32_t kClassFlagWeakReference = 0x00000080;
+
+// Class is a finalizer reference class.
+static constexpr uint32_t kClassFlagFinalizerReference = 0x00000100;
+
+// Class is the phantom reference class.
+static constexpr uint32_t kClassFlagPhantomReference = 0x00000200;
+
+// Combination of flags to figure out if the class is either the weak/soft/phantom/finalizer
+// reference class.
+static constexpr uint32_t kClassFlagReference =
+ kClassFlagSoftReference |
+ kClassFlagWeakReference |
+ kClassFlagFinalizerReference |
+ kClassFlagPhantomReference;
+
+} // namespace mirror
+} // namespace art
+
+#endif // ART_RUNTIME_MIRROR_CLASS_FLAGS_H_
+
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 586ae30..e35ddcc 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -24,6 +24,7 @@
#include "atomic.h"
#include "array-inl.h"
#include "class.h"
+#include "class_flags.h"
#include "class_linker.h"
#include "class_loader-inl.h"
#include "lock_word-inl.h"
@@ -1010,20 +1011,46 @@
const JavaLangRefVisitor& ref_visitor) {
mirror::Class* klass = GetClass<kVerifyFlags>();
visitor(this, ClassOffset(), false);
- if (klass == Class::GetJavaLangClass()) {
- AsClass<kVerifyNone>()->VisitReferences(klass, visitor);
- } else if (klass->IsArrayClass() || klass->IsStringClass()) {
- if (klass->IsObjectArrayClass<kVerifyNone>()) {
- AsObjectArray<mirror::Object, kVerifyNone>()->VisitReferences(visitor);
- }
- } else if (klass->IsClassLoaderClass()) {
- mirror::ClassLoader* class_loader = AsClassLoader<kVerifyFlags>();
- class_loader->VisitReferences<kVerifyFlags>(klass, visitor);
- } else {
+ const uint32_t class_flags = klass->GetClassFlags<kVerifyNone>();
+ if (LIKELY(class_flags == kClassFlagNormal)) {
DCHECK(!klass->IsVariableSize());
VisitInstanceFieldsReferences(klass, visitor);
- if (UNLIKELY(klass->IsTypeOfReferenceClass<kVerifyNone>())) {
- ref_visitor(klass, AsReference());
+ DCHECK(!klass->IsClassClass());
+ DCHECK(!klass->IsStringClass());
+ DCHECK(!klass->IsClassLoaderClass());
+ DCHECK(!klass->IsArrayClass());
+ } else {
+ if ((class_flags & kClassFlagNoReferenceFields) == 0) {
+ DCHECK(!klass->IsStringClass());
+ if (class_flags == kClassFlagClass) {
+ DCHECK(klass->IsClassClass());
+ AsClass<kVerifyNone>()->VisitReferences(klass, visitor);
+ } else if (class_flags == kClassFlagObjectArray) {
+ DCHECK(klass->IsObjectArrayClass());
+ AsObjectArray<mirror::Object, kVerifyNone>()->VisitReferences(visitor);
+ } else if ((class_flags & kClassFlagReference) != 0) {
+ VisitInstanceFieldsReferences(klass, visitor);
+ ref_visitor(klass, AsReference());
+ } else {
+ mirror::ClassLoader* const class_loader = AsClassLoader<kVerifyFlags>();
+ class_loader->VisitReferences<kVerifyFlags>(klass, visitor);
+ }
+ } else if (kIsDebugBuild) {
+ CHECK(!klass->IsClassClass());
+ CHECK(!klass->IsObjectArrayClass());
+ // String still has instance fields for reflection purposes but these don't exist in
+ // actual string instances.
+ if (!klass->IsStringClass()) {
+ size_t total_reference_instance_fields = 0;
+ mirror::Class* super_class = klass;
+ do {
+ total_reference_instance_fields += super_class->NumReferenceInstanceFields();
+ super_class = super_class->GetSuperClass();
+ } while (super_class != nullptr);
+ // The only reference field should be the object's class. This field is handled at the
+ // beginning of the function.
+ CHECK_EQ(total_reference_instance_fields, 1u);
+ }
}
}
}
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 3a39f58..eda6c9b 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -18,8 +18,10 @@
#define ART_RUNTIME_MIRROR_STRING_INL_H_
#include "array.h"
+#include "base/bit_utils.h"
#include "class.h"
#include "gc/heap-inl.h"
+#include "globals.h"
#include "intern_table.h"
#include "runtime.h"
#include "string.h"
@@ -142,7 +144,12 @@
template<VerifyObjectFlags kVerifyFlags>
inline size_t String::SizeOf() {
- return sizeof(String) + (sizeof(uint16_t) * GetLength<kVerifyFlags>());
+ size_t size = sizeof(String) + (sizeof(uint16_t) * GetLength<kVerifyFlags>());
+ // String.equals() intrinsics assume zero-padding up to kObjectAlignment,
+ // so make sure the padding is actually zero-initialized if the allocator
+ // chooses to clear, or GC compaction chooses to copy, only SizeOf() bytes.
+ // http://b/23528461
+ return RoundUp(size, kObjectAlignment);
}
template <bool kIsInstrumented, typename PreFenceVisitor>
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index b6236b1..45610dc 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -55,6 +55,7 @@
void String::SetClass(Class* java_lang_String) {
CHECK(java_lang_String_.IsNull());
CHECK(java_lang_String != nullptr);
+ CHECK(java_lang_String->IsStringClass());
java_lang_String_ = GcRoot<Class>(java_lang_String);
}
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index eb2e1f6..fbee2d7 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -157,10 +157,9 @@
return java_lang_String_.Read();
}
- static void SetClass(Class* java_lang_String);
- static void ResetClass();
- static void VisitRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ static void SetClass(Class* java_lang_String) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
private:
void SetHashCode(int32_t new_hash_code) SHARED_REQUIRES(Locks::mutator_lock_) {
diff --git a/runtime/modifiers.h b/runtime/modifiers.h
index 0d9ec29..f7ab10b 100644
--- a/runtime/modifiers.h
+++ b/runtime/modifiers.h
@@ -19,6 +19,8 @@
#include <stdint.h>
+namespace art {
+
static constexpr uint32_t kAccPublic = 0x0001; // class, field, method, ic
static constexpr uint32_t kAccPrivate = 0x0002; // field, method, ic
static constexpr uint32_t kAccProtected = 0x0004; // field, method, ic
@@ -49,28 +51,8 @@
static constexpr uint32_t kAccMiranda = 0x00200000; // method (dex only)
// Special runtime-only flags.
-// Note: if only kAccClassIsReference is set, we have a soft reference.
-
-// class is ClassLoader or one of its subclasses
-static constexpr uint32_t kAccClassIsClassLoaderClass = 0x10000000;
-
// class/ancestor overrides finalize()
static constexpr uint32_t kAccClassIsFinalizable = 0x80000000;
-// class is a soft/weak/phantom ref
-static constexpr uint32_t kAccClassIsReference = 0x08000000;
-// class is a weak reference
-static constexpr uint32_t kAccClassIsWeakReference = 0x04000000;
-// class is a finalizer reference
-static constexpr uint32_t kAccClassIsFinalizerReference = 0x02000000;
-// class is a phantom reference
-static constexpr uint32_t kAccClassIsPhantomReference = 0x01000000;
-// class is the string class
-static constexpr uint32_t kAccClassIsStringClass = 0x00800000;
-
-static constexpr uint32_t kAccReferenceFlagsMask = (kAccClassIsReference
- | kAccClassIsWeakReference
- | kAccClassIsFinalizerReference
- | kAccClassIsPhantomReference);
// Valid (meaningful) bits for a field.
static constexpr uint32_t kAccValidFieldFlags = kAccPublic | kAccPrivate | kAccProtected |
@@ -95,5 +77,7 @@
static constexpr uint32_t kAccValidInterfaceFlags = kAccPublic | kAccInterface |
kAccAbstract | kAccSynthetic | kAccAnnotation;
+} // namespace art
+
#endif // ART_RUNTIME_MODIFIERS_H_
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 4f97d20..9bd320c 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -171,7 +171,7 @@
if (array == nullptr) {
ScopedObjectAccess soa(env);
for (auto& dex_file : dex_files) {
- if (Runtime::Current()->GetClassLinker()->IsDexFileRegistered(*dex_file)) {
+ if (Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file, true) != nullptr) {
dex_file.release();
}
}
@@ -209,7 +209,7 @@
// TODO: The Runtime should support unloading of classes and freeing of the
// dex files for those unloaded classes rather than leaking dex files here.
for (auto& dex_file : *dex_files) {
- if (!Runtime::Current()->GetClassLinker()->IsDexFileRegistered(*dex_file)) {
+ if (Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file, true) == nullptr) {
delete dex_file;
}
}
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 9ea339a..5a9c43b 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -425,14 +425,16 @@
static void PreloadDexCachesStatsFilled(DexCacheStats* filled)
SHARED_REQUIRES(Locks::mutator_lock_) {
if (!kPreloadDexCachesCollectStats) {
- return;
+ return;
}
- ClassLinker* linker = Runtime::Current()->GetClassLinker();
- const std::vector<const DexFile*>& boot_class_path = linker->GetBootClassPath();
- for (size_t i = 0; i< boot_class_path.size(); i++) {
- const DexFile* dex_file = boot_class_path[i];
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ for (const DexFile* dex_file : class_linker->GetBootClassPath()) {
CHECK(dex_file != nullptr);
- mirror::DexCache* dex_cache = linker->FindDexCache(*dex_file);
+ mirror::DexCache* const dex_cache = class_linker->FindDexCache(*dex_file, true);
+ // If dex cache was deallocated, just continue.
+ if (dex_cache == nullptr) {
+ continue;
+ }
for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
mirror::String* string = dex_cache->GetResolvedString(j);
if (string != nullptr) {
@@ -446,7 +448,7 @@
}
}
for (size_t j = 0; j < dex_cache->NumResolvedFields(); j++) {
- ArtField* field = linker->GetResolvedField(j, dex_cache);
+ ArtField* field = class_linker->GetResolvedField(j, dex_cache);
if (field != nullptr) {
filled->num_fields++;
}
@@ -490,11 +492,11 @@
}
const std::vector<const DexFile*>& boot_class_path = linker->GetBootClassPath();
- for (size_t i = 0; i< boot_class_path.size(); i++) {
+ for (size_t i = 0; i < boot_class_path.size(); i++) {
const DexFile* dex_file = boot_class_path[i];
CHECK(dex_file != nullptr);
StackHandleScope<1> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(*dex_file)));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->RegisterDexFile(*dex_file)));
if (kPreloadDexCachesStrings) {
for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index c337e91..7e464e9 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -426,6 +426,45 @@
return soa.AddLocalReference<jobjectArray>(ret.Get());
}
+static jobject Class_getDeclaredAnnotation(JNIEnv* env, jobject javaThis, jclass annotationType) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+ if (klass->IsProxyClass()) {
+ return nullptr;
+ }
+ Handle<mirror::Class> annotation_class(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
+ return soa.AddLocalReference<jobject>(
+ klass->GetDexFile().GetAnnotationForClass(klass, annotation_class));
+}
+
+static jobjectArray Class_getDeclaredAnnotations(JNIEnv* env, jobject javaThis) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+ if (klass->IsProxyClass()) {
+ // Return an empty array instead of a null pointer.
+ mirror::Class* annotation_array_class =
+ soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array);
+ mirror::ObjectArray<mirror::Object>* empty_array =
+ mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), annotation_array_class, 0);
+ return soa.AddLocalReference<jobjectArray>(empty_array);
+ }
+ return soa.AddLocalReference<jobjectArray>(klass->GetDexFile().GetAnnotationsForClass(klass));
+}
+
+static jboolean Class_isDeclaredAnnotationPresent(JNIEnv* env, jobject javaThis,
+ jclass annotationType) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+ if (klass->IsProxyClass()) {
+ return false;
+ }
+ Handle<mirror::Class> annotation_class(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
+ return klass->GetDexFile().IsClassAnnotationPresent(klass, annotation_class);
+}
+
static jobject Class_newInstance(JNIEnv* env, jobject javaThis) {
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<4> hs(soa.Self());
@@ -508,6 +547,9 @@
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(Class, classForName,
"!(Ljava/lang/String;ZLjava/lang/ClassLoader;)Ljava/lang/Class;"),
+ NATIVE_METHOD(Class, getDeclaredAnnotation,
+ "!(Ljava/lang/Class;)Ljava/lang/annotation/Annotation;"),
+ NATIVE_METHOD(Class, getDeclaredAnnotations, "!()[Ljava/lang/annotation/Annotation;"),
NATIVE_METHOD(Class, getDeclaredConstructorInternal,
"!([Ljava/lang/Class;)Ljava/lang/reflect/Constructor;"),
NATIVE_METHOD(Class, getDeclaredConstructorsInternal, "!(Z)[Ljava/lang/reflect/Constructor;"),
@@ -518,10 +560,11 @@
NATIVE_METHOD(Class, getDeclaredMethodInternal,
"!(Ljava/lang/String;[Ljava/lang/Class;)Ljava/lang/reflect/Method;"),
NATIVE_METHOD(Class, getDeclaredMethodsUnchecked,
- "!(Z)[Ljava/lang/reflect/Method;"),
+ "!(Z)[Ljava/lang/reflect/Method;"),
NATIVE_METHOD(Class, getNameNative, "!()Ljava/lang/String;"),
NATIVE_METHOD(Class, getProxyInterfaces, "!()[Ljava/lang/Class;"),
NATIVE_METHOD(Class, getPublicDeclaredFields, "!()[Ljava/lang/reflect/Field;"),
+ NATIVE_METHOD(Class, isDeclaredAnnotationPresent, "!(Ljava/lang/Class;)Z"),
NATIVE_METHOD(Class, newInstance, "!()Ljava/lang/Object;"),
};
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 93ba84a..b4b77e7 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -18,6 +18,7 @@
#include "art_method-inl.h"
#include "class_linker.h"
+#include "class_linker-inl.h"
#include "jni_internal.h"
#include "mirror/class-inl.h"
#include "mirror/method.h"
@@ -28,6 +29,55 @@
namespace art {
+static jobject Constructor_getAnnotationNative(JNIEnv* env, jobject javaMethod,
+ jclass annotationType) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
+ return soa.AddLocalReference<jobject>(
+ method->GetDexFile()->GetAnnotationForMethod(method, klass));
+}
+
+static jobjectArray Constructor_getDeclaredAnnotations(JNIEnv* env, jobject javaMethod) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ return soa.AddLocalReference<jobjectArray>(method->GetDexFile()->GetAnnotationsForMethod(method));
+}
+
+static jobjectArray Constructor_getExceptionTypes(JNIEnv* env, jobject javaMethod) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ mirror::ObjectArray<mirror::Object>* result_array =
+ method->GetDexFile()->GetExceptionTypesForMethod(method);
+ if (result_array == nullptr) {
+ // Return an empty array instead of a null pointer.
+ mirror::Class* class_class = mirror::Class::GetJavaLangClass();
+ mirror::Class* class_array_class =
+ Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(), &class_class);
+ mirror::ObjectArray<mirror::Object>* empty_array =
+ mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), class_array_class, 0);
+ return soa.AddLocalReference<jobjectArray>(empty_array);
+ } else {
+ return soa.AddLocalReference<jobjectArray>(result_array);
+ }
+}
+
+static jobjectArray Constructor_getParameterAnnotationsNative(JNIEnv* env, jobject javaMethod) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ return soa.AddLocalReference<jobjectArray>(method->GetDexFile()->GetParameterAnnotations(method));
+}
+
+static jboolean Constructor_isAnnotationPresentNative(JNIEnv* env, jobject javaMethod,
+ jclass annotationType) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
+ return method->GetDexFile()->IsMethodAnnotationPresent(method, klass);
+}
+
/*
* We can also safely assume the constructor isn't associated
* with an interface, array, or primitive class. If this is coming from
@@ -82,6 +132,13 @@
}
static JNINativeMethod gMethods[] = {
+ NATIVE_METHOD(Constructor, getAnnotationNative,
+ "!(Ljava/lang/Class;)Ljava/lang/annotation/Annotation;"),
+ NATIVE_METHOD(Constructor, getDeclaredAnnotations, "!()[Ljava/lang/annotation/Annotation;"),
+ NATIVE_METHOD(Constructor, getExceptionTypes, "!()[Ljava/lang/Class;"),
+ NATIVE_METHOD(Constructor, getParameterAnnotationsNative,
+ "!()[[Ljava/lang/annotation/Annotation;"),
+ NATIVE_METHOD(Constructor, isAnnotationPresentNative, "!(Ljava/lang/Class;)Z"),
NATIVE_METHOD(Constructor, newInstance, "!([Ljava/lang/Object;)Ljava/lang/Object;"),
};
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index 5bbb0dc..aac800a 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -415,16 +415,68 @@
SetPrimitiveField<Primitive::kPrimShort>(env, javaField, javaObj, value);
}
+static jobject Field_getAnnotationNative(JNIEnv* env, jobject javaField, jclass annotationType) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ ArtField* field = soa.Decode<mirror::Field*>(javaField)->GetArtField();
+ if (field->GetDeclaringClass()->IsProxyClass()) {
+ return nullptr;
+ }
+ Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
+ return soa.AddLocalReference<jobject>(field->GetDexFile()->GetAnnotationForField(field, klass));
+}
+
+static jobjectArray Field_getDeclaredAnnotations(JNIEnv* env, jobject javaField) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtField* field = soa.Decode<mirror::Field*>(javaField)->GetArtField();
+ if (field->GetDeclaringClass()->IsProxyClass()) {
+ // Return an empty array instead of a null pointer.
+ mirror::Class* annotation_array_class =
+ soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array);
+ mirror::ObjectArray<mirror::Object>* empty_array =
+ mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), annotation_array_class, 0);
+ return soa.AddLocalReference<jobjectArray>(empty_array);
+ }
+ return soa.AddLocalReference<jobjectArray>(field->GetDexFile()->GetAnnotationsForField(field));
+}
+
+static jobjectArray Field_getSignatureAnnotation(JNIEnv* env, jobject javaField) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtField* field = soa.Decode<mirror::Field*>(javaField)->GetArtField();
+ if (field->GetDeclaringClass()->IsProxyClass()) {
+ return nullptr;
+ }
+ return soa.AddLocalReference<jobjectArray>(
+ field->GetDexFile()->GetSignatureAnnotationForField(field));
+}
+
+static jboolean Field_isAnnotationPresentNative(JNIEnv* env, jobject javaField,
+ jclass annotationType) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ ArtField* field = soa.Decode<mirror::Field*>(javaField)->GetArtField();
+ if (field->GetDeclaringClass()->IsProxyClass()) {
+ return false;
+ }
+ Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
+ return field->GetDexFile()->IsFieldAnnotationPresent(field, klass);
+}
+
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(Field, get, "!(Ljava/lang/Object;)Ljava/lang/Object;"),
NATIVE_METHOD(Field, getBoolean, "!(Ljava/lang/Object;)Z"),
NATIVE_METHOD(Field, getByte, "!(Ljava/lang/Object;)B"),
NATIVE_METHOD(Field, getChar, "!(Ljava/lang/Object;)C"),
+ NATIVE_METHOD(Field, getAnnotationNative,
+ "!(Ljava/lang/Class;)Ljava/lang/annotation/Annotation;"),
+ NATIVE_METHOD(Field, getDeclaredAnnotations, "!()[Ljava/lang/annotation/Annotation;"),
+ NATIVE_METHOD(Field, getSignatureAnnotation, "!()[Ljava/lang/String;"),
NATIVE_METHOD(Field, getDouble, "!(Ljava/lang/Object;)D"),
NATIVE_METHOD(Field, getFloat, "!(Ljava/lang/Object;)F"),
NATIVE_METHOD(Field, getInt, "!(Ljava/lang/Object;)I"),
NATIVE_METHOD(Field, getLong, "!(Ljava/lang/Object;)J"),
NATIVE_METHOD(Field, getShort, "!(Ljava/lang/Object;)S"),
+ NATIVE_METHOD(Field, isAnnotationPresentNative, "!(Ljava/lang/Class;)Z"),
NATIVE_METHOD(Field, set, "!(Ljava/lang/Object;Ljava/lang/Object;)V"),
NATIVE_METHOD(Field, setBoolean, "!(Ljava/lang/Object;Z)V"),
NATIVE_METHOD(Field, setByte, "!(Ljava/lang/Object;B)V"),
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index 9533b4d..1219f85 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -18,6 +18,7 @@
#include "art_method-inl.h"
#include "class_linker.h"
+#include "class_linker-inl.h"
#include "jni_internal.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
@@ -28,35 +29,111 @@
namespace art {
+static jobject Method_getAnnotationNative(JNIEnv* env, jobject javaMethod, jclass annotationType) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ if (method->GetDeclaringClass()->IsProxyClass()) {
+ return nullptr;
+ }
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
+ return soa.AddLocalReference<jobject>(
+ method->GetDexFile()->GetAnnotationForMethod(method, klass));
+}
+
+static jobjectArray Method_getDeclaredAnnotations(JNIEnv* env, jobject javaMethod) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ if (method->GetDeclaringClass()->IsProxyClass()) {
+ // Return an empty array instead of a null pointer.
+ mirror::Class* annotation_array_class =
+ soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array);
+ mirror::ObjectArray<mirror::Object>* empty_array =
+ mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), annotation_array_class, 0);
+ return soa.AddLocalReference<jobjectArray>(empty_array);
+ }
+ return soa.AddLocalReference<jobjectArray>(method->GetDexFile()->GetAnnotationsForMethod(method));
+}
+
+static jobject Method_getDefaultValue(JNIEnv* env, jobject javaMethod) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ if (!method->GetDeclaringClass()->IsAnnotation()) {
+ return nullptr;
+ }
+ return soa.AddLocalReference<jobject>(method->GetDexFile()->GetAnnotationDefaultValue(method));
+}
+
+static jobjectArray Method_getExceptionTypes(JNIEnv* env, jobject javaMethod) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ if (method->GetDeclaringClass()->IsProxyClass()) {
+ mirror::Class* klass = method->GetDeclaringClass();
+ int throws_index = -1;
+ size_t i = 0;
+ for (const auto& m : klass->GetVirtualMethods(sizeof(void*))) {
+ if (&m == method) {
+ throws_index = i;
+ break;
+ }
+ ++i;
+ }
+ CHECK_NE(throws_index, -1);
+ mirror::ObjectArray<mirror::Class>* declared_exceptions = klass->GetThrows()->Get(throws_index);
+ return soa.AddLocalReference<jobjectArray>(declared_exceptions->Clone(soa.Self()));
+ } else {
+ mirror::ObjectArray<mirror::Object>* result_array =
+ method->GetDexFile()->GetExceptionTypesForMethod(method);
+ if (result_array == nullptr) {
+ // Return an empty array instead of a null pointer.
+ mirror::Class* class_class = mirror::Class::GetJavaLangClass();
+ mirror::Class* class_array_class =
+ Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(), &class_class);
+ mirror::ObjectArray<mirror::Object>* empty_array =
+ mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), class_array_class, 0);
+ return soa.AddLocalReference<jobjectArray>(empty_array);
+ } else {
+ return soa.AddLocalReference<jobjectArray>(result_array);
+ }
+ }
+}
+
+static jobjectArray Method_getParameterAnnotationsNative(JNIEnv* env, jobject javaMethod) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ if (method->GetDeclaringClass()->IsProxyClass()) {
+ return nullptr;
+ }
+ return soa.AddLocalReference<jobjectArray>(method->GetDexFile()->GetParameterAnnotations(method));
+}
+
static jobject Method_invoke(JNIEnv* env, jobject javaMethod, jobject javaReceiver,
jobject javaArgs) {
ScopedFastNativeObjectAccess soa(env);
return InvokeMethod(soa, javaMethod, javaReceiver, javaArgs);
}
-static jobject Method_getExceptionTypesNative(JNIEnv* env, jobject javaMethod) {
+static jboolean Method_isAnnotationPresentNative(JNIEnv* env, jobject javaMethod,
+ jclass annotationType) {
ScopedFastNativeObjectAccess soa(env);
- ArtMethod* proxy_method = ArtMethod::FromReflectedMethod(soa, javaMethod);
- CHECK(proxy_method->GetDeclaringClass()->IsProxyClass());
- mirror::Class* proxy_class = proxy_method->GetDeclaringClass();
- int throws_index = -1;
- size_t i = 0;
- for (const auto& m : proxy_class->GetVirtualMethods(sizeof(void*))) {
- if (&m == proxy_method) {
- throws_index = i;
- break;
- }
- ++i;
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ if (method->GetDeclaringClass()->IsProxyClass()) {
+ return false;
}
- CHECK_NE(throws_index, -1);
- mirror::ObjectArray<mirror::Class>* declared_exceptions =
- proxy_class->GetThrows()->Get(throws_index);
- return soa.AddLocalReference<jobject>(declared_exceptions->Clone(soa.Self()));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
+ return method->GetDexFile()->IsMethodAnnotationPresent(method, klass);
}
static JNINativeMethod gMethods[] = {
+ NATIVE_METHOD(Method, getAnnotationNative,
+ "!(Ljava/lang/Class;)Ljava/lang/annotation/Annotation;"),
+ NATIVE_METHOD(Method, getDeclaredAnnotations, "!()[Ljava/lang/annotation/Annotation;"),
+ NATIVE_METHOD(Method, getDefaultValue, "!()Ljava/lang/Object;"),
+ NATIVE_METHOD(Method, getExceptionTypes, "!()[Ljava/lang/Class;"),
+ NATIVE_METHOD(Method, getParameterAnnotationsNative, "!()[[Ljava/lang/annotation/Annotation;"),
NATIVE_METHOD(Method, invoke, "!(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;"),
- NATIVE_METHOD(Method, getExceptionTypesNative, "!()[Ljava/lang/Class;"),
+ NATIVE_METHOD(Method, isAnnotationPresentNative, "!(Ljava/lang/Class;)Z"),
};
void register_java_lang_reflect_Method(JNIEnv* env) {
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
index 33cfe08..7e8c551 100644
--- a/runtime/profiler.cc
+++ b/runtime/profiler.cc
@@ -467,9 +467,14 @@
// Profile Table.
// This holds a mapping of ArtMethod* to a count of how many times a sample
// hit it at the top of the stack.
-ProfileSampleResults::ProfileSampleResults(Mutex& lock) : lock_(lock), num_samples_(0),
- num_null_methods_(0),
- num_boot_methods_(0) {
+ProfileSampleResults::ProfileSampleResults(Mutex& lock)
+ : lock_(lock),
+ num_samples_(0U),
+ num_null_methods_(0U),
+ num_boot_methods_(0U),
+ previous_num_samples_(0U),
+ previous_num_null_methods_(0U),
+ previous_num_boot_methods_(0U) {
for (int i = 0; i < kHashSize; i++) {
table[i] = nullptr;
}
diff --git a/runtime/profiler.h b/runtime/profiler.h
index 30babe3..bd29f71 100644
--- a/runtime/profiler.h
+++ b/runtime/profiler.h
@@ -243,7 +243,7 @@
public:
class ProfileData {
public:
- ProfileData() : count_(0), method_size_(0), used_percent_(0) {}
+ ProfileData() : count_(0), method_size_(0), used_percent_(0), top_k_used_percentage_(0) {}
ProfileData(const std::string& method_name, uint32_t count, uint32_t method_size,
double used_percent, double top_k_used_percentage) :
method_name_(method_name), count_(count), method_size_(method_size),
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index d1a4081..9d5ce9f 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -152,6 +152,9 @@
if (instrumentation->HasExceptionCaughtListeners()
&& self_->IsExceptionThrownByCurrentMethod(exception)) {
instrumentation->ExceptionCaughtEvent(self_, exception_ref.Get());
+ // Instrumentation may have been updated.
+ method_tracing_active_ = is_deoptimization_ ||
+ Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
}
}
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index ce9085d..e934834 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -85,7 +85,7 @@
Context* const context_;
const bool is_deoptimization_;
// Is method tracing active?
- const bool method_tracing_active_;
+ bool method_tracing_active_;
// Quick frame with found handler or last frame if no handler found.
ArtMethod** handler_quick_frame_;
// PC to branch to for the handler.
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index 380e72b..bfa8c54 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -20,6 +20,7 @@
#include "runtime.h"
#include "art_method.h"
+#include "class_linker.h"
#include "read_barrier-inl.h"
namespace art {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 1912314..49451ad 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -791,6 +791,12 @@
return failure_count;
}
+void Runtime::SetSentinel(mirror::Object* sentinel) {
+ CHECK(sentinel_.Read() == nullptr);
+ CHECK(sentinel != nullptr);
+ sentinel_ = GcRoot<mirror::Object>(sentinel);
+}
+
bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized) {
ATRACE_BEGIN("Runtime::Init");
CHECK_EQ(sysconf(_SC_PAGE_SIZE), kPageSize);
@@ -1054,10 +1060,6 @@
CHECK(class_linker_ != nullptr);
- // Initialize the special sentinel_ value early.
- sentinel_ = GcRoot<mirror::Object>(class_linker_->AllocObject(self));
- CHECK(sentinel_.Read() != nullptr);
-
verifier::MethodVerifier::Init();
if (runtime_options.Exists(Opt::MethodTrace)) {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 4577b75..bd21db1 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -568,6 +568,9 @@
return fingerprint_;
}
+ // Called from class linker.
+ void SetSentinel(mirror::Object* sentinel) SHARED_REQUIRES(Locks::mutator_lock_);
+
private:
static void InitPlatformSignalHandlers();
diff --git a/runtime/stack.cc b/runtime/stack.cc
index b07b244..a765a3f 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -299,7 +299,9 @@
return true;
}
case DexRegisterLocation::Kind::kInRegister:
- case DexRegisterLocation::Kind::kInFpuRegister: {
+ case DexRegisterLocation::Kind::kInRegisterHigh:
+ case DexRegisterLocation::Kind::kInFpuRegister:
+ case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
uint32_t reg =
dex_register_map.GetMachineRegister(vreg, number_of_dex_registers, code_info, encoding);
return GetRegisterIfAccessible(reg, kind, val);
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 0d3816b..07b79b5 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -59,26 +59,33 @@
/*
* The location kind used to populate the Dex register information in a
* StackMapStream can either be:
- * - kNone: the register has no location yet, meaning it has not been set;
+ * - kStack: vreg stored on the stack, value holds the stack offset;
+ * - kInRegister: vreg stored in low 32 bits of a core physical register,
+ * value holds the register number;
+ * - kInRegisterHigh: vreg stored in high 32 bits of a core physical register,
+ * value holds the register number;
+ * - kInFpuRegister: vreg stored in low 32 bits of an FPU register,
+ * value holds the register number;
+ * - kInFpuRegisterHigh: vreg stored in high 32 bits of an FPU register,
+ * value holds the register number;
* - kConstant: value holds the constant;
- * - kStack: value holds the stack offset;
- * - kRegister: value holds the physical register number;
- * - kFpuRegister: value holds the physical register number.
*
* In addition, DexRegisterMap also uses these values:
* - kInStackLargeOffset: value holds a "large" stack offset (greater than
* or equal to 128 bytes);
* - kConstantLargeValue: value holds a "large" constant (lower than 0, or
- * or greater than or equal to 32).
+ * greater than or equal to 32);
+ * - kNone: the register has no location, meaning it has not been set.
*/
enum class Kind : uint8_t {
// Short location kinds, for entries fitting on one byte (3 bits
// for the kind, 5 bits for the value) in a DexRegisterMap.
- kNone = 0, // 0b000
- kInStack = 1, // 0b001
- kInRegister = 2, // 0b010
+ kInStack = 0, // 0b000
+ kInRegister = 1, // 0b001
+ kInRegisterHigh = 2, // 0b010
kInFpuRegister = 3, // 0b011
- kConstant = 4, // 0b100
+ kInFpuRegisterHigh = 4, // 0b100
+ kConstant = 5, // 0b101
// Large location kinds, requiring a 5-byte encoding (1 byte for the
// kind, 4 bytes for the value).
@@ -87,11 +94,14 @@
// divided by the stack frame slot size (4 bytes) cannot fit on a
// 5-bit unsigned integer (i.e., this offset value is greater than
// or equal to 2^5 * 4 = 128 bytes).
- kInStackLargeOffset = 5, // 0b101
+ kInStackLargeOffset = 6, // 0b110
// Large constant, that cannot fit on a 5-bit signed integer (i.e.,
// lower than 0, or greater than or equal to 2^5 = 32).
- kConstantLargeValue = 6, // 0b110
+ kConstantLargeValue = 7, // 0b111
+
+ // Entries with no location are not stored and do not need their own marker.
+ kNone = static_cast<uint8_t>(-1),
kLastLocationKind = kConstantLargeValue
};
@@ -108,25 +118,29 @@
return "in stack";
case Kind::kInRegister:
return "in register";
+ case Kind::kInRegisterHigh:
+ return "in register high";
case Kind::kInFpuRegister:
return "in fpu register";
+ case Kind::kInFpuRegisterHigh:
+ return "in fpu register high";
case Kind::kConstant:
return "as constant";
case Kind::kInStackLargeOffset:
return "in stack (large offset)";
case Kind::kConstantLargeValue:
return "as constant (large value)";
- default:
- UNREACHABLE();
}
+ UNREACHABLE();
}
static bool IsShortLocationKind(Kind kind) {
switch (kind) {
- case Kind::kNone:
case Kind::kInStack:
case Kind::kInRegister:
+ case Kind::kInRegisterHigh:
case Kind::kInFpuRegister:
+ case Kind::kInFpuRegisterHigh:
case Kind::kConstant:
return true;
@@ -134,9 +148,10 @@
case Kind::kConstantLargeValue:
return false;
- default:
- UNREACHABLE();
+ case Kind::kNone:
+ LOG(FATAL) << "Unexpected location kind " << PrettyDescriptor(kind);
}
+ UNREACHABLE();
}
// Convert `kind` to a "surface" kind, i.e. one that doesn't include
@@ -144,10 +159,11 @@
// TODO: Introduce another enum type for the surface kind?
static Kind ConvertToSurfaceKind(Kind kind) {
switch (kind) {
- case Kind::kNone:
case Kind::kInStack:
case Kind::kInRegister:
+ case Kind::kInRegisterHigh:
case Kind::kInFpuRegister:
+ case Kind::kInFpuRegisterHigh:
case Kind::kConstant:
return kind;
@@ -157,9 +173,10 @@
case Kind::kConstantLargeValue:
return Kind::kConstant;
- default:
- UNREACHABLE();
+ case Kind::kNone:
+ return kind;
}
+ UNREACHABLE();
}
// Required by art::StackMapStream::LocationCatalogEntriesIndices.
@@ -305,55 +322,60 @@
// Compute the compressed kind of `location`.
static DexRegisterLocation::Kind ComputeCompressedKind(const DexRegisterLocation& location) {
- switch (location.GetInternalKind()) {
- case DexRegisterLocation::Kind::kNone:
- DCHECK_EQ(location.GetValue(), 0);
- return DexRegisterLocation::Kind::kNone;
-
- case DexRegisterLocation::Kind::kInRegister:
- DCHECK_GE(location.GetValue(), 0);
- DCHECK_LT(location.GetValue(), 1 << kValueBits);
- return DexRegisterLocation::Kind::kInRegister;
-
- case DexRegisterLocation::Kind::kInFpuRegister:
- DCHECK_GE(location.GetValue(), 0);
- DCHECK_LT(location.GetValue(), 1 << kValueBits);
- return DexRegisterLocation::Kind::kInFpuRegister;
-
+ DexRegisterLocation::Kind kind = location.GetInternalKind();
+ switch (kind) {
case DexRegisterLocation::Kind::kInStack:
return IsShortStackOffsetValue(location.GetValue())
? DexRegisterLocation::Kind::kInStack
: DexRegisterLocation::Kind::kInStackLargeOffset;
+ case DexRegisterLocation::Kind::kInRegister:
+ case DexRegisterLocation::Kind::kInRegisterHigh:
+ DCHECK_GE(location.GetValue(), 0);
+ DCHECK_LT(location.GetValue(), 1 << kValueBits);
+ return kind;
+
+ case DexRegisterLocation::Kind::kInFpuRegister:
+ case DexRegisterLocation::Kind::kInFpuRegisterHigh:
+ DCHECK_GE(location.GetValue(), 0);
+ DCHECK_LT(location.GetValue(), 1 << kValueBits);
+ return kind;
+
case DexRegisterLocation::Kind::kConstant:
return IsShortConstantValue(location.GetValue())
? DexRegisterLocation::Kind::kConstant
: DexRegisterLocation::Kind::kConstantLargeValue;
- default:
- LOG(FATAL) << "Unexpected location kind"
- << DexRegisterLocation::PrettyDescriptor(location.GetInternalKind());
- UNREACHABLE();
+ case DexRegisterLocation::Kind::kConstantLargeValue:
+ case DexRegisterLocation::Kind::kInStackLargeOffset:
+ case DexRegisterLocation::Kind::kNone:
+ LOG(FATAL) << "Unexpected location kind " << DexRegisterLocation::PrettyDescriptor(kind);
}
+ UNREACHABLE();
}
// Can `location` be turned into a short location?
static bool CanBeEncodedAsShortLocation(const DexRegisterLocation& location) {
- switch (location.GetInternalKind()) {
- case DexRegisterLocation::Kind::kNone:
- case DexRegisterLocation::Kind::kInRegister:
- case DexRegisterLocation::Kind::kInFpuRegister:
- return true;
-
+ DexRegisterLocation::Kind kind = location.GetInternalKind();
+ switch (kind) {
case DexRegisterLocation::Kind::kInStack:
return IsShortStackOffsetValue(location.GetValue());
+ case DexRegisterLocation::Kind::kInRegister:
+ case DexRegisterLocation::Kind::kInRegisterHigh:
+ case DexRegisterLocation::Kind::kInFpuRegister:
+ case DexRegisterLocation::Kind::kInFpuRegisterHigh:
+ return true;
+
case DexRegisterLocation::Kind::kConstant:
return IsShortConstantValue(location.GetValue());
- default:
- UNREACHABLE();
+ case DexRegisterLocation::Kind::kConstantLargeValue:
+ case DexRegisterLocation::Kind::kInStackLargeOffset:
+ case DexRegisterLocation::Kind::kNone:
+ LOG(FATAL) << "Unexpected location kind " << DexRegisterLocation::PrettyDescriptor(kind);
}
+ UNREACHABLE();
}
static size_t EntrySize(const DexRegisterLocation& location) {
@@ -501,8 +523,10 @@
const StackMapEncoding& enc) const {
DexRegisterLocation location =
GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info, enc);
- DCHECK(location.GetInternalKind() == DexRegisterLocation::Kind::kInRegister
- || location.GetInternalKind() == DexRegisterLocation::Kind::kInFpuRegister)
+ DCHECK(location.GetInternalKind() == DexRegisterLocation::Kind::kInRegister ||
+ location.GetInternalKind() == DexRegisterLocation::Kind::kInRegisterHigh ||
+ location.GetInternalKind() == DexRegisterLocation::Kind::kInFpuRegister ||
+ location.GetInternalKind() == DexRegisterLocation::Kind::kInFpuRegisterHigh)
<< DexRegisterLocation::PrettyDescriptor(location.GetInternalKind());
return location.GetValue();
}
diff --git a/runtime/thread.h b/runtime/thread.h
index 959af95..9bb57bf 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1202,7 +1202,8 @@
last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0),
thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr),
- nested_signal_state(nullptr), flip_function(nullptr), method_verifier(nullptr) {
+ nested_signal_state(nullptr), flip_function(nullptr), method_verifier(nullptr),
+ thread_local_mark_stack(nullptr) {
std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
}
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 4393430..7579d8d 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -640,7 +640,8 @@
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
for (auto& e : seen_methods) {
DexIndexBitSet* bit_set = e.second;
- mirror::DexCache* dex_cache = class_linker->FindDexCache(*e.first);
+ // TODO: Visit trace methods as roots.
+ mirror::DexCache* dex_cache = class_linker->FindDexCache(*e.first, false);
for (uint32_t i = 0; i < bit_set->size(); ++i) {
if ((*bit_set)[i]) {
visited_methods->insert(dex_cache->GetResolvedMethod(i, sizeof(void*)));
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 1828b91..223268d 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -416,6 +416,7 @@
have_any_pending_runtime_throw_failure_(false),
new_instance_count_(0),
monitor_enter_count_(0),
+ encountered_failure_types_(0),
can_load_classes_(can_load_classes),
allow_soft_failures_(allow_soft_failures),
need_precise_constants_(need_precise_constants),
@@ -587,6 +588,9 @@
}
std::ostream& MethodVerifier::Fail(VerifyError error) {
+ // Mark the error type as encountered.
+ encountered_failure_types_ |= (1U << static_cast<uint32_t>(error));
+
switch (error) {
case VERIFY_ERROR_NO_CLASS:
case VERIFY_ERROR_NO_FIELD:
@@ -1387,13 +1391,15 @@
if (declaring_class.IsJavaLangObject()) {
// "this" is implicitly initialized.
reg_line->SetThisInitialized();
- reg_line->SetRegisterType(this, arg_start + cur_arg, declaring_class);
+ reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, declaring_class);
} else {
- reg_line->SetRegisterType(this, arg_start + cur_arg,
- reg_types_.UninitializedThisArgument(declaring_class));
+ reg_line->SetRegisterType<LockOp::kClear>(
+ this,
+ arg_start + cur_arg,
+ reg_types_.UninitializedThisArgument(declaring_class));
}
} else {
- reg_line->SetRegisterType(this, arg_start + cur_arg, declaring_class);
+ reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, declaring_class);
}
cur_arg++;
}
@@ -1425,26 +1431,26 @@
DCHECK(HasFailures());
return false;
}
- reg_line->SetRegisterType(this, arg_start + cur_arg, reg_type);
+ reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, reg_type);
}
break;
case 'Z':
- reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Boolean());
+ reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, reg_types_.Boolean());
break;
case 'C':
- reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Char());
+ reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, reg_types_.Char());
break;
case 'B':
- reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Byte());
+ reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, reg_types_.Byte());
break;
case 'I':
- reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Integer());
+ reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, reg_types_.Integer());
break;
case 'S':
- reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Short());
+ reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, reg_types_.Short());
break;
case 'F':
- reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Float());
+ reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, reg_types_.Float());
break;
case 'J':
case 'D': {
@@ -1787,7 +1793,7 @@
* that as part of extracting the exception type from the catch block list.
*/
const RegType& res_type = GetCaughtExceptionType();
- work_line_->SetRegisterType(this, inst->VRegA_11x(), res_type);
+ work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_11x(), res_type);
break;
}
case Instruction::RETURN_VOID:
@@ -1887,26 +1893,26 @@
/* could be boolean, int, float, or a null reference */
case Instruction::CONST_4: {
int32_t val = static_cast<int32_t>(inst->VRegB_11n() << 28) >> 28;
- work_line_->SetRegisterType(this, inst->VRegA_11n(),
- DetermineCat1Constant(val, need_precise_constants_));
+ work_line_->SetRegisterType<LockOp::kClear>(
+ this, inst->VRegA_11n(), DetermineCat1Constant(val, need_precise_constants_));
break;
}
case Instruction::CONST_16: {
int16_t val = static_cast<int16_t>(inst->VRegB_21s());
- work_line_->SetRegisterType(this, inst->VRegA_21s(),
- DetermineCat1Constant(val, need_precise_constants_));
+ work_line_->SetRegisterType<LockOp::kClear>(
+ this, inst->VRegA_21s(), DetermineCat1Constant(val, need_precise_constants_));
break;
}
case Instruction::CONST: {
int32_t val = inst->VRegB_31i();
- work_line_->SetRegisterType(this, inst->VRegA_31i(),
- DetermineCat1Constant(val, need_precise_constants_));
+ work_line_->SetRegisterType<LockOp::kClear>(
+ this, inst->VRegA_31i(), DetermineCat1Constant(val, need_precise_constants_));
break;
}
case Instruction::CONST_HIGH16: {
int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
- work_line_->SetRegisterType(this, inst->VRegA_21h(),
- DetermineCat1Constant(val, need_precise_constants_));
+ work_line_->SetRegisterType<LockOp::kClear>(
+ this, inst->VRegA_21h(), DetermineCat1Constant(val, need_precise_constants_));
break;
}
/* could be long or double; resolved upon use */
@@ -1939,23 +1945,51 @@
break;
}
case Instruction::CONST_STRING:
- work_line_->SetRegisterType(this, inst->VRegA_21c(), reg_types_.JavaLangString());
+ work_line_->SetRegisterType<LockOp::kClear>(
+ this, inst->VRegA_21c(), reg_types_.JavaLangString());
break;
case Instruction::CONST_STRING_JUMBO:
- work_line_->SetRegisterType(this, inst->VRegA_31c(), reg_types_.JavaLangString());
+ work_line_->SetRegisterType<LockOp::kClear>(
+ this, inst->VRegA_31c(), reg_types_.JavaLangString());
break;
case Instruction::CONST_CLASS: {
// Get type from instruction if unresolved then we need an access check
// TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
const RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
// Register holds class, ie its type is class, on error it will hold Conflict.
- work_line_->SetRegisterType(this, inst->VRegA_21c(),
- res_type.IsConflict() ? res_type
- : reg_types_.JavaLangClass());
+ work_line_->SetRegisterType<LockOp::kClear>(
+ this, inst->VRegA_21c(), res_type.IsConflict() ? res_type
+ : reg_types_.JavaLangClass());
break;
}
case Instruction::MONITOR_ENTER:
work_line_->PushMonitor(this, inst->VRegA_11x(), work_insn_idx_);
+ // Check whether the previous instruction is a move-object with vAA as a source, creating
+ // untracked lock aliasing.
+ if (0 != work_insn_idx_ && !insn_flags_[work_insn_idx_].IsBranchTarget()) {
+ uint32_t prev_idx = work_insn_idx_ - 1;
+ while (0 != prev_idx && !insn_flags_[prev_idx].IsOpcode()) {
+ prev_idx--;
+ }
+ const Instruction* prev_inst = Instruction::At(code_item_->insns_ + prev_idx);
+ switch (prev_inst->Opcode()) {
+ case Instruction::MOVE_OBJECT:
+ case Instruction::MOVE_OBJECT_16:
+ case Instruction::MOVE_OBJECT_FROM16:
+ if (prev_inst->VRegB() == inst->VRegA_11x()) {
+ // Redo the copy. This won't change the register types, but update the lock status
+ // for the aliased register.
+ work_line_->CopyRegister1(this,
+ prev_inst->VRegA(),
+ prev_inst->VRegB(),
+ kTypeCategoryRef);
+ }
+ break;
+
+ default: // Other instruction types ignored.
+ break;
+ }
+ }
break;
case Instruction::MONITOR_EXIT:
/*
@@ -2006,7 +2040,9 @@
DCHECK_NE(failures_.size(), 0U);
if (!is_checkcast) {
- work_line_->SetRegisterType(this, inst->VRegA_22c(), reg_types_.Boolean());
+ work_line_->SetRegisterType<LockOp::kClear>(this,
+ inst->VRegA_22c(),
+ reg_types_.Boolean());
}
break; // bad class
}
@@ -2027,9 +2063,11 @@
}
} else {
if (is_checkcast) {
- work_line_->SetRegisterType(this, inst->VRegA_21c(), res_type);
+ work_line_->SetRegisterType<LockOp::kKeep>(this, inst->VRegA_21c(), res_type);
} else {
- work_line_->SetRegisterType(this, inst->VRegA_22c(), reg_types_.Boolean());
+ work_line_->SetRegisterType<LockOp::kClear>(this,
+ inst->VRegA_22c(),
+ reg_types_.Boolean());
}
}
break;
@@ -2040,7 +2078,9 @@
if (!res_type.IsArrayTypes() && !res_type.IsZero()) { // ie not an array or null
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-length on non-array " << res_type;
} else {
- work_line_->SetRegisterType(this, inst->VRegA_12x(), reg_types_.Integer());
+ work_line_->SetRegisterType<LockOp::kClear>(this,
+ inst->VRegA_12x(),
+ reg_types_.Integer());
}
} else {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-length on non-array " << res_type;
@@ -2065,7 +2105,7 @@
// initialized must be marked invalid.
work_line_->MarkUninitRefsAsInvalid(this, uninit_type);
// add the new uninitialized reference to the register state
- work_line_->SetRegisterType(this, inst->VRegA_21c(), uninit_type);
+ work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_21c(), uninit_type);
break;
}
case Instruction::NEW_ARRAY:
@@ -2087,7 +2127,7 @@
if (!work_line_->VerifyRegisterType(this, inst->VRegC_23x(), reg_types_.Float())) {
break;
}
- work_line_->SetRegisterType(this, inst->VRegA_23x(), reg_types_.Integer());
+ work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_23x(), reg_types_.Integer());
break;
case Instruction::CMPL_DOUBLE:
case Instruction::CMPG_DOUBLE:
@@ -2099,7 +2139,7 @@
reg_types_.DoubleHi())) {
break;
}
- work_line_->SetRegisterType(this, inst->VRegA_23x(), reg_types_.Integer());
+ work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_23x(), reg_types_.Integer());
break;
case Instruction::CMP_LONG:
if (!work_line_->VerifyRegisterTypeWide(this, inst->VRegB_23x(), reg_types_.LongLo(),
@@ -2110,7 +2150,7 @@
reg_types_.LongHi())) {
break;
}
- work_line_->SetRegisterType(this, inst->VRegA_23x(), reg_types_.Integer());
+ work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_23x(), reg_types_.Integer());
break;
case Instruction::THROW: {
const RegType& res_type = work_line_->GetRegisterType(this, inst->VRegA_11x());
@@ -2265,7 +2305,9 @@
branch_line.reset(update_line);
}
update_line->CopyFromLine(work_line_.get());
- update_line->SetRegisterType(this, instance_of_inst->VRegB_22c(), cast_type);
+ update_line->SetRegisterType<LockOp::kKeep>(this,
+ instance_of_inst->VRegB_22c(),
+ cast_type);
if (!insn_flags_[instance_of_idx].IsBranchTarget() && 0 != instance_of_idx) {
// See if instance-of was preceded by a move-object operation, common due to the small
// register encoding space of instance-of, and propagate type information to the source
@@ -2283,17 +2325,23 @@
switch (move_inst->Opcode()) {
case Instruction::MOVE_OBJECT:
if (move_inst->VRegA_12x() == instance_of_inst->VRegB_22c()) {
- update_line->SetRegisterType(this, move_inst->VRegB_12x(), cast_type);
+ update_line->SetRegisterType<LockOp::kKeep>(this,
+ move_inst->VRegB_12x(),
+ cast_type);
}
break;
case Instruction::MOVE_OBJECT_FROM16:
if (move_inst->VRegA_22x() == instance_of_inst->VRegB_22c()) {
- update_line->SetRegisterType(this, move_inst->VRegB_22x(), cast_type);
+ update_line->SetRegisterType<LockOp::kKeep>(this,
+ move_inst->VRegB_22x(),
+ cast_type);
}
break;
case Instruction::MOVE_OBJECT_16:
if (move_inst->VRegA_32x() == instance_of_inst->VRegB_22c()) {
- update_line->SetRegisterType(this, move_inst->VRegB_32x(), cast_type);
+ update_line->SetRegisterType<LockOp::kKeep>(this,
+ move_inst->VRegB_32x(),
+ cast_type);
}
break;
default:
@@ -2984,7 +3032,7 @@
// is good enough for some other verification to occur without hard-failing.
const uint32_t vreg_target_object = inst->VRegA_22x(); // box-lambda vA, vB
const RegType& reg_type = reg_types_.JavaLangObject(need_precise_constants_);
- work_line_->SetRegisterType(this, vreg_target_object, reg_type);
+ work_line_->SetRegisterType<LockOp::kClear>(this, vreg_target_object, reg_type);
break;
}
@@ -3335,6 +3383,7 @@
ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(
uint32_t dex_method_idx, MethodType method_type) {
const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx);
+ // LOG(INFO) << dex_file_->NumTypeIds() << " " << dex_file_->NumClassDefs();
const RegType& klass_type = ResolveClassAndCheckAccess(method_id.class_idx_);
if (klass_type.IsConflict()) {
std::string append(" in attempt to access method ");
@@ -3813,7 +3862,7 @@
work_line_->VerifyRegisterType(this, inst->VRegB_22c(), reg_types_.Integer());
/* set register type to array class */
const RegType& precise_type = reg_types_.FromUninitialized(res_type);
- work_line_->SetRegisterType(this, inst->VRegA_22c(), precise_type);
+ work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_22c(), precise_type);
} else {
// Verify each register. If "arg_count" is bad, VerifyRegisterType() will run off the end of
// the list and fail. It's legal, if silly, for arg_count to be zero.
@@ -3850,7 +3899,7 @@
// instruction type. TODO: have a proper notion of bottom here.
if (!is_primitive || insn_type.IsCategory1Types()) {
// Reference or category 1
- work_line_->SetRegisterType(this, inst->VRegA_23x(), reg_types_.Zero());
+ work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_23x(), reg_types_.Zero());
} else {
// Category 2
work_line_->SetRegisterTypeWide(this, inst->VRegA_23x(),
@@ -3878,7 +3927,7 @@
// instruction, which can't differentiate object types and ints from floats, longs from
// doubles.
if (!component_type.IsLowHalf()) {
- work_line_->SetRegisterType(this, inst->VRegA_23x(), component_type);
+ work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_23x(), component_type);
} else {
work_line_->SetRegisterTypeWide(this, inst->VRegA_23x(), component_type,
component_type.HighHalf(®_types_));
@@ -4182,13 +4231,13 @@
<< "' but found type '" << *field_type
<< "' in get-object";
if (error != VERIFY_ERROR_BAD_CLASS_HARD) {
- work_line_->SetRegisterType(this, vregA, reg_types_.Conflict());
+ work_line_->SetRegisterType<LockOp::kClear>(this, vregA, reg_types_.Conflict());
}
return;
}
}
if (!field_type->IsLowHalf()) {
- work_line_->SetRegisterType(this, vregA, *field_type);
+ work_line_->SetRegisterType<LockOp::kClear>(this, vregA, *field_type);
} else {
work_line_->SetRegisterTypeWide(this, vregA, *field_type, field_type->HighHalf(®_types_));
}
@@ -4331,12 +4380,12 @@
<< " to be compatible with type '" << insn_type
<< "' but found type '" << *field_type
<< "' in get-object";
- work_line_->SetRegisterType(this, vregA, reg_types_.Conflict());
+ work_line_->SetRegisterType<LockOp::kClear>(this, vregA, reg_types_.Conflict());
return;
}
}
if (!field_type->IsLowHalf()) {
- work_line_->SetRegisterType(this, vregA, *field_type);
+ work_line_->SetRegisterType<LockOp::kClear>(this, vregA, *field_type);
} else {
work_line_->SetRegisterTypeWide(this, vregA, *field_type, field_type->HighHalf(®_types_));
}
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 21f8543..d0841f0 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -67,17 +67,17 @@
* to be rewritten to fail at runtime.
*/
enum VerifyError {
- VERIFY_ERROR_BAD_CLASS_HARD, // VerifyError; hard error that skips compilation.
- VERIFY_ERROR_BAD_CLASS_SOFT, // VerifyError; soft error that verifies again at runtime.
+ VERIFY_ERROR_BAD_CLASS_HARD = 0, // VerifyError; hard error that skips compilation.
+ VERIFY_ERROR_BAD_CLASS_SOFT = 1, // VerifyError; soft error that verifies again at runtime.
- VERIFY_ERROR_NO_CLASS, // NoClassDefFoundError.
- VERIFY_ERROR_NO_FIELD, // NoSuchFieldError.
- VERIFY_ERROR_NO_METHOD, // NoSuchMethodError.
- VERIFY_ERROR_ACCESS_CLASS, // IllegalAccessError.
- VERIFY_ERROR_ACCESS_FIELD, // IllegalAccessError.
- VERIFY_ERROR_ACCESS_METHOD, // IllegalAccessError.
- VERIFY_ERROR_CLASS_CHANGE, // IncompatibleClassChangeError.
- VERIFY_ERROR_INSTANTIATION, // InstantiationError.
+ VERIFY_ERROR_NO_CLASS = 2, // NoClassDefFoundError.
+ VERIFY_ERROR_NO_FIELD = 3, // NoSuchFieldError.
+ VERIFY_ERROR_NO_METHOD = 4, // NoSuchMethodError.
+ VERIFY_ERROR_ACCESS_CLASS = 5, // IllegalAccessError.
+ VERIFY_ERROR_ACCESS_FIELD = 6, // IllegalAccessError.
+ VERIFY_ERROR_ACCESS_METHOD = 7, // IllegalAccessError.
+ VERIFY_ERROR_CLASS_CHANGE = 8, // IncompatibleClassChangeError.
+ VERIFY_ERROR_INSTANTIATION = 9, // InstantiationError.
// For opcodes that don't have complete verifier support (such as lambda opcodes),
// we need a way to continue execution at runtime without attempting to re-verify
// (since we know it will fail no matter what). Instead, run as the interpreter
@@ -85,25 +85,12 @@
// on the fly.
//
// TODO: Once all new opcodes have implemented full verifier support, this can be removed.
- VERIFY_ERROR_FORCE_INTERPRETER, // Skip the verification phase at runtime;
- // force the interpreter to do access checks.
- // (sets a soft fail at compile time).
+ VERIFY_ERROR_FORCE_INTERPRETER = 10, // Skip the verification phase at runtime;
+ // force the interpreter to do access checks.
+ // (sets a soft fail at compile time).
};
std::ostream& operator<<(std::ostream& os, const VerifyError& rhs);
-/*
- * Identifies the type of reference in the instruction that generated the verify error
- * (e.g. VERIFY_ERROR_ACCESS_CLASS could come from a method, field, or class reference).
- *
- * This must fit in two bits.
- */
-enum VerifyErrorRefType {
- VERIFY_ERROR_REF_CLASS = 0,
- VERIFY_ERROR_REF_FIELD = 1,
- VERIFY_ERROR_REF_METHOD = 2,
-};
-const int kVerifyErrorRefTypeShift = 6;
-
// We don't need to store the register data for many instructions, because we either only need
// it at branch points (for verification) or GC points and branches (for verification +
// type-precise register analysis).
@@ -291,6 +278,10 @@
return string_init_pc_reg_map_;
}
+ uint32_t GetEncounteredFailureTypes() {
+ return encountered_failure_types_;
+ }
+
private:
// Private constructor for dumping.
MethodVerifier(Thread* self, const DexFile* dex_file, Handle<mirror::DexCache> dex_cache,
@@ -753,6 +744,9 @@
size_t new_instance_count_;
size_t monitor_enter_count_;
+ // Bitset of the encountered failure types. Bits are according to the values in VerifyError.
+ uint32_t encountered_failure_types_;
+
const bool can_load_classes_;
// Converts soft failures to hard failures when false. Only false when the compiler isn't
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 9cd2bdf..bee5834 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -31,6 +31,7 @@
return verifier->GetRegTypeCache()->GetFromId(line_[vsrc]);
}
+template <LockOp kLockOp>
inline bool RegisterLine::SetRegisterType(MethodVerifier* verifier, uint32_t vdst,
const RegType& new_type) {
DCHECK_LT(vdst, num_regs_);
@@ -43,8 +44,16 @@
// as they are not accessed, and our backends can handle this nowadays.
line_[vdst] = new_type.GetId();
}
- // Clear the monitor entry bits for this register.
- ClearAllRegToLockDepths(vdst);
+ switch (kLockOp) {
+ case LockOp::kClear:
+ // Clear the monitor entry bits for this register.
+ ClearAllRegToLockDepths(vdst);
+ break;
+ case LockOp::kKeep:
+ // Should only be doing this with reference types.
+ DCHECK(new_type.IsReferenceTypes());
+ break;
+ }
return true;
}
@@ -89,7 +98,7 @@
TypeCategory cat) {
DCHECK(cat == kTypeCategory1nr || cat == kTypeCategoryRef);
const RegType& type = GetRegisterType(verifier, vsrc);
- if (!SetRegisterType(verifier, vdst, type)) {
+ if (!SetRegisterType<LockOp::kClear>(verifier, vdst, type)) {
return;
}
if (!type.IsConflict() && // Allow conflicts to be copied around.
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index f286a45..bb6df76 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -155,6 +155,9 @@
for (const auto& monitor : monitors_) {
result += StringPrintf("{%d},", monitor);
}
+ for (auto& pairs : reg_to_lock_depths_) {
+ result += StringPrintf("<%d -> %x>", pairs.first, pairs.second);
+ }
return result;
}
@@ -175,7 +178,7 @@
<< "copyRes1 v" << vdst << "<- result0" << " type=" << type;
} else {
DCHECK(verifier->GetRegTypeCache()->GetFromId(result_[1]).IsUndefined());
- SetRegisterType(verifier, vdst, type);
+ SetRegisterType<LockOp::kClear>(verifier, vdst, type);
result_[0] = verifier->GetRegTypeCache()->Undefined().GetId();
}
}
@@ -201,7 +204,7 @@
void RegisterLine::CheckUnaryOp(MethodVerifier* verifier, const Instruction* inst,
const RegType& dst_type, const RegType& src_type) {
if (VerifyRegisterType(verifier, inst->VRegB_12x(), src_type)) {
- SetRegisterType(verifier, inst->VRegA_12x(), dst_type);
+ SetRegisterType<LockOp::kClear>(verifier, inst->VRegA_12x(), dst_type);
}
}
@@ -225,7 +228,7 @@
const RegType& dst_type,
const RegType& src_type1, const RegType& src_type2) {
if (VerifyRegisterTypeWide(verifier, inst->VRegB_12x(), src_type1, src_type2)) {
- SetRegisterType(verifier, inst->VRegA_12x(), dst_type);
+ SetRegisterType<LockOp::kClear>(verifier, inst->VRegA_12x(), dst_type);
}
}
@@ -241,11 +244,13 @@
DCHECK(dst_type.IsInteger());
if (GetRegisterType(verifier, vregB).IsBooleanTypes() &&
GetRegisterType(verifier, vregC).IsBooleanTypes()) {
- SetRegisterType(verifier, inst->VRegA_23x(), verifier->GetRegTypeCache()->Boolean());
+ SetRegisterType<LockOp::kClear>(verifier,
+ inst->VRegA_23x(),
+ verifier->GetRegTypeCache()->Boolean());
return;
}
}
- SetRegisterType(verifier, inst->VRegA_23x(), dst_type);
+ SetRegisterType<LockOp::kClear>(verifier, inst->VRegA_23x(), dst_type);
}
}
@@ -279,11 +284,13 @@
DCHECK(dst_type.IsInteger());
if (GetRegisterType(verifier, vregA).IsBooleanTypes() &&
GetRegisterType(verifier, vregB).IsBooleanTypes()) {
- SetRegisterType(verifier, vregA, verifier->GetRegTypeCache()->Boolean());
+ SetRegisterType<LockOp::kClear>(verifier,
+ vregA,
+ verifier->GetRegTypeCache()->Boolean());
return;
}
}
- SetRegisterType(verifier, vregA, dst_type);
+ SetRegisterType<LockOp::kClear>(verifier, vregA, dst_type);
}
}
@@ -321,11 +328,13 @@
/* check vB with the call, then check the constant manually */
const uint32_t val = is_lit16 ? inst->VRegC_22s() : inst->VRegC_22b();
if (GetRegisterType(verifier, vregB).IsBooleanTypes() && (val == 0 || val == 1)) {
- SetRegisterType(verifier, vregA, verifier->GetRegTypeCache()->Boolean());
+ SetRegisterType<LockOp::kClear>(verifier,
+ vregA,
+ verifier->GetRegTypeCache()->Boolean());
return;
}
}
- SetRegisterType(verifier, vregA, dst_type);
+ SetRegisterType<LockOp::kClear>(verifier, vregA, dst_type);
}
}
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index a9c4c95..41f1e28 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -47,6 +47,12 @@
kTypeCategoryRef = 3, // object reference
};
+// What to do with the lock levels when setting the register type.
+enum class LockOp {
+ kClear, // Clear the lock levels recorded.
+ kKeep // Leave the lock levels alone.
+};
+
// During verification, we associate one of these with every "interesting" instruction. We track
// the status of all registers, and (if the method has any monitor-enter instructions) maintain a
// stack of entered monitors (identified by code unit offset).
@@ -83,6 +89,15 @@
// Set the type of register N, verifying that the register is valid. If "newType" is the "Lo"
// part of a 64-bit value, register N+1 will be set to "newType+1".
// The register index was validated during the static pass, so we don't need to check it here.
+ //
+ // LockOp::kClear should be used by default; it will clear the lock levels associated with the
+ // register. An example is setting the register type because an instruction writes to the
+ // register.
+ // LockOp::kKeep keeps the lock levels of the register and only changes the register type. This
+ // is typical when the underlying value did not change, but we have "different" type information
+ // available now. An example is sharpening types after a check-cast. Note that when given kKeep,
+ // the new_type is dchecked to be a reference type.
+ template <LockOp kLockOp>
ALWAYS_INLINE bool SetRegisterType(MethodVerifier* verifier, uint32_t vdst,
const RegType& new_type)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 0c7cce9..e2c3afb 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -35,6 +35,7 @@
jclass WellKnownClasses::dalvik_system_DexPathList__Element;
jclass WellKnownClasses::dalvik_system_PathClassLoader;
jclass WellKnownClasses::dalvik_system_VMRuntime;
+jclass WellKnownClasses::java_lang_annotation_Annotation__array;
jclass WellKnownClasses::java_lang_BootClassLoader;
jclass WellKnownClasses::java_lang_ClassLoader;
jclass WellKnownClasses::java_lang_ClassNotFoundException;
@@ -59,6 +60,8 @@
jclass WellKnownClasses::java_nio_DirectByteBuffer;
jclass WellKnownClasses::java_util_ArrayList;
jclass WellKnownClasses::java_util_Collections;
+jclass WellKnownClasses::libcore_reflect_AnnotationFactory;
+jclass WellKnownClasses::libcore_reflect_AnnotationMember;
jclass WellKnownClasses::libcore_util_EmptyArray;
jclass WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk;
jclass WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer;
@@ -120,6 +123,8 @@
jmethodID WellKnownClasses::java_lang_Thread__UncaughtExceptionHandler_uncaughtException;
jmethodID WellKnownClasses::java_lang_ThreadGroup_removeThread;
jmethodID WellKnownClasses::java_nio_DirectByteBuffer_init;
+jmethodID WellKnownClasses::libcore_reflect_AnnotationFactory_createAnnotation;
+jmethodID WellKnownClasses::libcore_reflect_AnnotationMember_init;
jmethodID WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast;
jmethodID WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch;
@@ -213,6 +218,7 @@
dalvik_system_PathClassLoader = CacheClass(env, "dalvik/system/PathClassLoader");
dalvik_system_VMRuntime = CacheClass(env, "dalvik/system/VMRuntime");
+ java_lang_annotation_Annotation__array = CacheClass(env, "[Ljava/lang/annotation/Annotation;");
java_lang_BootClassLoader = CacheClass(env, "java/lang/BootClassLoader");
java_lang_ClassLoader = CacheClass(env, "java/lang/ClassLoader");
java_lang_ClassNotFoundException = CacheClass(env, "java/lang/ClassNotFoundException");
@@ -238,6 +244,8 @@
java_nio_DirectByteBuffer = CacheClass(env, "java/nio/DirectByteBuffer");
java_util_ArrayList = CacheClass(env, "java/util/ArrayList");
java_util_Collections = CacheClass(env, "java/util/Collections");
+ libcore_reflect_AnnotationFactory = CacheClass(env, "libcore/reflect/AnnotationFactory");
+ libcore_reflect_AnnotationMember = CacheClass(env, "libcore/reflect/AnnotationMember");
libcore_util_EmptyArray = CacheClass(env, "libcore/util/EmptyArray");
org_apache_harmony_dalvik_ddmc_Chunk = CacheClass(env, "org/apache/harmony/dalvik/ddmc/Chunk");
org_apache_harmony_dalvik_ddmc_DdmServer = CacheClass(env, "org/apache/harmony/dalvik/ddmc/DdmServer");
@@ -262,6 +270,8 @@
java_lang_Thread__UncaughtExceptionHandler_uncaughtException = CacheMethod(env, java_lang_Thread__UncaughtExceptionHandler, false, "uncaughtException", "(Ljava/lang/Thread;Ljava/lang/Throwable;)V");
java_lang_ThreadGroup_removeThread = CacheMethod(env, java_lang_ThreadGroup, false, "removeThread", "(Ljava/lang/Thread;)V");
java_nio_DirectByteBuffer_init = CacheMethod(env, java_nio_DirectByteBuffer, false, "<init>", "(JI)V");
+ libcore_reflect_AnnotationFactory_createAnnotation = CacheMethod(env, libcore_reflect_AnnotationFactory, true, "createAnnotation", "(Ljava/lang/Class;[Llibcore/reflect/AnnotationMember;)Ljava/lang/annotation/Annotation;");
+ libcore_reflect_AnnotationMember_init = CacheMethod(env, libcore_reflect_AnnotationMember, false, "<init>", "(Ljava/lang/String;Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/reflect/Method;)V");
org_apache_harmony_dalvik_ddmc_DdmServer_broadcast = CacheMethod(env, org_apache_harmony_dalvik_ddmc_DdmServer, true, "broadcast", "(I)V");
org_apache_harmony_dalvik_ddmc_DdmServer_dispatch = CacheMethod(env, org_apache_harmony_dalvik_ddmc_DdmServer, true, "dispatch", "(I[BII)Lorg/apache/harmony/dalvik/ddmc/Chunk;");
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 6dd8168..c856291 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -46,6 +46,7 @@
static jclass dalvik_system_DexPathList__Element;
static jclass dalvik_system_PathClassLoader;
static jclass dalvik_system_VMRuntime;
+ static jclass java_lang_annotation_Annotation__array;
static jclass java_lang_BootClassLoader;
static jclass java_lang_ClassLoader;
static jclass java_lang_ClassNotFoundException;
@@ -70,6 +71,8 @@
static jclass java_util_ArrayList;
static jclass java_util_Collections;
static jclass java_nio_DirectByteBuffer;
+ static jclass libcore_reflect_AnnotationFactory;
+ static jclass libcore_reflect_AnnotationMember;
static jclass libcore_util_EmptyArray;
static jclass org_apache_harmony_dalvik_ddmc_Chunk;
static jclass org_apache_harmony_dalvik_ddmc_DdmServer;
@@ -131,6 +134,8 @@
static jmethodID java_lang_Thread__UncaughtExceptionHandler_uncaughtException;
static jmethodID java_lang_ThreadGroup_removeThread;
static jmethodID java_nio_DirectByteBuffer_init;
+ static jmethodID libcore_reflect_AnnotationFactory_createAnnotation;
+ static jmethodID libcore_reflect_AnnotationMember_init;
static jmethodID org_apache_harmony_dalvik_ddmc_DdmServer_broadcast;
static jmethodID org_apache_harmony_dalvik_ddmc_DdmServer_dispatch;
diff --git a/test/004-SignalTest/signaltest.cc b/test/004-SignalTest/signaltest.cc
index 1414715..6dd6355 100644
--- a/test/004-SignalTest/signaltest.cc
+++ b/test/004-SignalTest/signaltest.cc
@@ -62,9 +62,12 @@
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
sc->pc += 4; // Skip instruction causing segv.
-#elif defined(__i386__) || defined(__x86_64__)
+#elif defined(__i386__)
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
uc->CTX_EIP += 3;
+#elif defined(__x86_64__)
+ struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+ uc->CTX_EIP += 2;
#else
UNUSED(context);
#endif
@@ -93,13 +96,16 @@
char *go_away_compiler = nullptr;
extern "C" JNIEXPORT jint JNICALL Java_Main_testSignal(JNIEnv*, jclass) {
-#if defined(__arm__) || defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
+#if defined(__arm__) || defined(__i386__) || defined(__aarch64__)
// On supported architectures we cause a real SEGV.
*go_away_compiler = 'a';
+#elif defined(__x86_64__)
+ // Cause a SEGV using an instruction known to be 2 bytes long to account for hardcoded jump
+ // in the signal handler
+ asm volatile("movl $0, %%eax;" "movb %%ah, (%%rax);" : : : "%eax");
#else
// On other architectures we simulate SEGV.
kill(getpid(), SIGSEGV);
#endif
return 1234;
}
-
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index 04326b3..948273a 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -206,8 +206,9 @@
#if defined(__arm__) || defined(__i386__) || defined(__aarch64__)
*go_away_compiler = 'a';
#elif defined(__x86_64__)
- // Cause a SEGV using an instruction known to be 3 bytes long
- asm volatile("movl $0, %%eax;" "movb $1, (%%eax);" : : : "%eax");
+ // Cause a SEGV using an instruction known to be 2 bytes long to account for hardcoded jump
+ // in the signal handler
+ asm volatile("movl $0, %%eax;" "movb %%ah, (%%rax);" : : : "%eax");
#else
// On other architectures we simulate SEGV.
kill(getpid(), SIGSEGV);
@@ -402,9 +403,12 @@
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
sc->pc += 4; // Skip instruction causing segv & sigill.
-#elif defined(__i386__) || defined(__x86_64__)
+#elif defined(__i386__)
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
uc->CTX_EIP += 3;
+#elif defined(__x86_64__)
+ struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+ uc->CTX_EIP += 2;
#else
UNUSED(context);
#endif
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index 21a8171..6568eac 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -1,6 +1,5 @@
PackedSwitch
b/17790197
-b/17978759
FloatBadArgReg
negLong
sameFieldNames
@@ -42,4 +41,7 @@
b/23201502 (float)
b/23201502 (double)
b/23300986
+b/23300986 (2)
+b/23502994 (if-eqz)
+b/23502994 (check-cast)
Done!
diff --git a/test/800-smali/smali/b_17978759.smali b/test/800-smali/smali/b_17978759.smali
deleted file mode 100644
index 07bcae5..0000000
--- a/test/800-smali/smali/b_17978759.smali
+++ /dev/null
@@ -1,28 +0,0 @@
-.class public LB17978759;
-.super Ljava/lang/Object;
-
- .method public constructor <init>()V
- .registers 1
- invoke-direct {p0}, Ljava/lang/Object;-><init>()V
- return-void
- .end method
-
- .method public test()V
- .registers 2
-
- move-object v0, p0
- # v0 and p0 alias
- monitor-enter p0
- # monitor-enter on p0
- monitor-exit v0
- # monitor-exit on v0, however, verifier doesn't track this and so this is
- # a warning. Verifier will still think p0 is locked.
-
- move-object v0, p0
- # v0 will now appear locked.
- monitor-enter v0
- # Attempt to lock v0 twice is a verifier failure.
- monitor-exit v0
-
- return-void
- .end method
diff --git a/test/800-smali/smali/b_23300986.smali b/test/800-smali/smali/b_23300986.smali
index 5ed8e5e..f008b92 100644
--- a/test/800-smali/smali/b_23300986.smali
+++ b/test/800-smali/smali/b_23300986.smali
@@ -11,3 +11,13 @@
monitor-exit v1
return-void
.end method
+
+.method public static runAliasBeforeEnter(Ljava/lang/Object;)V
+ .registers 3
+ move-object v1, v2 # Copy parameter into v1, establishing an alias.
+ monitor-enter v2 # Lock on parameter
+ monitor-exit v1 # Unlock on alias
+ monitor-enter v2 # Do it again.
+ monitor-exit v1
+ return-void
+.end method
diff --git a/test/800-smali/smali/b_23502994.smali b/test/800-smali/smali/b_23502994.smali
new file mode 100644
index 0000000..d1d0554
--- /dev/null
+++ b/test/800-smali/smali/b_23502994.smali
@@ -0,0 +1,45 @@
+.class public LB23502994;
+
+.super Ljava/lang/Object;
+
+.method public static runIF_EQZ(Ljava/lang/Object;)V
+ .registers 3
+ monitor-enter v2 # Lock on parameter
+
+ # Sharpen, and try to unlock (in both branches). We should not lose the lock info when we make
+ # the register type more precise.
+
+ instance-of v0, v2, Ljava/lang/String;
+ if-eqz v0, :LnotString
+
+ # At this point v2 is of type Ljava/lang/String;
+ monitor-exit v2
+
+ goto :Lend
+
+:LnotString
+ monitor-exit v2 # Unlock the else branch
+
+ # Fall-through.
+
+:Lend
+ return-void
+
+.end method
+
+
+.method public static runCHECKCAST(Ljava/lang/Object;)V
+ .registers 3
+ monitor-enter v2 # Lock on parameter
+
+ # Sharpen, and try to unlock. We should not lose the lock info when we make the register type
+ # more precise.
+
+ check-cast v2, Ljava/lang/String;
+
+ # At this point v2 is of type Ljava/lang/String;
+ monitor-exit v2
+
+ return-void
+
+.end method
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index a89b849..ba4990a 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -53,8 +53,6 @@
new Object[]{123}, null, 123));
testCases.add(new TestCase("b/17790197", "B17790197", "getInt", null, null, 100));
- testCases.add(new TestCase("b/17978759", "B17978759", "test", null, new VerifyError(),
- null));
testCases.add(new TestCase("FloatBadArgReg", "FloatBadArgReg", "getInt",
new Object[]{100}, null, 100));
testCases.add(new TestCase("negLong", "negLong", "negLong", null, null, 122142L));
@@ -129,6 +127,12 @@
new NullPointerException(), null));
testCases.add(new TestCase("b/23300986", "B23300986", "runAliasAfterEnter",
new Object[] { new Object() }, null, null));
+ testCases.add(new TestCase("b/23300986 (2)", "B23300986", "runAliasBeforeEnter",
+ new Object[] { new Object() }, null, null));
+ testCases.add(new TestCase("b/23502994 (if-eqz)", "B23502994", "runIF_EQZ",
+ new Object[] { new Object() }, null, null));
+ testCases.add(new TestCase("b/23502994 (check-cast)", "B23502994", "runCHECKCAST",
+ new Object[] { "abc" }, null, null));
}
public void runTests() {