Merge "Fix handling of dex cache arrays for method types."
diff --git a/cmdline/cmdline.h b/cmdline/cmdline.h
index 4dcaf80..dec9c83 100644
--- a/cmdline/cmdline.h
+++ b/cmdline/cmdline.h
@@ -293,7 +293,7 @@
template <typename Args = CmdlineArgs>
struct CmdlineMain {
int Main(int argc, char** argv) {
- InitLogging(argv);
+ InitLogging(argv, Runtime::Aborter);
std::unique_ptr<Args> args = std::unique_ptr<Args>(CreateArguments());
args_ = args.get();
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 5809dcd..cad5104 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -122,7 +122,7 @@
using RuntimeParser = ParsedOptions::RuntimeParser;
static void SetUpTestCase() {
- art::InitLogging(nullptr); // argv = null
+ art::InitLogging(nullptr, art::Runtime::Aborter); // argv = null
}
virtual void SetUp() {
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 8d64c65..afaec52 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1134,6 +1134,7 @@
VLOG(compiler) << "Adding " << descriptor << " to image classes";
for (size_t i = 0; i < klass->NumDirectInterfaces(); ++i) {
StackHandleScope<1> hs2(self);
+ // May cause thread suspension.
MaybeAddToImageClasses(hs2.NewHandle(mirror::Class::GetDirectInterface(self, klass, i)),
image_classes);
}
@@ -1153,15 +1154,14 @@
// Note: we can use object pointers because we suspend all threads.
class ClinitImageUpdate {
public:
- static ClinitImageUpdate* Create(std::unordered_set<std::string>* image_class_descriptors,
- Thread* self, ClassLinker* linker, std::string* error_msg) {
- std::unique_ptr<ClinitImageUpdate> res(new ClinitImageUpdate(image_class_descriptors, self,
+ static ClinitImageUpdate* Create(VariableSizedHandleScope& hs,
+ std::unordered_set<std::string>* image_class_descriptors,
+ Thread* self,
+ ClassLinker* linker) {
+ std::unique_ptr<ClinitImageUpdate> res(new ClinitImageUpdate(hs,
+ image_class_descriptors,
+ self,
linker));
- if (res->dex_cache_class_ == nullptr) {
- *error_msg = "Could not find DexCache class.";
- return nullptr;
- }
-
return res.release();
}
@@ -1171,7 +1171,9 @@
}
// Visitor for VisitReferences.
- void operator()(mirror::Object* object, MemberOffset field_offset, bool /* is_static */) const
+ void operator()(ObjPtr<mirror::Object> object,
+ MemberOffset field_offset,
+ bool /* is_static */) const
REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* ref = object->GetFieldObject<mirror::Object>(field_offset);
if (ref != nullptr) {
@@ -1180,8 +1182,8 @@
}
// java.lang.Reference visitor for VisitReferences.
- void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref ATTRIBUTE_UNUSED)
- const {}
+ void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const {}
// Ignore class native roots.
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
@@ -1193,6 +1195,9 @@
for (mirror::Class* klass_root : image_classes_) {
VisitClinitClassesObject(klass_root);
}
+ for (Handle<mirror::Class> h_klass : to_insert_) {
+ MaybeAddToImageClasses(h_klass, image_class_descriptors_);
+ }
}
private:
@@ -1219,20 +1224,19 @@
ClinitImageUpdate* const data_;
};
- ClinitImageUpdate(std::unordered_set<std::string>* image_class_descriptors, Thread* self,
- ClassLinker* linker)
- REQUIRES_SHARED(Locks::mutator_lock_) :
- image_class_descriptors_(image_class_descriptors), self_(self) {
+ ClinitImageUpdate(VariableSizedHandleScope& hs,
+ std::unordered_set<std::string>* image_class_descriptors,
+ Thread* self,
+ ClassLinker* linker) REQUIRES_SHARED(Locks::mutator_lock_)
+ : hs_(hs),
+ image_class_descriptors_(image_class_descriptors),
+ self_(self) {
CHECK(linker != nullptr);
CHECK(image_class_descriptors != nullptr);
// Make sure nobody interferes with us.
old_cause_ = self->StartAssertNoThreadSuspension("Boot image closure");
- // Find the interesting classes.
- dex_cache_class_ = linker->LookupClass(self, "Ljava/lang/DexCache;",
- ComputeModifiedUtf8Hash("Ljava/lang/DexCache;"), nullptr);
-
// Find all the already-marked classes.
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
FindImageClassesVisitor visitor(this);
@@ -1251,25 +1255,25 @@
marked_objects_.insert(object);
if (object->IsClass()) {
- // If it is a class, add it.
- StackHandleScope<1> hs(self_);
- MaybeAddToImageClasses(hs.NewHandle(object->AsClass()), image_class_descriptors_);
+      // Add to the TODO list since MaybeAddToImageClasses may cause thread suspension. Thread
+      // suspension is not safe to do in VisitObjects or VisitReferences.
+ to_insert_.push_back(hs_.NewHandle(object->AsClass()));
} else {
// Else visit the object's class.
VisitClinitClassesObject(object->GetClass());
}
// If it is not a DexCache, visit all references.
- mirror::Class* klass = object->GetClass();
- if (klass != dex_cache_class_) {
+ if (!object->IsDexCache()) {
object->VisitReferences(*this, *this);
}
}
+ VariableSizedHandleScope& hs_;
+ mutable std::vector<Handle<mirror::Class>> to_insert_;
mutable std::unordered_set<mirror::Object*> marked_objects_;
std::unordered_set<std::string>* const image_class_descriptors_;
std::vector<mirror::Class*> image_classes_;
- const mirror::Class* dex_cache_class_;
Thread* const self_;
const char* old_cause_;
@@ -1285,12 +1289,12 @@
// Suspend all threads.
ScopedSuspendAll ssa(__FUNCTION__);
+ VariableSizedHandleScope hs(Thread::Current());
std::string error_msg;
- std::unique_ptr<ClinitImageUpdate> update(ClinitImageUpdate::Create(image_classes_.get(),
+ std::unique_ptr<ClinitImageUpdate> update(ClinitImageUpdate::Create(hs,
+ image_classes_.get(),
Thread::Current(),
- runtime->GetClassLinker(),
- &error_msg));
- CHECK(update.get() != nullptr) << error_msg; // TODO: Soft failure?
+ runtime->GetClassLinker()));
// Do the marking.
update->Walk();
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index b580049..6f48779 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -94,7 +94,7 @@
/*low_4gb*/false,
&error_msg));
CHECK(ef.get() != nullptr) << error_msg;
- CHECK(ef->Load(false, /*low_4gb*/false, &error_msg)) << error_msg;
+ CHECK(ef->Load(file.get(), false, /*low_4gb*/false, &error_msg)) << error_msg;
EXPECT_EQ(dl_oatdata, ef->FindDynamicSymbolAddress("oatdata"));
EXPECT_EQ(dl_oatexec, ef->FindDynamicSymbolAddress("oatexec"));
EXPECT_EQ(dl_oatlastword, ef->FindDynamicSymbolAddress("oatlastword"));
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 7560011..8f15ea4 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -1329,7 +1329,7 @@
root->Assign(VisitReference(root->AsMirrorPtr()));
}
- ALWAYS_INLINE void operator() (mirror::Object* obj,
+ ALWAYS_INLINE void operator() (ObjPtr<mirror::Object> obj,
MemberOffset offset,
bool is_static ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1338,8 +1338,8 @@
obj->SetFieldObject</*kTransactionActive*/false>(offset, VisitReference(ref));
}
- ALWAYS_INLINE void operator() (mirror::Class* klass ATTRIBUTE_UNUSED,
- mirror::Reference* ref) const
+ ALWAYS_INLINE void operator() (ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) {
ref->SetReferent</*kTransactionActive*/false>(
VisitReference(ref->GetReferent<kWithoutReadBarrier>()));
@@ -1498,10 +1498,15 @@
// Calculate how big the intern table will be after being serialized.
InternTable* const intern_table = image_info.intern_table_.get();
CHECK_EQ(intern_table->WeakSize(), 0u) << " should have strong interned all the strings";
- image_info.intern_table_bytes_ = intern_table->WriteToMemory(nullptr);
+ if (intern_table->StrongSize() != 0u) {
+ image_info.intern_table_bytes_ = intern_table->WriteToMemory(nullptr);
+ }
// Calculate the size of the class table.
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
- image_info.class_table_bytes_ += image_info.class_table_->WriteToMemory(nullptr);
+ DCHECK_EQ(image_info.class_table_->NumZygoteClasses(), 0u);
+ if (image_info.class_table_->NumNonZygoteClasses() != 0u) {
+ image_info.class_table_bytes_ += image_info.class_table_->WriteToMemory(nullptr);
+ }
}
// Calculate bin slot offsets.
@@ -1947,18 +1952,19 @@
void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
- void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<Object> obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
- Object* ref = obj->GetFieldObject<Object, kVerifyNone>(offset);
+ ObjPtr<Object> ref = obj->GetFieldObject<Object, kVerifyNone>(offset);
// Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
// image.
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
offset,
- image_writer_->GetImageAddress(ref));
+ image_writer_->GetImageAddress(ref.Ptr()));
}
// java.lang.ref.Reference visitor.
- void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
mirror::Reference::ReferentOffset(),
@@ -1975,14 +1981,14 @@
FixupClassVisitor(ImageWriter* image_writer, Object* copy) : FixupVisitor(image_writer, copy) {
}
- void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<Object> obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
DCHECK(obj->IsClass());
FixupVisitor::operator()(obj, offset, /*is_static*/false);
}
- void operator()(mirror::Class* klass ATTRIBUTE_UNUSED,
- mirror::Reference* ref ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
LOG(FATAL) << "Reference not expected here.";
}
@@ -2051,7 +2057,7 @@
void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy) {
orig->FixupNativePointers(copy, target_ptr_size_, NativeLocationVisitor(this));
FixupClassVisitor visitor(this, copy);
- static_cast<mirror::Object*>(orig)->VisitReferences(visitor, visitor);
+ ObjPtr<mirror::Object>(orig)->VisitReferences(visitor, visitor);
// Remove the clinitThreadId. This is required for image determinism.
copy->SetClinitThreadId(static_cast<pid_t>(0));
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index e26fa7f..0ce1362 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1090,7 +1090,7 @@
original_argc = argc;
original_argv = argv;
- InitLogging(argv);
+ InitLogging(argv, Runtime::Aborter);
// Skip over argv[0].
argv++;
@@ -1920,15 +1920,14 @@
TimingLogger::ScopedTiming t("dex2oat OatFile copy", timings_);
std::unique_ptr<File> in(OS::OpenFileForReading(oat_filenames_[i]));
std::unique_ptr<File> out(OS::CreateEmptyFile(oat_unstripped_[i]));
- size_t buffer_size = 8192;
- std::unique_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
- while (true) {
- int bytes_read = TEMP_FAILURE_RETRY(read(in->Fd(), buffer.get(), buffer_size));
- if (bytes_read <= 0) {
- break;
- }
- bool write_ok = out->WriteFully(buffer.get(), bytes_read);
- CHECK(write_ok);
+ int64_t in_length = in->GetLength();
+ if (in_length < 0) {
+ PLOG(ERROR) << "Failed to get the length of oat file: " << in->GetPath();
+ return false;
+ }
+ if (!out->Copy(in.get(), 0, in_length)) {
+ PLOG(ERROR) << "Failed to copy oat file to file: " << out->GetPath();
+ return false;
}
if (out->FlushCloseOrErase() != 0) {
PLOG(ERROR) << "Failed to flush and close copied oat file: " << oat_unstripped_[i];
diff --git a/dexdump/dexdump_main.cc b/dexdump/dexdump_main.cc
index 5c032a0..74cae3c 100644
--- a/dexdump/dexdump_main.cc
+++ b/dexdump/dexdump_main.cc
@@ -29,6 +29,7 @@
#include <unistd.h>
#include "base/logging.h"
+#include "runtime.h"
#include "mem_map.h"
namespace art {
@@ -59,7 +60,7 @@
*/
int dexdumpDriver(int argc, char** argv) {
// Art specific set up.
- InitLogging(argv);
+ InitLogging(argv, Runtime::Aborter);
MemMap::Init();
// Reset options.
diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc
index 728e389..2203fba 100644
--- a/dexlayout/dexlayout_main.cc
+++ b/dexlayout/dexlayout_main.cc
@@ -31,6 +31,7 @@
#include "base/logging.h"
#include "jit/offline_profiling_info.h"
+#include "runtime.h"
#include "mem_map.h"
namespace art {
@@ -65,7 +66,7 @@
*/
int DexlayoutDriver(int argc, char** argv) {
// Art specific set up.
- InitLogging(argv);
+ InitLogging(argv, Runtime::Aborter);
MemMap::Init();
// Reset options.
diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc
index a1bde0e..68473c4 100644
--- a/dexlist/dexlist.cc
+++ b/dexlist/dexlist.cc
@@ -213,7 +213,7 @@
*/
int dexlistDriver(int argc, char** argv) {
// Art specific set up.
- InitLogging(argv);
+ InitLogging(argv, Runtime::Aborter);
MemMap::Init();
// Reset options.
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index b5c252d..986f265 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -442,7 +442,7 @@
return ERROR_OAT_FILE;
}
- const std::string& file_path = oat_in->GetFile().GetPath();
+ const std::string& file_path = oat_in->GetFilePath();
const OatHeader* oat_header = GetOatHeader(oat_in);
if (oat_header == nullptr) {
@@ -725,15 +725,16 @@
}
-void PatchOat::PatchVisitor::operator() (mirror::Object* obj, MemberOffset off,
+void PatchOat::PatchVisitor::operator() (ObjPtr<mirror::Object> obj,
+ MemberOffset off,
bool is_static_unused ATTRIBUTE_UNUSED) const {
mirror::Object* referent = obj->GetFieldObject<mirror::Object, kVerifyNone>(off);
mirror::Object* moved_object = patcher_->RelocatedAddressOfPointer(referent);
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
}
-void PatchOat::PatchVisitor::operator() (mirror::Class* cls ATTRIBUTE_UNUSED,
- mirror::Reference* ref) const {
+void PatchOat::PatchVisitor::operator() (ObjPtr<mirror::Class> cls ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Reference> ref) const {
MemberOffset off = mirror::Reference::ReferentOffset();
mirror::Object* referent = ref->GetReferent();
DCHECK(referent == nullptr ||
@@ -856,7 +857,7 @@
}
OatHeader* oat_header = reinterpret_cast<OatHeader*>(oat_file->Begin() + rodata_sec->sh_offset);
if (!oat_header->IsValid()) {
- LOG(ERROR) << "Elf file " << oat_file->GetFile().GetPath() << " has an invalid oat header";
+ LOG(ERROR) << "Elf file " << oat_file->GetFilePath() << " has an invalid oat header";
return false;
}
oat_header->RelocateOat(delta_);
@@ -864,10 +865,11 @@
}
bool PatchOat::PatchElf() {
- if (oat_file_->Is64Bit())
+ if (oat_file_->Is64Bit()) {
return PatchElf<ElfFileImpl64>(oat_file_->GetImpl64());
- else
+ } else {
return PatchElf<ElfFileImpl32>(oat_file_->GetImpl32());
+ }
}
template <typename ElfFileImpl>
@@ -1368,15 +1370,13 @@
}
static int patchoat(int argc, char **argv) {
- InitLogging(argv);
+ InitLogging(argv, Runtime::Aborter);
MemMap::Init();
const bool debug = kIsDebugBuild;
orig_argc = argc;
orig_argv = argv;
TimingLogger timings("patcher", false, false);
- InitLogging(argv);
-
// Skip over the command name.
argv++;
argc--;
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index a97b051..e7a3e91 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -198,10 +198,10 @@
public:
PatchVisitor(PatchOat* patcher, mirror::Object* copy) : patcher_(patcher), copy_(copy) {}
~PatchVisitor() {}
- void operator() (mirror::Object* obj, MemberOffset off, bool b) const
+ void operator() (ObjPtr<mirror::Object> obj, MemberOffset off, bool b) const
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// For reference classes.
- void operator() (mirror::Class* cls, mirror::Reference* ref) const
+ void operator() (ObjPtr<mirror::Class> cls, ObjPtr<mirror::Reference> ref) const
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// TODO: Consider using these for updating native class roots?
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
diff --git a/profman/profman.cc b/profman/profman.cc
index 7722e80..b17816b 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -33,6 +33,7 @@
#include "base/unix_file/fd_file.h"
#include "dex_file.h"
#include "jit/offline_profiling_info.h"
+#include "runtime.h"
#include "utils.h"
#include "zip_archive.h"
#include "profile_assistant.h"
@@ -143,7 +144,7 @@
original_argc = argc;
original_argv = argv;
- InitLogging(argv);
+ InitLogging(argv, Runtime::Aborter);
// Skip over the command name.
argv++;
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 73c6cf1..1aa6a00 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -227,9 +227,10 @@
case kDirect:
return !IsDirect() || IsStatic();
case kVirtual: {
- // We have an error if we are direct or a non-default, non-miranda interface method.
+ // We have an error if we are direct or a non-copied (i.e. not part of a real class) interface
+ // method.
mirror::Class* methods_class = GetDeclaringClass();
- return IsDirect() || (methods_class->IsInterface() && !IsDefault() && !IsMiranda());
+ return IsDirect() || (methods_class->IsInterface() && !IsCopied());
}
case kSuper:
// Constructors and static methods are called with invoke-direct.
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index 12d3be7..f24a862 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -296,7 +296,7 @@
return const_iterator(this, NumBuckets());
}
- bool Empty() {
+ bool Empty() const {
return Size() == 0;
}
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 08c036e..6b21a56 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -21,14 +21,12 @@
#include <sstream>
#include "base/mutex.h"
-#include "runtime.h"
#include "thread-inl.h"
#include "utils.h"
// Headers for LogMessage::LogLine.
#ifdef ART_TARGET_ANDROID
#include <android/log.h>
-#include <android/set_abort_message.h>
#else
#include <sys/types.h>
#include <unistd.h>
@@ -57,17 +55,7 @@
: "art";
}
-NO_RETURN
-static void RuntimeAborter(const char* abort_message) {
-#ifdef __ANDROID__
- android_set_abort_message(abort_message);
-#else
- UNUSED(abort_message);
-#endif
- Runtime::Abort(abort_message);
-}
-
-void InitLogging(char* argv[]) {
+void InitLogging(char* argv[], AbortFunction& abort_function) {
if (gCmdLine.get() != nullptr) {
return;
}
@@ -97,7 +85,8 @@
#else
#define INIT_LOGGING_DEFAULT_LOGGER android::base::StderrLogger
#endif
- android::base::InitLogging(argv, INIT_LOGGING_DEFAULT_LOGGER, RuntimeAborter);
+ android::base::InitLogging(argv, INIT_LOGGING_DEFAULT_LOGGER,
+ std::move<AbortFunction>(abort_function));
#undef INIT_LOGGING_DEFAULT_LOGGER
}
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 5f84204..a173ac2 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -29,6 +29,9 @@
using ::android::base::LogSeverity;
using ::android::base::ScopedLogSeverity;
+// Abort function.
+using AbortFunction = void(const char*);
+
// The members of this struct are the valid arguments to VLOG and VLOG_IS_ON in code,
// and the "-verbose:" command line argument.
struct LogVerbosity {
@@ -71,7 +74,7 @@
// The tag (or '*' for the global level) comes first, followed by a colon
// and a letter indicating the minimum priority level we're expected to log.
// This can be used to reveal or conceal logs with specific tags.
-extern void InitLogging(char* argv[]);
+extern void InitLogging(char* argv[], AbortFunction& default_aborter);
// Returns the command line used to invoke the current tool or null if InitLogging hasn't been
// performed.
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 1183dea..e77e6d7 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -64,6 +64,8 @@
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
Uninterruptible Roles::uninterruptible_;
+ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
+Mutex* Locks::jni_weak_globals_lock_ = nullptr;
struct AllMutexData {
// A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
@@ -1088,6 +1090,15 @@
DCHECK(reference_queue_soft_references_lock_ == nullptr);
reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level);
+ UPDATE_CURRENT_LOCK_LEVEL(kJniGlobalsLock);
+ DCHECK(jni_globals_lock_ == nullptr);
+ jni_globals_lock_ =
+ new ReaderWriterMutex("JNI global reference table lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kJniWeakGlobalsLock);
+ DCHECK(jni_weak_globals_lock_ == nullptr);
+ jni_weak_globals_lock_ = new Mutex("JNI weak global reference table lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
DCHECK(abort_lock_ == nullptr);
abort_lock_ = new Mutex("abort lock", current_lock_level, true);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index b3ff6c2..e0cca7b 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -68,6 +68,7 @@
kMarkSweepMarkStackLock,
kTransactionLogLock,
kJniWeakGlobalsLock,
+ kJniGlobalsLock,
kReferenceQueueSoftReferencesLock,
kReferenceQueuePhantomReferencesLock,
kReferenceQueueFinalizerReferencesLock,
@@ -678,8 +679,14 @@
// Guards soft references queue.
static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);
+ // Guard accesses to the JNI Global Reference table.
+ static ReaderWriterMutex* jni_globals_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);
+
+ // Guard accesses to the JNI Weak Global Reference table.
+ static Mutex* jni_weak_globals_lock_ ACQUIRED_AFTER(jni_globals_lock_);
+
// Have an exclusive aborting thread.
- static Mutex* abort_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);
+ static Mutex* abort_lock_ ACQUIRED_AFTER(jni_weak_globals_lock_);
// Allow mutual exclusion when manipulating Thread::suspend_count_.
// TODO: Does the trade-off of a per-thread lock make sense?
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index d7d6f2b..d07aa89 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -316,7 +316,6 @@
ClassLinker::ClassLinker(InternTable* intern_table)
// dex_lock_ is recursive as it may be used in stack dumping.
: dex_lock_("ClassLinker dex lock", kDexLock),
- dex_cache_boot_image_class_lookup_required_(false),
failed_dex_cache_class_lookups_(0),
class_roots_(nullptr),
array_iftable_(nullptr),
@@ -969,7 +968,6 @@
return false;
}
}
- dex_cache_boot_image_class_lookup_required_ = true;
std::vector<const OatFile*> oat_files =
runtime->GetOatFileManager().RegisterImageOatFiles(spaces);
DCHECK(!oat_files.empty());
@@ -1256,7 +1254,6 @@
// Add image classes into the class table for the class loader, and fixup the dex caches and
// class loader fields.
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
- ClassTable* table = InsertClassTableForClassLoader(class_loader.Get());
// Dex cache array fixup is all or nothing, we must reject app images that have mixed since we
// rely on clobering the dex cache arrays in the image to forward to bss.
size_t num_dex_caches_with_bss_arrays = 0;
@@ -1393,103 +1390,42 @@
StackHandleScope<1> hs3(self);
RegisterDexFileLocked(*dex_file, hs3.NewHandle(dex_cache));
}
- GcRoot<mirror::Class>* const types = dex_cache->GetResolvedTypes();
- const size_t num_types = dex_cache->NumResolvedTypes();
- if (new_class_set == nullptr) {
- for (int32_t j = 0; j < static_cast<int32_t>(num_types); j++) {
- // The image space is not yet added to the heap, avoid read barriers.
- mirror::Class* klass = types[j].Read();
- // There may also be boot image classes,
- if (space->HasAddress(klass)) {
- DCHECK_NE(klass->GetStatus(), mirror::Class::kStatusError);
- // Update the class loader from the one in the image class loader to the one that loaded
- // the app image.
- klass->SetClassLoader(class_loader.Get());
- // The resolved type could be from another dex cache, go through the dex cache just in
- // case. May be null for array classes.
- if (klass->GetDexCacheStrings() != nullptr) {
- DCHECK(!klass->IsArrayClass());
- klass->SetDexCacheStrings(klass->GetDexCache()->GetStrings());
- }
- // If there are multiple dex caches, there may be the same class multiple times
- // in different dex caches. Check for this since inserting will add duplicates
- // otherwise.
- if (num_dex_caches > 1) {
- mirror::Class* existing = table->LookupByDescriptor(klass);
- if (existing != nullptr) {
- DCHECK_EQ(existing, klass) << PrettyClass(klass);
- } else {
- table->Insert(klass);
- }
- } else {
- table->Insert(klass);
- }
- // Double checked VLOG to avoid overhead.
- if (VLOG_IS_ON(image)) {
- VLOG(image) << PrettyClass(klass) << " " << klass->GetStatus();
- if (!klass->IsArrayClass()) {
- VLOG(image) << "From " << klass->GetDexCache()->GetDexFile()->GetBaseLocation();
- }
- VLOG(image) << "Direct methods";
- for (ArtMethod& m : klass->GetDirectMethods(kRuntimePointerSize)) {
- VLOG(image) << PrettyMethod(&m);
- }
- VLOG(image) << "Virtual methods";
- for (ArtMethod& m : klass->GetVirtualMethods(kRuntimePointerSize)) {
- VLOG(image) << PrettyMethod(&m);
- }
- }
- } else {
- DCHECK(klass == nullptr || heap->ObjectIsInBootImageSpace(klass))
- << klass << " " << PrettyClass(klass);
- }
- }
- }
if (kIsDebugBuild) {
+ CHECK(new_class_set != nullptr);
+ GcRoot<mirror::Class>* const types = dex_cache->GetResolvedTypes();
+ const size_t num_types = dex_cache->NumResolvedTypes();
for (int32_t j = 0; j < static_cast<int32_t>(num_types); j++) {
// The image space is not yet added to the heap, avoid read barriers.
mirror::Class* klass = types[j].Read();
if (space->HasAddress(klass)) {
DCHECK_NE(klass->GetStatus(), mirror::Class::kStatusError);
- if (kIsDebugBuild) {
- if (new_class_set != nullptr) {
- auto it = new_class_set->Find(GcRoot<mirror::Class>(klass));
- DCHECK(it != new_class_set->end());
- DCHECK_EQ(it->Read(), klass);
- mirror::Class* super_class = klass->GetSuperClass();
- if (super_class != nullptr && !heap->ObjectIsInBootImageSpace(super_class)) {
- auto it2 = new_class_set->Find(GcRoot<mirror::Class>(super_class));
- DCHECK(it2 != new_class_set->end());
- DCHECK_EQ(it2->Read(), super_class);
- }
- } else {
- DCHECK_EQ(table->LookupByDescriptor(klass), klass);
- mirror::Class* super_class = klass->GetSuperClass();
- if (super_class != nullptr && !heap->ObjectIsInBootImageSpace(super_class)) {
- CHECK_EQ(table->LookupByDescriptor(super_class), super_class);
- }
+ auto it = new_class_set->Find(GcRoot<mirror::Class>(klass));
+ DCHECK(it != new_class_set->end());
+ DCHECK_EQ(it->Read(), klass);
+ mirror::Class* super_class = klass->GetSuperClass();
+ if (super_class != nullptr && !heap->ObjectIsInBootImageSpace(super_class)) {
+ auto it2 = new_class_set->Find(GcRoot<mirror::Class>(super_class));
+ DCHECK(it2 != new_class_set->end());
+ DCHECK_EQ(it2->Read(), super_class);
+ }
+ for (ArtMethod& m : klass->GetDirectMethods(kRuntimePointerSize)) {
+ const void* code = m.GetEntryPointFromQuickCompiledCode();
+ const void* oat_code = m.IsInvokable() ? GetQuickOatCodeFor(&m) : code;
+ if (!IsQuickResolutionStub(code) &&
+ !IsQuickGenericJniStub(code) &&
+ !IsQuickToInterpreterBridge(code) &&
+ !m.IsNative()) {
+ DCHECK_EQ(code, oat_code) << PrettyMethod(&m);
}
}
- if (kIsDebugBuild) {
- for (ArtMethod& m : klass->GetDirectMethods(kRuntimePointerSize)) {
- const void* code = m.GetEntryPointFromQuickCompiledCode();
- const void* oat_code = m.IsInvokable() ? GetQuickOatCodeFor(&m) : code;
- if (!IsQuickResolutionStub(code) &&
- !IsQuickGenericJniStub(code) &&
- !IsQuickToInterpreterBridge(code) &&
- !m.IsNative()) {
- DCHECK_EQ(code, oat_code) << PrettyMethod(&m);
- }
- }
- for (ArtMethod& m : klass->GetVirtualMethods(kRuntimePointerSize)) {
- const void* code = m.GetEntryPointFromQuickCompiledCode();
- const void* oat_code = m.IsInvokable() ? GetQuickOatCodeFor(&m) : code;
- if (!IsQuickResolutionStub(code) &&
- !IsQuickGenericJniStub(code) &&
- !IsQuickToInterpreterBridge(code) &&
- !m.IsNative()) {
- DCHECK_EQ(code, oat_code) << PrettyMethod(&m);
- }
+ for (ArtMethod& m : klass->GetVirtualMethods(kRuntimePointerSize)) {
+ const void* code = m.GetEntryPointFromQuickCompiledCode();
+ const void* oat_code = m.IsInvokable() ? GetQuickOatCodeFor(&m) : code;
+ if (!IsQuickResolutionStub(code) &&
+ !IsQuickGenericJniStub(code) &&
+ !IsQuickToInterpreterBridge(code) &&
+ !m.IsNative()) {
+ DCHECK_EQ(code, oat_code) << PrettyMethod(&m);
}
}
}
@@ -1806,9 +1742,6 @@
temp_set = ClassTable::ClassSet(space->Begin() + class_table_section.Offset(),
/*make copy*/false,
&read_count);
- if (!app_image) {
- dex_cache_boot_image_class_lookup_required_ = false;
- }
VLOG(image) << "Adding class table classes took " << PrettyDuration(NanoTime() - start_time2);
}
if (app_image) {
@@ -1816,7 +1749,7 @@
if (!UpdateAppImageClassLoadersAndDexCaches(space,
class_loader,
dex_caches,
- added_class_table ? &temp_set : nullptr,
+ &temp_set,
/*out*/&forward_dex_cache_arrays,
/*out*/error_msg)) {
return false;
@@ -1826,10 +1759,8 @@
UpdateClassLoaderAndResolvedStringsVisitor visitor(space,
class_loader.Get(),
forward_dex_cache_arrays);
- if (added_class_table) {
- for (GcRoot<mirror::Class>& root : temp_set) {
- visitor(root.Read());
- }
+ for (GcRoot<mirror::Class>& root : temp_set) {
+ visitor(root.Read());
}
// forward_dex_cache_arrays is true iff we copied all of the dex cache arrays into the .bss.
// In this case, madvise away the dex cache arrays section of the image to reduce RAM usage and
@@ -1963,9 +1894,6 @@
}
void ClassLinker::VisitClasses(ClassVisitor* visitor) {
- if (dex_cache_boot_image_class_lookup_required_) {
- AddBootImageClassesToClassTable();
- }
Thread* const self = Thread::Current();
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
// Not safe to have thread suspension when we are holding a lock.
@@ -3609,17 +3537,6 @@
if (existing != nullptr) {
return existing;
}
- if (kIsDebugBuild &&
- !klass->IsTemp() &&
- class_loader == nullptr &&
- dex_cache_boot_image_class_lookup_required_) {
- // Check a class loaded with the system class loader matches one in the image if the class
- // is in the image.
- existing = LookupClassFromBootImage(descriptor);
- if (existing != nullptr) {
- CHECK_EQ(klass, existing);
- }
- }
VerifyObject(klass);
class_table->InsertWithHash(klass, hash);
if (class_loader != nullptr) {
@@ -3659,90 +3576,15 @@
const char* descriptor,
size_t hash,
mirror::ClassLoader* class_loader) {
- {
- ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
- ClassTable* const class_table = ClassTableForClassLoader(class_loader);
- if (class_table != nullptr) {
- mirror::Class* result = class_table->Lookup(descriptor, hash);
- if (result != nullptr) {
- return result;
- }
+ ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
+ ClassTable* const class_table = ClassTableForClassLoader(class_loader);
+ if (class_table != nullptr) {
+ mirror::Class* result = class_table->Lookup(descriptor, hash);
+ if (result != nullptr) {
+ return result;
}
}
- if (class_loader != nullptr || !dex_cache_boot_image_class_lookup_required_) {
- return nullptr;
- }
- // Lookup failed but need to search dex_caches_.
- mirror::Class* result = LookupClassFromBootImage(descriptor);
- if (result != nullptr) {
- result = InsertClass(descriptor, result, hash);
- } else {
- // Searching the image dex files/caches failed, we don't want to get into this situation
- // often as map searches are faster, so after kMaxFailedDexCacheLookups move all image
- // classes into the class table.
- constexpr uint32_t kMaxFailedDexCacheLookups = 1000;
- if (++failed_dex_cache_class_lookups_ > kMaxFailedDexCacheLookups) {
- AddBootImageClassesToClassTable();
- }
- }
- return result;
-}
-
-static std::vector<mirror::ObjectArray<mirror::DexCache>*> GetImageDexCaches(
- std::vector<gc::space::ImageSpace*> image_spaces) REQUIRES_SHARED(Locks::mutator_lock_) {
- CHECK(!image_spaces.empty());
- std::vector<mirror::ObjectArray<mirror::DexCache>*> dex_caches_vector;
- for (gc::space::ImageSpace* image_space : image_spaces) {
- mirror::Object* root = image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
- DCHECK(root != nullptr);
- dex_caches_vector.push_back(root->AsObjectArray<mirror::DexCache>());
- }
- return dex_caches_vector;
-}
-
-void ClassLinker::AddBootImageClassesToClassTable() {
- if (dex_cache_boot_image_class_lookup_required_) {
- AddImageClassesToClassTable(Runtime::Current()->GetHeap()->GetBootImageSpaces(),
- /*class_loader*/nullptr);
- dex_cache_boot_image_class_lookup_required_ = false;
- }
-}
-
-void ClassLinker::AddImageClassesToClassTable(std::vector<gc::space::ImageSpace*> image_spaces,
- mirror::ClassLoader* class_loader) {
- Thread* self = Thread::Current();
- WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
- ScopedAssertNoThreadSuspension ants("Moving image classes to class table");
-
- ClassTable* const class_table = InsertClassTableForClassLoader(class_loader);
-
- std::string temp;
- std::vector<mirror::ObjectArray<mirror::DexCache>*> dex_caches_vector =
- GetImageDexCaches(image_spaces);
- for (mirror::ObjectArray<mirror::DexCache>* dex_caches : dex_caches_vector) {
- for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
- mirror::DexCache* dex_cache = dex_caches->Get(i);
- GcRoot<mirror::Class>* types = dex_cache->GetResolvedTypes();
- for (int32_t j = 0, num_types = dex_cache->NumResolvedTypes(); j < num_types; j++) {
- mirror::Class* klass = types[j].Read();
- if (klass != nullptr) {
- DCHECK_EQ(klass->GetClassLoader(), class_loader);
- const char* descriptor = klass->GetDescriptor(&temp);
- size_t hash = ComputeModifiedUtf8Hash(descriptor);
- mirror::Class* existing = class_table->Lookup(descriptor, hash);
- if (existing != nullptr) {
- CHECK_EQ(existing, klass) << PrettyClassAndClassLoader(existing) << " != "
- << PrettyClassAndClassLoader(klass);
- } else {
- class_table->Insert(klass);
- if (log_new_class_table_roots_) {
- new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
- }
- }
- }
- }
- }
- }
+ return nullptr;
}
class MoveClassTableToPreZygoteVisitor : public ClassLoaderVisitor {
@@ -3766,28 +3608,6 @@
VisitClassLoaders(&visitor);
}
-mirror::Class* ClassLinker::LookupClassFromBootImage(const char* descriptor) {
- ScopedAssertNoThreadSuspension ants("Image class lookup");
- std::vector<mirror::ObjectArray<mirror::DexCache>*> dex_caches_vector =
- GetImageDexCaches(Runtime::Current()->GetHeap()->GetBootImageSpaces());
- for (mirror::ObjectArray<mirror::DexCache>* dex_caches : dex_caches_vector) {
- for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
- mirror::DexCache* dex_cache = dex_caches->Get(i);
- const DexFile* dex_file = dex_cache->GetDexFile();
- // Try binary searching the type index by descriptor.
- const DexFile::TypeId* type_id = dex_file->FindTypeId(descriptor);
- if (type_id != nullptr) {
- uint16_t type_idx = dex_file->GetIndexForTypeId(*type_id);
- mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
- if (klass != nullptr) {
- return klass;
- }
- }
- }
- }
- return nullptr;
-}
-
// Look up classes by hash and descriptor and put all matching ones in the result array.
class LookupClassesVisitor : public ClassLoaderVisitor {
public:
@@ -3813,9 +3633,6 @@
void ClassLinker::LookupClasses(const char* descriptor, std::vector<mirror::Class*>& result) {
result.clear();
- if (dex_cache_boot_image_class_lookup_required_) {
- AddBootImageClassesToClassTable();
- }
Thread* const self = Thread::Current();
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
const size_t hash = ComputeModifiedUtf8Hash(descriptor);
@@ -5218,14 +5035,6 @@
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
}
CHECK_EQ(existing, klass.Get());
- if (kIsDebugBuild && class_loader == nullptr && dex_cache_boot_image_class_lookup_required_) {
- // Check a class loaded with the system class loader matches one in the image if the class
- // is in the image.
- mirror::Class* const image_class = LookupClassFromBootImage(descriptor);
- if (image_class != nullptr) {
- CHECK_EQ(klass.Get(), existing) << descriptor;
- }
- }
if (log_new_class_table_roots_) {
new_class_roots_.push_back(GcRoot<mirror::Class>(h_new_class.Get()));
}
@@ -8094,9 +7903,6 @@
void ClassLinker::DumpForSigQuit(std::ostream& os) {
ScopedObjectAccess soa(Thread::Current());
- if (dex_cache_boot_image_class_lookup_required_) {
- AddBootImageClassesToClassTable();
- }
ReaderMutexLock mu(soa.Self(), *Locks::classlinker_classes_lock_);
os << "Zygote loaded classes=" << NumZygoteClasses() << " post zygote classes="
<< NumNonZygoteClasses() << "\n";
@@ -8132,9 +7938,6 @@
}
size_t ClassLinker::NumLoadedClasses() {
- if (dex_cache_boot_image_class_lookup_required_) {
- AddBootImageClassesToClassTable();
- }
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
// Only return non zygote classes since these are the ones which apps which care about.
return NumNonZygoteClasses();
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 43ffc8e..70cc768 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -561,17 +561,6 @@
return class_roots;
}
- // Move all of the boot image classes into the class table for faster lookups.
- void AddBootImageClassesToClassTable()
- REQUIRES(!Locks::classlinker_classes_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Add image classes to the class table.
- void AddImageClassesToClassTable(std::vector<gc::space::ImageSpace*> image_spaces,
- mirror::ClassLoader* class_loader)
- REQUIRES(!Locks::classlinker_classes_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Move the class table to the pre-zygote table to reduce memory usage. This works by ensuring
// that no more classes are ever added to the pre zygote table which makes it that the pages
// always remain shared dirty instead of private dirty.
@@ -1050,9 +1039,6 @@
void EnsureSkipAccessChecksMethods(Handle<mirror::Class> c)
REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Class* LookupClassFromBootImage(const char* descriptor)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Register a class loader and create its class table and allocator. Should not be called if
// these are already created.
void RegisterClassLoader(mirror::ClassLoader* class_loader)
@@ -1157,8 +1143,6 @@
// New class roots, only used by CMS since the GC needs to mark these in the pause.
std::vector<GcRoot<mirror::Class>> new_class_roots_ GUARDED_BY(Locks::classlinker_classes_lock_);
- // Do we need to search dex caches to find boot image classes?
- bool dex_cache_boot_image_class_lookup_required_;
// Number of times we've searched dex caches for a class. After a certain number of misses we move
// the classes into the class_table_ to avoid dex cache based searches.
Atomic<uint32_t> failed_dex_cache_class_lookups_;
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 193f6ee..5409fcb 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -57,7 +57,7 @@
// everything else. In case you want to see all messages, comment out the line.
setenv("ANDROID_LOG_TAGS", "*:e", 1);
- art::InitLogging(argv);
+ art::InitLogging(argv, art::Runtime::Aborter);
LOG(INFO) << "Running main() from common_runtime_test.cc...";
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 096f003..2ea7bb6 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -36,8 +36,7 @@
ElfFileImpl<ElfTypes>::ElfFileImpl(File* file, bool writable,
bool program_header_only,
uint8_t* requested_base)
- : file_(file),
- writable_(writable),
+ : writable_(writable),
program_header_only_(program_header_only),
header_(nullptr),
base_address_(nullptr),
@@ -74,7 +73,7 @@
prot = PROT_READ;
flags = MAP_PRIVATE;
}
- if (!elf_file->Setup(prot, flags, low_4gb, error_msg)) {
+ if (!elf_file->Setup(file, prot, flags, low_4gb, error_msg)) {
return nullptr;
}
return elf_file.release();
@@ -89,39 +88,44 @@
std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file(new ElfFileImpl<ElfTypes>
(file, (prot & PROT_WRITE) == PROT_WRITE, /*program_header_only*/false,
/*requested_base*/nullptr));
- if (!elf_file->Setup(prot, flags, low_4gb, error_msg)) {
+ if (!elf_file->Setup(file, prot, flags, low_4gb, error_msg)) {
return nullptr;
}
return elf_file.release();
}
template <typename ElfTypes>
-bool ElfFileImpl<ElfTypes>::Setup(int prot, int flags, bool low_4gb, std::string* error_msg) {
- int64_t temp_file_length = file_->GetLength();
+bool ElfFileImpl<ElfTypes>::Setup(File* file,
+ int prot,
+ int flags,
+ bool low_4gb,
+ std::string* error_msg) {
+ int64_t temp_file_length = file->GetLength();
if (temp_file_length < 0) {
errno = -temp_file_length;
*error_msg = StringPrintf("Failed to get length of file: '%s' fd=%d: %s",
- file_->GetPath().c_str(), file_->Fd(), strerror(errno));
+ file->GetPath().c_str(), file->Fd(), strerror(errno));
return false;
}
size_t file_length = static_cast<size_t>(temp_file_length);
if (file_length < sizeof(Elf_Ehdr)) {
*error_msg = StringPrintf("File size of %zd bytes not large enough to contain ELF header of "
"%zd bytes: '%s'", file_length, sizeof(Elf_Ehdr),
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (program_header_only_) {
// first just map ELF header to get program header size information
size_t elf_header_size = sizeof(Elf_Ehdr);
- if (!SetMap(MemMap::MapFile(elf_header_size,
+ if (!SetMap(file,
+ MemMap::MapFile(elf_header_size,
prot,
flags,
- file_->Fd(),
+ file->Fd(),
0,
low_4gb,
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
error_msg),
error_msg)) {
return false;
@@ -131,16 +135,17 @@
if (file_length < program_header_size) {
*error_msg = StringPrintf("File size of %zd bytes not large enough to contain ELF program "
"header of %zd bytes: '%s'", file_length,
- sizeof(Elf_Ehdr), file_->GetPath().c_str());
+ sizeof(Elf_Ehdr), file->GetPath().c_str());
return false;
}
- if (!SetMap(MemMap::MapFile(program_header_size,
+ if (!SetMap(file,
+ MemMap::MapFile(program_header_size,
prot,
flags,
- file_->Fd(),
+ file->Fd(),
0,
low_4gb,
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
error_msg),
error_msg)) {
*error_msg = StringPrintf("Failed to map ELF program headers: %s", error_msg->c_str());
@@ -148,13 +153,14 @@
}
} else {
// otherwise map entire file
- if (!SetMap(MemMap::MapFile(file_->GetLength(),
+ if (!SetMap(file,
+ MemMap::MapFile(file->GetLength(),
prot,
flags,
- file_->Fd(),
+ file->Fd(),
0,
low_4gb,
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
error_msg),
error_msg)) {
*error_msg = StringPrintf("Failed to map ELF file: %s", error_msg->c_str());
@@ -178,7 +184,7 @@
Elf_Shdr* shstrtab_section_header = GetSectionNameStringSection();
if (shstrtab_section_header == nullptr) {
*error_msg = StringPrintf("Failed to find shstrtab section header in ELF file: '%s'",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
@@ -186,7 +192,7 @@
dynamic_program_header_ = FindProgamHeaderByType(PT_DYNAMIC);
if (dynamic_program_header_ == nullptr) {
*error_msg = StringPrintf("Failed to find PT_DYNAMIC program header in ELF file: '%s'",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
@@ -200,7 +206,7 @@
Elf_Shdr* section_header = GetSectionHeader(i);
if (section_header == nullptr) {
*error_msg = StringPrintf("Failed to find section header for section %d in ELF file: '%s'",
- i, file_->GetPath().c_str());
+ i, file->GetPath().c_str());
return false;
}
switch (section_header->sh_type) {
@@ -245,7 +251,7 @@
if (reinterpret_cast<uint8_t*>(dynamic_section_start_) !=
Begin() + section_header->sh_offset) {
LOG(WARNING) << "Failed to find matching SHT_DYNAMIC for PT_DYNAMIC in "
- << file_->GetPath() << ": " << std::hex
+ << file->GetPath() << ": " << std::hex
<< reinterpret_cast<void*>(dynamic_section_start_)
<< " != " << reinterpret_cast<void*>(Begin() + section_header->sh_offset);
return false;
@@ -263,7 +269,7 @@
}
// Check for the existence of some sections.
- if (!CheckSectionsExist(error_msg)) {
+ if (!CheckSectionsExist(file, error_msg)) {
return false;
}
}
@@ -283,7 +289,7 @@
uint8_t** target, std::string* error_msg) {
if (Begin() + offset >= End()) {
*error_msg = StringPrintf("Offset %d is out of range for %s in ELF file: '%s'", offset, label,
- file_->GetPath().c_str());
+ file_path_.c_str());
return false;
}
*target = Begin() + offset;
@@ -324,11 +330,11 @@
}
template <typename ElfTypes>
-bool ElfFileImpl<ElfTypes>::CheckSectionsExist(std::string* error_msg) const {
+bool ElfFileImpl<ElfTypes>::CheckSectionsExist(File* file, std::string* error_msg) const {
if (!program_header_only_) {
// If in full mode, need section headers.
if (section_headers_start_ == nullptr) {
- *error_msg = StringPrintf("No section headers in ELF file: '%s'", file_->GetPath().c_str());
+ *error_msg = StringPrintf("No section headers in ELF file: '%s'", file->GetPath().c_str());
return false;
}
}
@@ -336,14 +342,14 @@
// This is redundant, but defensive.
if (dynamic_program_header_ == nullptr) {
*error_msg = StringPrintf("Failed to find PT_DYNAMIC program header in ELF file: '%s'",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
// Need a dynamic section. This is redundant, but defensive.
if (dynamic_section_start_ == nullptr) {
*error_msg = StringPrintf("Failed to find dynamic section in ELF file: '%s'",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
@@ -352,7 +358,7 @@
if (symtab_section_start_ != nullptr) {
// When there's a symtab, there should be a strtab.
if (strtab_section_start_ == nullptr) {
- *error_msg = StringPrintf("No strtab for symtab in ELF file: '%s'", file_->GetPath().c_str());
+ *error_msg = StringPrintf("No strtab for symtab in ELF file: '%s'", file->GetPath().c_str());
return false;
}
@@ -360,25 +366,25 @@
if (!CheckSectionsLinked(reinterpret_cast<const uint8_t*>(symtab_section_start_),
reinterpret_cast<const uint8_t*>(strtab_section_start_))) {
*error_msg = StringPrintf("Symtab is not linked to the strtab in ELF file: '%s'",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
}
// We always need a dynstr & dynsym.
if (dynstr_section_start_ == nullptr) {
- *error_msg = StringPrintf("No dynstr in ELF file: '%s'", file_->GetPath().c_str());
+ *error_msg = StringPrintf("No dynstr in ELF file: '%s'", file->GetPath().c_str());
return false;
}
if (dynsym_section_start_ == nullptr) {
- *error_msg = StringPrintf("No dynsym in ELF file: '%s'", file_->GetPath().c_str());
+ *error_msg = StringPrintf("No dynsym in ELF file: '%s'", file->GetPath().c_str());
return false;
}
// Need a hash section for dynamic symbol lookup.
if (hash_section_start_ == nullptr) {
*error_msg = StringPrintf("Failed to find hash section in ELF file: '%s'",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
@@ -386,7 +392,7 @@
if (!CheckSectionsLinked(reinterpret_cast<const uint8_t*>(hash_section_start_),
reinterpret_cast<const uint8_t*>(dynsym_section_start_))) {
*error_msg = StringPrintf("Hash section is not linked to the dynstr in ELF file: '%s'",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
@@ -397,9 +403,9 @@
// It might not be mapped, but we can compare against the file size.
int64_t offset = static_cast<int64_t>(GetHeader().e_shoff +
(GetHeader().e_shstrndx * GetHeader().e_shentsize));
- if (offset >= file_->GetLength()) {
+ if (offset >= file->GetLength()) {
*error_msg = StringPrintf("Shstrtab is not in the mapped ELF file: '%s'",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
}
@@ -408,15 +414,15 @@
}
template <typename ElfTypes>
-bool ElfFileImpl<ElfTypes>::SetMap(MemMap* map, std::string* error_msg) {
+bool ElfFileImpl<ElfTypes>::SetMap(File* file, MemMap* map, std::string* error_msg) {
if (map == nullptr) {
// MemMap::Open should have already set an error.
DCHECK(!error_msg->empty());
return false;
}
map_.reset(map);
- CHECK(map_.get() != nullptr) << file_->GetPath();
- CHECK(map_->Begin() != nullptr) << file_->GetPath();
+ CHECK(map_.get() != nullptr) << file->GetPath();
+ CHECK(map_->Begin() != nullptr) << file->GetPath();
header_ = reinterpret_cast<Elf_Ehdr*>(map_->Begin());
if ((ELFMAG0 != header_->e_ident[EI_MAG0])
@@ -425,7 +431,7 @@
|| (ELFMAG3 != header_->e_ident[EI_MAG3])) {
*error_msg = StringPrintf("Failed to find ELF magic value %d %d %d %d in %s, found %d %d %d %d",
ELFMAG0, ELFMAG1, ELFMAG2, ELFMAG3,
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
header_->e_ident[EI_MAG0],
header_->e_ident[EI_MAG1],
header_->e_ident[EI_MAG2],
@@ -436,90 +442,90 @@
if (elf_class != header_->e_ident[EI_CLASS]) {
*error_msg = StringPrintf("Failed to find expected EI_CLASS value %d in %s, found %d",
elf_class,
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
header_->e_ident[EI_CLASS]);
return false;
}
if (ELFDATA2LSB != header_->e_ident[EI_DATA]) {
*error_msg = StringPrintf("Failed to find expected EI_DATA value %d in %s, found %d",
ELFDATA2LSB,
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
header_->e_ident[EI_CLASS]);
return false;
}
if (EV_CURRENT != header_->e_ident[EI_VERSION]) {
*error_msg = StringPrintf("Failed to find expected EI_VERSION value %d in %s, found %d",
EV_CURRENT,
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
header_->e_ident[EI_CLASS]);
return false;
}
if (ET_DYN != header_->e_type) {
*error_msg = StringPrintf("Failed to find expected e_type value %d in %s, found %d",
ET_DYN,
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
header_->e_type);
return false;
}
if (EV_CURRENT != header_->e_version) {
*error_msg = StringPrintf("Failed to find expected e_version value %d in %s, found %d",
EV_CURRENT,
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
header_->e_version);
return false;
}
if (0 != header_->e_entry) {
*error_msg = StringPrintf("Failed to find expected e_entry value %d in %s, found %d",
0,
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
static_cast<int32_t>(header_->e_entry));
return false;
}
if (0 == header_->e_phoff) {
*error_msg = StringPrintf("Failed to find non-zero e_phoff value in %s",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (0 == header_->e_shoff) {
*error_msg = StringPrintf("Failed to find non-zero e_shoff value in %s",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (0 == header_->e_ehsize) {
*error_msg = StringPrintf("Failed to find non-zero e_ehsize value in %s",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (0 == header_->e_phentsize) {
*error_msg = StringPrintf("Failed to find non-zero e_phentsize value in %s",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (0 == header_->e_phnum) {
*error_msg = StringPrintf("Failed to find non-zero e_phnum value in %s",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (0 == header_->e_shentsize) {
*error_msg = StringPrintf("Failed to find non-zero e_shentsize value in %s",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (0 == header_->e_shnum) {
*error_msg = StringPrintf("Failed to find non-zero e_shnum value in %s",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (0 == header_->e_shstrndx) {
*error_msg = StringPrintf("Failed to find non-zero e_shstrndx value in %s",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (header_->e_shstrndx >= header_->e_shnum) {
*error_msg = StringPrintf("Failed to find e_shnum value %d less than %d in %s",
header_->e_shstrndx,
header_->e_shnum,
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
@@ -528,14 +534,14 @@
*error_msg = StringPrintf("Failed to find e_phoff value %" PRIu64 " less than %zd in %s",
static_cast<uint64_t>(header_->e_phoff),
Size(),
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (header_->e_shoff >= Size()) {
*error_msg = StringPrintf("Failed to find e_shoff value %" PRIu64 " less than %zd in %s",
static_cast<uint64_t>(header_->e_shoff),
Size(),
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
}
@@ -577,7 +583,7 @@
template <typename ElfTypes>
typename ElfTypes::Sym* ElfFileImpl<ElfTypes>::GetSymbolSectionStart(
Elf_Word section_type) const {
- CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
+ CHECK(IsSymbolSectionType(section_type)) << file_path_ << " " << section_type;
switch (section_type) {
case SHT_SYMTAB: {
return symtab_section_start_;
@@ -597,7 +603,7 @@
template <typename ElfTypes>
const char* ElfFileImpl<ElfTypes>::GetStringSectionStart(
Elf_Word section_type) const {
- CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
+ CHECK(IsSymbolSectionType(section_type)) << file_path_ << " " << section_type;
switch (section_type) {
case SHT_SYMTAB: {
return strtab_section_start_;
@@ -615,7 +621,7 @@
template <typename ElfTypes>
const char* ElfFileImpl<ElfTypes>::GetString(Elf_Word section_type,
Elf_Word i) const {
- CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
+ CHECK(IsSymbolSectionType(section_type)) << file_path_ << " " << section_type;
if (i == 0) {
return nullptr;
}
@@ -673,7 +679,7 @@
template <typename ElfTypes>
typename ElfTypes::Phdr* ElfFileImpl<ElfTypes>::GetProgramHeader(Elf_Word i) const {
- CHECK_LT(i, GetProgramHeaderNum()) << file_->GetPath(); // Sanity check for caller.
+ CHECK_LT(i, GetProgramHeaderNum()) << file_path_; // Sanity check for caller.
uint8_t* program_header = GetProgramHeadersStart() + (i * GetHeader().e_phentsize);
if (program_header >= End()) {
return nullptr; // Failure condition.
@@ -701,7 +707,7 @@
typename ElfTypes::Shdr* ElfFileImpl<ElfTypes>::GetSectionHeader(Elf_Word i) const {
// Can only access arbitrary sections when we have the whole file, not just program header.
// Even if we Load(), it doesn't bring in all the sections.
- CHECK(!program_header_only_) << file_->GetPath();
+ CHECK(!program_header_only_) << file_path_;
if (i >= GetSectionHeaderNum()) {
return nullptr; // Failure condition.
}
@@ -716,7 +722,7 @@
typename ElfTypes::Shdr* ElfFileImpl<ElfTypes>::FindSectionByType(Elf_Word type) const {
// Can only access arbitrary sections when we have the whole file, not just program header.
// We could change this to switch on known types if they were detected during loading.
- CHECK(!program_header_only_) << file_->GetPath();
+ CHECK(!program_header_only_) << file_path_;
for (Elf_Word i = 0; i < GetSectionHeaderNum(); i++) {
Elf_Shdr* section_header = GetSectionHeader(i);
if (section_header->sh_type == type) {
@@ -802,8 +808,8 @@
template <typename ElfTypes>
typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetSymbolNum(Elf_Shdr& section_header) const {
CHECK(IsSymbolSectionType(section_header.sh_type))
- << file_->GetPath() << " " << section_header.sh_type;
- CHECK_NE(0U, section_header.sh_entsize) << file_->GetPath();
+ << file_path_ << " " << section_header.sh_type;
+ CHECK_NE(0U, section_header.sh_entsize) << file_path_;
return section_header.sh_size / section_header.sh_entsize;
}
@@ -819,7 +825,7 @@
template <typename ElfTypes>
typename ElfFileImpl<ElfTypes>::SymbolTable**
ElfFileImpl<ElfTypes>::GetSymbolTable(Elf_Word section_type) {
- CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
+ CHECK(IsSymbolSectionType(section_type)) << file_path_ << " " << section_type;
switch (section_type) {
case SHT_SYMTAB: {
return &symtab_symbol_table_;
@@ -837,8 +843,8 @@
template <typename ElfTypes>
typename ElfTypes::Sym* ElfFileImpl<ElfTypes>::FindSymbolByName(
Elf_Word section_type, const std::string& symbol_name, bool build_map) {
- CHECK(!program_header_only_) << file_->GetPath();
- CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
+ CHECK(!program_header_only_) << file_path_;
+ CHECK(IsSymbolSectionType(section_type)) << file_path_ << " " << section_type;
SymbolTable** symbol_table = GetSymbolTable(section_type);
if (*symbol_table != nullptr || build_map) {
@@ -928,7 +934,7 @@
template <typename ElfTypes>
const char* ElfFileImpl<ElfTypes>::GetString(Elf_Shdr& string_section,
Elf_Word i) const {
- CHECK(!program_header_only_) << file_->GetPath();
+ CHECK(!program_header_only_) << file_path_;
// TODO: remove this static_cast from enum when using -std=gnu++0x
if (static_cast<Elf_Word>(SHT_STRTAB) != string_section.sh_type) {
return nullptr; // Failure condition.
@@ -954,7 +960,7 @@
template <typename ElfTypes>
typename ElfTypes::Dyn& ElfFileImpl<ElfTypes>::GetDynamic(Elf_Word i) const {
- CHECK_LT(i, GetDynamicNum()) << file_->GetPath();
+ CHECK_LT(i, GetDynamicNum()) << file_path_;
return *(GetDynamicSectionStart() + i);
}
@@ -981,40 +987,40 @@
template <typename ElfTypes>
typename ElfTypes::Rel* ElfFileImpl<ElfTypes>::GetRelSectionStart(Elf_Shdr& section_header) const {
- CHECK(SHT_REL == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
+ CHECK(SHT_REL == section_header.sh_type) << file_path_ << " " << section_header.sh_type;
return reinterpret_cast<Elf_Rel*>(Begin() + section_header.sh_offset);
}
template <typename ElfTypes>
typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetRelNum(Elf_Shdr& section_header) const {
- CHECK(SHT_REL == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
- CHECK_NE(0U, section_header.sh_entsize) << file_->GetPath();
+ CHECK(SHT_REL == section_header.sh_type) << file_path_ << " " << section_header.sh_type;
+ CHECK_NE(0U, section_header.sh_entsize) << file_path_;
return section_header.sh_size / section_header.sh_entsize;
}
template <typename ElfTypes>
typename ElfTypes::Rel& ElfFileImpl<ElfTypes>::GetRel(Elf_Shdr& section_header, Elf_Word i) const {
- CHECK(SHT_REL == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
- CHECK_LT(i, GetRelNum(section_header)) << file_->GetPath();
+ CHECK(SHT_REL == section_header.sh_type) << file_path_ << " " << section_header.sh_type;
+ CHECK_LT(i, GetRelNum(section_header)) << file_path_;
return *(GetRelSectionStart(section_header) + i);
}
template <typename ElfTypes>
typename ElfTypes::Rela* ElfFileImpl<ElfTypes>::GetRelaSectionStart(Elf_Shdr& section_header) const {
- CHECK(SHT_RELA == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
+ CHECK(SHT_RELA == section_header.sh_type) << file_path_ << " " << section_header.sh_type;
return reinterpret_cast<Elf_Rela*>(Begin() + section_header.sh_offset);
}
template <typename ElfTypes>
typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetRelaNum(Elf_Shdr& section_header) const {
- CHECK(SHT_RELA == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
+ CHECK(SHT_RELA == section_header.sh_type) << file_path_ << " " << section_header.sh_type;
return section_header.sh_size / section_header.sh_entsize;
}
template <typename ElfTypes>
typename ElfTypes::Rela& ElfFileImpl<ElfTypes>::GetRela(Elf_Shdr& section_header, Elf_Word i) const {
- CHECK(SHT_RELA == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
- CHECK_LT(i, GetRelaNum(section_header)) << file_->GetPath();
+ CHECK(SHT_RELA == section_header.sh_type) << file_path_ << " " << section_header.sh_type;
+ CHECK_LT(i, GetRelaNum(section_header)) << file_path_;
return *(GetRelaSectionStart(section_header) + i);
}
@@ -1037,7 +1043,7 @@
std::ostringstream oss;
oss << "Program header #" << i << " has overflow in p_vaddr+p_memsz: 0x" << std::hex
<< program_header->p_vaddr << "+0x" << program_header->p_memsz << "=0x" << end_vaddr
- << " in ELF file \"" << file_->GetPath() << "\"";
+ << " in ELF file \"" << file_path_ << "\"";
*error_msg = oss.str();
*size = static_cast<size_t>(-1);
return false;
@@ -1048,13 +1054,13 @@
}
min_vaddr = RoundDown(min_vaddr, kPageSize);
max_vaddr = RoundUp(max_vaddr, kPageSize);
- CHECK_LT(min_vaddr, max_vaddr) << file_->GetPath();
+ CHECK_LT(min_vaddr, max_vaddr) << file_path_;
Elf_Addr loaded_size = max_vaddr - min_vaddr;
// Check that the loaded_size fits in size_t.
if (UNLIKELY(loaded_size > std::numeric_limits<size_t>::max())) {
std::ostringstream oss;
oss << "Loaded size is 0x" << std::hex << loaded_size << " but maximum size_t is 0x"
- << std::numeric_limits<size_t>::max() << " for ELF file \"" << file_->GetPath() << "\"";
+ << std::numeric_limits<size_t>::max() << " for ELF file \"" << file_path_ << "\"";
*error_msg = oss.str();
*size = static_cast<size_t>(-1);
return false;
@@ -1064,8 +1070,11 @@
}
template <typename ElfTypes>
-bool ElfFileImpl<ElfTypes>::Load(bool executable, bool low_4gb, std::string* error_msg) {
- CHECK(program_header_only_) << file_->GetPath();
+bool ElfFileImpl<ElfTypes>::Load(File* file,
+ bool executable,
+ bool low_4gb,
+ std::string* error_msg) {
+ CHECK(program_header_only_) << file->GetPath();
if (executable) {
InstructionSet elf_ISA = GetInstructionSetFromELF(GetHeader().e_machine, GetHeader().e_flags);
@@ -1082,7 +1091,7 @@
Elf_Phdr* program_header = GetProgramHeader(i);
if (program_header == nullptr) {
*error_msg = StringPrintf("No program header for entry %d in ELF file %s.",
- i, file_->GetPath().c_str());
+ i, file->GetPath().c_str());
return false;
}
@@ -1106,11 +1115,11 @@
// non-zero, the segments require the specific address specified,
// which either was specified in the file because we already set
// base_address_ after the first zero segment).
- int64_t temp_file_length = file_->GetLength();
+ int64_t temp_file_length = file->GetLength();
if (temp_file_length < 0) {
errno = -temp_file_length;
*error_msg = StringPrintf("Failed to get length of file: '%s' fd=%d: %s",
- file_->GetPath().c_str(), file_->Fd(), strerror(errno));
+ file->GetPath().c_str(), file->Fd(), strerror(errno));
return false;
}
size_t file_length = static_cast<size_t>(temp_file_length);
@@ -1122,7 +1131,7 @@
reserve_base_override = requested_base_;
}
std::string reservation_name("ElfFile reservation for ");
- reservation_name += file_->GetPath();
+ reservation_name += file->GetPath();
size_t loaded_size;
if (!GetLoadedSize(&loaded_size, error_msg)) {
DCHECK(!error_msg->empty());
@@ -1178,7 +1187,7 @@
*error_msg = StringPrintf("Invalid p_filesz > p_memsz (%" PRIu64 " > %" PRIu64 "): %s",
static_cast<uint64_t>(program_header->p_filesz),
static_cast<uint64_t>(program_header->p_memsz),
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (program_header->p_filesz < program_header->p_memsz &&
@@ -1187,14 +1196,14 @@
" < %" PRIu64 "): %s",
static_cast<uint64_t>(program_header->p_filesz),
static_cast<uint64_t>(program_header->p_memsz),
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (file_length < (program_header->p_offset + program_header->p_filesz)) {
*error_msg = StringPrintf("File size of %zd bytes not large enough to contain ELF segment "
"%d of %" PRIu64 " bytes: '%s'", file_length, i,
static_cast<uint64_t>(program_header->p_offset + program_header->p_filesz),
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (program_header->p_filesz != 0u) {
@@ -1203,28 +1212,28 @@
program_header->p_filesz,
prot,
flags,
- file_->Fd(),
+ file->Fd(),
program_header->p_offset,
/*low4_gb*/false,
/*reuse*/true, // implies MAP_FIXED
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
error_msg));
if (segment.get() == nullptr) {
*error_msg = StringPrintf("Failed to map ELF file segment %d from %s: %s",
- i, file_->GetPath().c_str(), error_msg->c_str());
+ i, file->GetPath().c_str(), error_msg->c_str());
return false;
}
if (segment->Begin() != p_vaddr) {
*error_msg = StringPrintf("Failed to map ELF file segment %d from %s at expected address %p, "
"instead mapped to %p",
- i, file_->GetPath().c_str(), p_vaddr, segment->Begin());
+ i, file->GetPath().c_str(), p_vaddr, segment->Begin());
return false;
}
segments_.push_back(segment.release());
}
if (program_header->p_filesz < program_header->p_memsz) {
std::string name = StringPrintf("Zero-initialized segment %" PRIu64 " of ELF file %s",
- static_cast<uint64_t>(i), file_->GetPath().c_str());
+ static_cast<uint64_t>(i), file->GetPath().c_str());
std::unique_ptr<MemMap> segment(
MemMap::MapAnonymous(name.c_str(),
p_vaddr + program_header->p_filesz,
@@ -1232,13 +1241,13 @@
prot, false, true /* reuse */, error_msg));
if (segment == nullptr) {
*error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s: %s",
- i, file_->GetPath().c_str(), error_msg->c_str());
+ i, file->GetPath().c_str(), error_msg->c_str());
return false;
}
if (segment->Begin() != p_vaddr) {
*error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s "
"at expected address %p, instead mapped to %p",
- i, file_->GetPath().c_str(), p_vaddr, segment->Begin());
+ i, file->GetPath().c_str(), p_vaddr, segment->Begin());
return false;
}
segments_.push_back(segment.release());
@@ -1249,7 +1258,7 @@
uint8_t* dsptr = base_address_ + GetDynamicProgramHeader().p_vaddr;
if ((dsptr < Begin() || dsptr >= End()) && !ValidPointer(dsptr)) {
*error_msg = StringPrintf("dynamic section address invalid in ELF file %s",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
dynamic_section_start_ = reinterpret_cast<Elf_Dyn*>(dsptr);
@@ -1261,7 +1270,7 @@
case DT_HASH: {
if (!ValidPointer(d_ptr)) {
*error_msg = StringPrintf("DT_HASH value %p does not refer to a loaded ELF segment of %s",
- d_ptr, file_->GetPath().c_str());
+ d_ptr, file->GetPath().c_str());
return false;
}
hash_section_start_ = reinterpret_cast<Elf_Word*>(d_ptr);
@@ -1270,7 +1279,7 @@
case DT_STRTAB: {
if (!ValidPointer(d_ptr)) {
*error_msg = StringPrintf("DT_HASH value %p does not refer to a loaded ELF segment of %s",
- d_ptr, file_->GetPath().c_str());
+ d_ptr, file->GetPath().c_str());
return false;
}
dynstr_section_start_ = reinterpret_cast<char*>(d_ptr);
@@ -1279,7 +1288,7 @@
case DT_SYMTAB: {
if (!ValidPointer(d_ptr)) {
*error_msg = StringPrintf("DT_HASH value %p does not refer to a loaded ELF segment of %s",
- d_ptr, file_->GetPath().c_str());
+ d_ptr, file->GetPath().c_str());
return false;
}
dynsym_section_start_ = reinterpret_cast<Elf_Sym*>(d_ptr);
@@ -1289,7 +1298,7 @@
if (GetDynamicNum() != i+1) {
*error_msg = StringPrintf("DT_NULL found after %d .dynamic entries, "
"expected %d as implied by size of PT_DYNAMIC segment in %s",
- i + 1, GetDynamicNum(), file_->GetPath().c_str());
+ i + 1, GetDynamicNum(), file->GetPath().c_str());
return false;
}
break;
@@ -1298,7 +1307,7 @@
}
// Check for the existence of some sections.
- if (!CheckSectionsExist(error_msg)) {
+ if (!CheckSectionsExist(file, error_msg)) {
return false;
}
@@ -1392,7 +1401,7 @@
}
template <typename ElfTypes>
-bool ElfFileImpl<ElfTypes>::Strip(std::string* error_msg) {
+bool ElfFileImpl<ElfTypes>::Strip(File* file, std::string* error_msg) {
// ELF files produced by MCLinker look roughly like this
//
// +------------+
@@ -1484,10 +1493,10 @@
GetHeader().e_shnum = section_headers.size();
GetHeader().e_shoff = shoff;
- int result = ftruncate(file_->Fd(), offset);
+ int result = ftruncate(file->Fd(), offset);
if (result != 0) {
*error_msg = StringPrintf("Failed to truncate while stripping ELF file: '%s': %s",
- file_->GetPath().c_str(), strerror(errno));
+ file->GetPath().c_str(), strerror(errno));
return false;
}
return true;
@@ -1498,32 +1507,32 @@
template <typename ElfTypes>
bool ElfFileImpl<ElfTypes>::Fixup(Elf_Addr base_address) {
if (!FixupDynamic(base_address)) {
- LOG(WARNING) << "Failed to fixup .dynamic in " << file_->GetPath();
+ LOG(WARNING) << "Failed to fixup .dynamic in " << file_path_;
return false;
}
if (!FixupSectionHeaders(base_address)) {
- LOG(WARNING) << "Failed to fixup section headers in " << file_->GetPath();
+ LOG(WARNING) << "Failed to fixup section headers in " << file_path_;
return false;
}
if (!FixupProgramHeaders(base_address)) {
- LOG(WARNING) << "Failed to fixup program headers in " << file_->GetPath();
+ LOG(WARNING) << "Failed to fixup program headers in " << file_path_;
return false;
}
if (!FixupSymbols(base_address, true)) {
- LOG(WARNING) << "Failed to fixup .dynsym in " << file_->GetPath();
+ LOG(WARNING) << "Failed to fixup .dynsym in " << file_path_;
return false;
}
if (!FixupSymbols(base_address, false)) {
- LOG(WARNING) << "Failed to fixup .symtab in " << file_->GetPath();
+ LOG(WARNING) << "Failed to fixup .symtab in " << file_path_;
return false;
}
if (!FixupRelocations(base_address)) {
- LOG(WARNING) << "Failed to fixup .rel.dyn in " << file_->GetPath();
+ LOG(WARNING) << "Failed to fixup .rel.dyn in " << file_path_;
return false;
}
static_assert(sizeof(Elf_Off) >= sizeof(base_address), "Potentially losing precision.");
if (!FixupDebugSections(static_cast<Elf_Off>(base_address))) {
- LOG(WARNING) << "Failed to fixup debug sections in " << file_->GetPath();
+ LOG(WARNING) << "Failed to fixup debug sections in " << file_path_;
return false;
}
return true;
@@ -1538,7 +1547,7 @@
Elf_Addr d_ptr = elf_dyn.d_un.d_ptr;
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf_Dyn[%d] from 0x%" PRIx64 " to 0x%" PRIx64,
- GetFile().GetPath().c_str(), i,
+ file_path_.c_str(), i,
static_cast<uint64_t>(d_ptr),
static_cast<uint64_t>(d_ptr + base_address));
}
@@ -1560,7 +1569,7 @@
}
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf_Shdr[%d] from 0x%" PRIx64 " to 0x%" PRIx64,
- GetFile().GetPath().c_str(), i,
+ file_path_.c_str(), i,
static_cast<uint64_t>(sh->sh_addr),
static_cast<uint64_t>(sh->sh_addr + base_address));
}
@@ -1575,19 +1584,19 @@
for (Elf_Word i = 0; i < GetProgramHeaderNum(); i++) {
Elf_Phdr* ph = GetProgramHeader(i);
CHECK(ph != nullptr);
- CHECK_EQ(ph->p_vaddr, ph->p_paddr) << GetFile().GetPath() << " i=" << i;
+ CHECK_EQ(ph->p_vaddr, ph->p_paddr) << file_path_ << " i=" << i;
CHECK((ph->p_align == 0) || (0 == ((ph->p_vaddr - ph->p_offset) & (ph->p_align - 1))))
- << GetFile().GetPath() << " i=" << i;
+ << file_path_ << " i=" << i;
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf_Phdr[%d] from 0x%" PRIx64 " to 0x%" PRIx64,
- GetFile().GetPath().c_str(), i,
+ file_path_.c_str(), i,
static_cast<uint64_t>(ph->p_vaddr),
static_cast<uint64_t>(ph->p_vaddr + base_address));
}
ph->p_vaddr += base_address;
ph->p_paddr += base_address;
CHECK((ph->p_align == 0) || (0 == ((ph->p_vaddr - ph->p_offset) & (ph->p_align - 1))))
- << GetFile().GetPath() << " i=" << i;
+ << file_path_ << " i=" << i;
}
return true;
}
@@ -1599,7 +1608,7 @@
Elf_Shdr* symbol_section = FindSectionByType(section_type);
if (symbol_section == nullptr) {
// file is missing optional .symtab
- CHECK(!dynamic) << GetFile().GetPath();
+ CHECK(!dynamic) << file_path_;
return true;
}
for (uint32_t i = 0; i < GetSymbolNum(*symbol_section); i++) {
@@ -1608,7 +1617,7 @@
if (symbol->st_value != 0) {
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf_Sym[%d] from 0x%" PRIx64 " to 0x%" PRIx64,
- GetFile().GetPath().c_str(), i,
+ file_path_.c_str(), i,
static_cast<uint64_t>(symbol->st_value),
static_cast<uint64_t>(symbol->st_value + base_address));
}
@@ -1628,7 +1637,7 @@
Elf_Rel& rel = GetRel(*sh, j);
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf_Rel[%d] from 0x%" PRIx64 " to 0x%" PRIx64,
- GetFile().GetPath().c_str(), j,
+ file_path_.c_str(), j,
static_cast<uint64_t>(rel.r_offset),
static_cast<uint64_t>(rel.r_offset + base_address));
}
@@ -1639,7 +1648,7 @@
Elf_Rela& rela = GetRela(*sh, j);
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf_Rela[%d] from 0x%" PRIx64 " to 0x%" PRIx64,
- GetFile().GetPath().c_str(), j,
+ file_path_.c_str(), j,
static_cast<uint64_t>(rela.r_offset),
static_cast<uint64_t>(rela.r_offset + base_address));
}
@@ -1695,8 +1704,9 @@
low_4gb,
error_msg,
requested_base);
- if (elf_file_impl == nullptr)
+ if (elf_file_impl == nullptr) {
return nullptr;
+ }
return new ElfFile(elf_file_impl);
} else if (header[EI_CLASS] == ELFCLASS32) {
ElfFileImpl32* elf_file_impl = ElfFileImpl32::Open(file,
@@ -1775,8 +1785,8 @@
return elf32_->func(__VA_ARGS__); \
}
-bool ElfFile::Load(bool executable, bool low_4gb, std::string* error_msg) {
- DELEGATE_TO_IMPL(Load, executable, low_4gb, error_msg);
+bool ElfFile::Load(File* file, bool executable, bool low_4gb, std::string* error_msg) {
+ DELEGATE_TO_IMPL(Load, file, executable, low_4gb, error_msg);
}
const uint8_t* ElfFile::FindDynamicSymbolAddress(const std::string& symbol_name) const {
@@ -1795,8 +1805,8 @@
DELEGATE_TO_IMPL(End);
}
-const File& ElfFile::GetFile() const {
- DELEGATE_TO_IMPL(GetFile);
+const std::string& ElfFile::GetFilePath() const {
+ DELEGATE_TO_IMPL(GetFilePath);
}
bool ElfFile::GetSectionOffsetAndSize(const char* section_name, uint64_t* offset,
@@ -1854,10 +1864,11 @@
return false;
}
- if (elf_file->elf64_.get() != nullptr)
- return elf_file->elf64_->Strip(error_msg);
- else
- return elf_file->elf32_->Strip(error_msg);
+ if (elf_file->elf64_.get() != nullptr) {
+ return elf_file->elf64_->Strip(file, error_msg);
+ } else {
+ return elf_file->elf32_->Strip(file, error_msg);
+ }
}
bool ElfFile::Fixup(uint64_t base_address) {
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index c3616f7..b1c9395 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -53,7 +53,7 @@
~ElfFile();
// Load segments into memory based on PT_LOAD program headers
- bool Load(bool executable, bool low_4gb, std::string* error_msg);
+ bool Load(File* file, bool executable, bool low_4gb, std::string* error_msg);
const uint8_t* FindDynamicSymbolAddress(const std::string& symbol_name) const;
@@ -65,7 +65,7 @@
// The end of the memory map address range for this ELF file.
uint8_t* End() const;
- const File& GetFile() const;
+ const std::string& GetFilePath() const;
bool GetSectionOffsetAndSize(const char* section_name, uint64_t* offset, uint64_t* size) const;
diff --git a/runtime/elf_file_impl.h b/runtime/elf_file_impl.h
index 1cdbedc..04c2243 100644
--- a/runtime/elf_file_impl.h
+++ b/runtime/elf_file_impl.h
@@ -61,8 +61,8 @@
std::string* error_msg);
~ElfFileImpl();
- const File& GetFile() const {
- return *file_;
+ const std::string& GetFilePath() const {
+ return file_path_;
}
uint8_t* Begin() const {
@@ -119,7 +119,7 @@
// Load segments into memory based on PT_LOAD program headers.
// executable is true at run time, false at compile time.
- bool Load(bool executable, bool low_4gb, std::string* error_msg);
+ bool Load(File* file, bool executable, bool low_4gb, std::string* error_msg);
bool Fixup(Elf_Addr base_address);
bool FixupDynamic(Elf_Addr base_address);
@@ -132,14 +132,14 @@
static void ApplyOatPatches(const uint8_t* patches, const uint8_t* patches_end, Elf_Addr delta,
uint8_t* to_patch, const uint8_t* to_patch_end);
- bool Strip(std::string* error_msg);
+ bool Strip(File* file, std::string* error_msg);
private:
ElfFileImpl(File* file, bool writable, bool program_header_only, uint8_t* requested_base);
- bool Setup(int prot, int flags, bool low_4gb, std::string* error_msg);
+ bool Setup(File* file, int prot, int flags, bool low_4gb, std::string* error_msg);
- bool SetMap(MemMap* map, std::string* error_msg);
+ bool SetMap(File* file, MemMap* map, std::string* error_msg);
uint8_t* GetProgramHeadersStart() const;
uint8_t* GetSectionHeadersStart() const;
@@ -163,7 +163,7 @@
const Elf_Sym* FindDynamicSymbol(const std::string& symbol_name) const;
// Check that certain sections and their dependencies exist.
- bool CheckSectionsExist(std::string* error_msg) const;
+ bool CheckSectionsExist(File* file, std::string* error_msg) const;
// Check that the link of the first section links to the second section.
bool CheckSectionsLinked(const uint8_t* source, const uint8_t* target) const;
@@ -191,7 +191,7 @@
// Lookup a string by section type. Returns null for special 0 offset.
const char* GetString(Elf_Word section_type, Elf_Word) const;
- const File* const file_;
+ const std::string file_path_;
const bool writable_;
const bool program_header_only_;
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 4311d19..2a3a6bf 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -63,12 +63,14 @@
auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kSaveRefsOnly);
mirror::String* result = ResolveStringFromCode(caller, string_idx);
if (LIKELY(result != nullptr)) {
- // For AOT code, we need a write barrier for the dex cache that holds the GC roots in the .bss.
+ // For AOT code, we need a write barrier for the class loader that holds
+ // the GC roots in the .bss.
const DexFile* dex_file = caller->GetDexFile();
if (dex_file != nullptr &&
dex_file->GetOatDexFile() != nullptr &&
!dex_file->GetOatDexFile()->GetOatFile()->GetBssGcRoots().empty()) {
mirror::ClassLoader* class_loader = caller->GetDeclaringClass()->GetClassLoader();
+ DCHECK(class_loader != nullptr); // We do not use .bss GC roots for boot image.
// Note that we emit the barrier before the compiled code stores the string as GC root.
// This is OK as there is no suspend point point in between.
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 20fa0d8..383cdd2 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -24,6 +24,15 @@
extern void ReadBarrierJni(mirror::CompressedReference<mirror::Object>* handle_on_stack,
Thread* self ATTRIBUTE_UNUSED) {
+ DCHECK(kUseReadBarrier);
+ if (kUseBakerReadBarrier) {
+ DCHECK(handle_on_stack->AsMirrorPtr() != nullptr)
+ << "The class of a static jni call must not be null";
+ // Check the mark bit and return early if it's already marked.
+ if (LIKELY(handle_on_stack->AsMirrorPtr()->GetMarkBit() != 0)) {
+ return;
+ }
+ }
// Call the read barrier and update the handle.
mirror::Object* to_ref = ReadBarrier::BarrierForRoot(handle_on_stack);
handle_on_stack->Assign(to_ref);
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 7229f76..29bab01 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -66,7 +66,9 @@
: collector_(collector), target_space_(target_space),
contains_reference_to_target_space_(contains_reference_to_target_space) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<mirror::Object> obj,
+ MemberOffset offset,
+ bool is_static ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
@@ -77,7 +79,7 @@
}
}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
if (target_space_->HasAddress(ref->GetReferent())) {
*contains_reference_to_target_space_ = true;
@@ -115,7 +117,7 @@
: collector_(collector), target_space_(target_space),
contains_reference_to_target_space_(contains_reference_to_target_space) {}
- void operator()(mirror::Object* obj) const REQUIRES(Locks::heap_bitmap_lock_)
+ void operator()(ObjPtr<mirror::Object> obj) const REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
RememberedSetReferenceVisitor visitor(target_space_, contains_reference_to_target_space_,
collector_);
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index dabb6da..3dee974 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -354,14 +354,14 @@
explicit VerifyGrayImmuneObjectsVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
+ void operator()(ObjPtr<mirror::Object> obj, MemberOffset offset, bool /* is_static */)
const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset),
obj, offset);
}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
CheckReference(ref->GetReferent<kWithoutReadBarrier>(),
@@ -386,13 +386,15 @@
private:
ConcurrentCopying* const collector_;
- void CheckReference(mirror::Object* ref, mirror::Object* holder, MemberOffset offset) const
+ void CheckReference(ObjPtr<mirror::Object> ref,
+ ObjPtr<mirror::Object> holder,
+ MemberOffset offset) const
REQUIRES_SHARED(Locks::mutator_lock_) {
if (ref != nullptr) {
- if (!collector_->immune_spaces_.ContainsObject(ref)) {
+ if (!collector_->immune_spaces_.ContainsObject(ref.Ptr())) {
// Not immune, must be a zygote large object.
CHECK(Runtime::Current()->GetHeap()->GetLargeObjectsSpace()->IsZygoteLargeObject(
- Thread::Current(), ref))
+ Thread::Current(), ref.Ptr()))
<< "Non gray object references non immune, non zygote large object "<< ref << " "
<< PrettyTypeOf(ref) << " in holder " << holder << " " << PrettyTypeOf(holder)
<< " offset=" << offset.Uint32Value();
@@ -969,14 +971,17 @@
explicit VerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<mirror::Object> obj,
+ MemberOffset offset,
+ bool is_static ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref =
obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
VerifyNoFromSpaceRefsVisitor visitor(collector_);
visitor(ref);
}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
this->operator()(ref, mirror::Reference::ReferentOffset(), false);
@@ -1091,14 +1096,16 @@
explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<mirror::Object> obj,
+ MemberOffset offset,
+ bool is_static ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref =
obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
AssertToSpaceInvariantRefsVisitor visitor(collector_);
visitor(ref);
}
- void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
}
@@ -1780,13 +1787,13 @@
explicit RefFieldsVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
+ void operator()(ObjPtr<mirror::Object> obj, MemberOffset offset, bool /* is_static */)
const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
- collector_->Process(obj, offset);
+ collector_->Process(obj.Ptr(), offset);
}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
collector_->DelayReferenceReferent(klass, ref);
@@ -2377,7 +2384,8 @@
return Mark(from_ref);
}
-void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
+void ConcurrentCopying::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> reference) {
heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 81ffbc5..5b8a557 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -169,7 +169,8 @@
void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) OVERRIDE
+ virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> reference) OVERRIDE
REQUIRES_SHARED(Locks::mutator_lock_);
void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 4ffa254..5b51399 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -196,7 +196,8 @@
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
- virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+ virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> reference)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
protected:
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 6d2f009..e0bf744 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -418,7 +418,7 @@
collector_->UpdateHeapReference(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
}
- void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> /*klass*/, mirror::Reference* ref) const
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
collector_->UpdateHeapReference(
ref->GetFieldObjectReferenceAddr<kVerifyNone>(mirror::Reference::ReferentOffset()));
@@ -543,7 +543,8 @@
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
-void MarkCompact::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
+void MarkCompact::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> reference) {
heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}
@@ -551,13 +552,16 @@
public:
explicit MarkObjectVisitor(MarkCompact* collector) : collector_(collector) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const ALWAYS_INLINE
+ void operator()(ObjPtr<mirror::Object> obj,
+ MemberOffset offset,
+ bool /*is_static*/) const ALWAYS_INLINE
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// Object was already verified when we scanned it.
collector_->MarkObject(obj->GetFieldObject<mirror::Object, kVerifyNone>(offset));
}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) {
collector_->DelayReferenceReferent(klass, ref);
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index a61646c..564f85b 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -122,7 +122,7 @@
OVERRIDE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Schedules an unmarked object for reference processing.
- void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+ void DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> reference)
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index b89d99c..c05719d 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -272,7 +272,7 @@
explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
: mark_sweep_(mark_sweep) {}
- void operator()(mirror::Object* obj) const
+ void operator()(ObjPtr<mirror::Object> obj) const
ALWAYS_INLINE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -280,7 +280,7 @@
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
}
- mark_sweep_->ScanObject(obj);
+ mark_sweep_->ScanObject(obj.Ptr());
}
private:
@@ -616,7 +616,7 @@
public:
explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
collector_->DelayReferenceReferent(klass, ref);
@@ -1297,9 +1297,9 @@
}
}
-// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
+// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
-void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
+void MarkSweep::DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) {
heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this);
}
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index bbac9da..19c2e9a 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -225,7 +225,7 @@
}
// Schedules an unmarked object for reference processing.
- void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+ void DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> reference)
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 76a478e..2cb1767 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -679,7 +679,8 @@
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
-void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
+void SemiSpace::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> reference) {
heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}
@@ -687,13 +688,13 @@
public:
explicit MarkObjectVisitor(SemiSpace* collector) : collector_(collector) {}
- void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
+ void operator()(ObjPtr<Object> obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// Object was already verified when we scanned it.
collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
collector_->DelayReferenceReferent(klass, ref);
}
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 4b63d9b..4cebcc3 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -156,7 +156,7 @@
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Schedules an unmarked object for reference processing.
- void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+ void DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> reference)
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index bf5af8e..45bd87b 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -2894,19 +2894,21 @@
return fail_count_->LoadSequentiallyConsistent();
}
- void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED, ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) {
if (verify_referent_) {
- VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
+ VerifyReference(ref.Ptr(), ref->GetReferent(), mirror::Reference::ReferentOffset());
}
}
- void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<mirror::Object> obj,
+ MemberOffset offset,
+ bool is_static ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
- VerifyReference(obj, obj->GetFieldObject<mirror::Object>(offset), offset);
+ VerifyReference(obj.Ptr(), obj->GetFieldObject<mirror::Object>(offset), offset);
}
- bool IsLive(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ bool IsLive(ObjPtr<mirror::Object> obj) const NO_THREAD_SAFETY_ANALYSIS {
return heap_->IsLiveObjectLocked(obj, true, false, true);
}
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 9694597..4b8f38d 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -60,12 +60,13 @@
condition_.Broadcast(self);
}
-mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* reference) {
+ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
+ ObjPtr<mirror::Reference> reference) {
if (!kUseReadBarrier || self->GetWeakRefAccessEnabled()) {
// Under read barrier / concurrent copying collector, it's not safe to call GetReferent() when
// weak ref access is disabled as the call includes a read barrier which may push a ref onto the
// mark stack and interfere with termination of marking.
- mirror::Object* const referent = reference->GetReferent();
+ ObjPtr<mirror::Object> const referent = reference->GetReferent();
// If the referent is null then it is already cleared, we can just return null since there is no
// scenario where it becomes non-null during the reference processing phase.
if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
@@ -116,7 +117,8 @@
}
// Process reference class instances and schedule finalizations.
-void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings,
+void ReferenceProcessor::ProcessReferences(bool concurrent,
+ TimingLogger* timings,
bool clear_soft_references,
collector::GarbageCollector* collector) {
TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
@@ -188,7 +190,8 @@
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
-void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
+void ReferenceProcessor::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> ref,
collector::GarbageCollector* collector) {
// klass can be the class of the old object if the visitor already updated the class of ref.
DCHECK(klass != nullptr);
@@ -260,7 +263,8 @@
}
}
-bool ReferenceProcessor::MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference) {
+bool ReferenceProcessor::MakeCircularListIfUnenqueued(
+ ObjPtr<mirror::FinalizerReference> reference) {
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::reference_processor_lock_);
// Wait untul we are done processing reference.
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index 4788f8a..759b7e1 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -46,7 +46,9 @@
class ReferenceProcessor {
public:
explicit ReferenceProcessor();
- void ProcessReferences(bool concurrent, TimingLogger* timings, bool clear_soft_references,
+ void ProcessReferences(bool concurrent,
+ TimingLogger* timings,
+ bool clear_soft_references,
gc::collector::GarbageCollector* collector)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_)
@@ -57,16 +59,17 @@
void EnableSlowPath() REQUIRES_SHARED(Locks::mutator_lock_);
void BroadcastForSlowPath(Thread* self);
// Decode the referent, may block if references are being processed.
- mirror::Object* GetReferent(Thread* self, mirror::Reference* reference)
+ ObjPtr<mirror::Object> GetReferent(Thread* self, ObjPtr<mirror::Reference> reference)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::reference_processor_lock_);
void EnqueueClearedReferences(Thread* self) REQUIRES(!Locks::mutator_lock_);
- void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
+ void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> ref,
collector::GarbageCollector* collector)
REQUIRES_SHARED(Locks::mutator_lock_);
void UpdateRoots(IsMarkedVisitor* visitor)
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Make a circular list with reference if it is not enqueued. Uses the finalizer queue lock.
- bool MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference)
+ bool MakeCircularListIfUnenqueued(ObjPtr<mirror::FinalizerReference> reference)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::reference_processor_lock_,
!Locks::reference_queue_finalizer_references_lock_);
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 62625c4..4e6f7da 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -29,7 +29,7 @@
ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
}
-void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
+void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref) {
DCHECK(ref != nullptr);
MutexLock mu(self, *lock_);
if (ref->IsUnprocessed()) {
@@ -37,16 +37,16 @@
}
}
-void ReferenceQueue::EnqueueReference(mirror::Reference* ref) {
+void ReferenceQueue::EnqueueReference(ObjPtr<mirror::Reference> ref) {
DCHECK(ref != nullptr);
CHECK(ref->IsUnprocessed());
if (IsEmpty()) {
// 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref;
- list_ = ref;
+ list_ = ref.Ptr();
} else {
// The list is owned by the GC, everything that has been inserted must already be at least
// gray.
- mirror::Reference* head = list_->GetPendingNext<kWithoutReadBarrier>();
+ ObjPtr<mirror::Reference> head = list_->GetPendingNext<kWithoutReadBarrier>();
DCHECK(head != nullptr);
ref->SetPendingNext(head);
}
@@ -54,16 +54,16 @@
list_->SetPendingNext(ref);
}
-mirror::Reference* ReferenceQueue::DequeuePendingReference() {
+ObjPtr<mirror::Reference> ReferenceQueue::DequeuePendingReference() {
DCHECK(!IsEmpty());
- mirror::Reference* ref = list_->GetPendingNext<kWithoutReadBarrier>();
+ ObjPtr<mirror::Reference> ref = list_->GetPendingNext<kWithoutReadBarrier>();
DCHECK(ref != nullptr);
// Note: the following code is thread-safe because it is only called from ProcessReferences which
// is single threaded.
if (list_ == ref) {
list_ = nullptr;
} else {
- mirror::Reference* next = ref->GetPendingNext<kWithoutReadBarrier>();
+ ObjPtr<mirror::Reference> next = ref->GetPendingNext<kWithoutReadBarrier>();
list_->SetPendingNext(next);
}
ref->SetPendingNext(nullptr);
@@ -83,10 +83,10 @@
// In ConcurrentCopying::ProcessMarkStackRef() we may leave a white reference in the queue and
// find it here, which is OK.
CHECK_EQ(rb_ptr, ReadBarrier::WhitePtr()) << "ref=" << ref << " rb_ptr=" << rb_ptr;
- mirror::Object* referent = ref->GetReferent<kWithoutReadBarrier>();
+ ObjPtr<mirror::Object> referent = ref->GetReferent<kWithoutReadBarrier>();
// The referent could be null if it's cleared by a mutator (Reference.clear()).
if (referent != nullptr) {
- CHECK(concurrent_copying->IsInToSpace(referent))
+ CHECK(concurrent_copying->IsInToSpace(referent.Ptr()))
<< "ref=" << ref << " rb_ptr=" << ref->GetReadBarrierPointer()
<< " referent=" << referent;
}
@@ -96,13 +96,13 @@
}
void ReferenceQueue::Dump(std::ostream& os) const {
- mirror::Reference* cur = list_;
+ ObjPtr<mirror::Reference> cur = list_;
os << "Reference starting at list_=" << list_ << "\n";
if (cur == nullptr) {
return;
}
do {
- mirror::Reference* pending_next = cur->GetPendingNext();
+ ObjPtr<mirror::Reference> pending_next = cur->GetPendingNext();
os << "Reference= " << cur << " PendingNext=" << pending_next;
if (cur->IsFinalizerReferenceInstance()) {
os << " Zombie=" << cur->AsFinalizerReference()->GetZombie();
@@ -114,7 +114,7 @@
size_t ReferenceQueue::GetLength() const {
size_t count = 0;
- mirror::Reference* cur = list_;
+ ObjPtr<mirror::Reference> cur = list_;
if (cur != nullptr) {
do {
++count;
@@ -127,7 +127,7 @@
void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
collector::GarbageCollector* collector) {
while (!IsEmpty()) {
- mirror::Reference* ref = DequeuePendingReference();
+ ObjPtr<mirror::Reference> ref = DequeuePendingReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
if (referent_addr->AsMirrorPtr() != nullptr &&
!collector->IsMarkedHeapReference(referent_addr)) {
@@ -145,11 +145,11 @@
void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
collector::GarbageCollector* collector) {
while (!IsEmpty()) {
- mirror::FinalizerReference* ref = DequeuePendingReference()->AsFinalizerReference();
+ ObjPtr<mirror::FinalizerReference> ref = DequeuePendingReference()->AsFinalizerReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
if (referent_addr->AsMirrorPtr() != nullptr &&
!collector->IsMarkedHeapReference(referent_addr)) {
- mirror::Object* forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
+ ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
// Move the updated referent to the zombie field.
if (Runtime::Current()->IsActiveTransaction()) {
ref->SetZombie<true>(forward_address);
@@ -167,8 +167,8 @@
if (UNLIKELY(IsEmpty())) {
return;
}
- mirror::Reference* const head = list_;
- mirror::Reference* ref = head;
+ ObjPtr<mirror::Reference> const head = list_;
+ ObjPtr<mirror::Reference> ref = head;
do {
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
if (referent_addr->AsMirrorPtr() != nullptr) {
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 1de1aa1..b5ec1e5 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -26,6 +26,7 @@
#include "base/timing_logger.h"
#include "globals.h"
#include "jni.h"
+#include "obj_ptr.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "thread_pool.h"
@@ -54,15 +55,15 @@
// Enqueue a reference if it is unprocessed. Thread safe to call from multiple
// threads since it uses a lock to avoid a race between checking for the references presence and
// adding it.
- void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref)
+ void AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*lock_);
// Enqueue a reference. The reference must be unprocessed.
// Not thread safe, used when mutators are paused to minimize lock overhead.
- void EnqueueReference(mirror::Reference* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+ void EnqueueReference(ObjPtr<mirror::Reference> ref) REQUIRES_SHARED(Locks::mutator_lock_);
// Dequeue a reference from the queue and return that dequeued reference.
- mirror::Reference* DequeuePendingReference() REQUIRES_SHARED(Locks::mutator_lock_);
+ ObjPtr<mirror::Reference> DequeuePendingReference() REQUIRES_SHARED(Locks::mutator_lock_);
// Enqueues finalizer references with white referents. White referents are blackened, moved to
// the zombie field, and the referent field is cleared.
@@ -104,7 +105,7 @@
// calling AtomicEnqueueIfNotEnqueued.
Mutex* const lock_;
// The actual reference list. Only a root for the mark compact GC since it will be null for other
- // GC types.
+ // GC types. Not an ObjPtr since it is accessed from multiple threads.
mirror::Reference* list_;
DISALLOW_IMPLICIT_CONSTRUCTORS(ReferenceQueue);
diff --git a/runtime/gc/reference_queue_test.cc b/runtime/gc/reference_queue_test.cc
index 5b8a3c2..3ca3353 100644
--- a/runtime/gc/reference_queue_test.cc
+++ b/runtime/gc/reference_queue_test.cc
@@ -52,10 +52,10 @@
std::set<mirror::Reference*> refs = {ref1.Get(), ref2.Get()};
std::set<mirror::Reference*> dequeued;
- dequeued.insert(queue.DequeuePendingReference());
+ dequeued.insert(queue.DequeuePendingReference().Ptr());
ASSERT_TRUE(!queue.IsEmpty());
ASSERT_EQ(queue.GetLength(), 1U);
- dequeued.insert(queue.DequeuePendingReference());
+ dequeued.insert(queue.DequeuePendingReference().Ptr());
ASSERT_EQ(queue.GetLength(), 0U);
ASSERT_TRUE(queue.IsEmpty());
ASSERT_EQ(refs, dequeued);
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index bc63b38..e9c8b95 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -912,7 +912,7 @@
ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
const {}
- ALWAYS_INLINE void operator()(mirror::Object* obj,
+ ALWAYS_INLINE void operator()(ObjPtr<mirror::Object> obj,
MemberOffset offset,
bool is_static ATTRIBUTE_UNUSED) const
NO_THREAD_SAFETY_ANALYSIS {
@@ -949,7 +949,8 @@
}
// java.lang.ref.Reference visitor.
- void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
mirror::Object* obj = ref->GetReferent<kWithoutReadBarrier>();
ref->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index be061be..a61a187 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -33,8 +33,7 @@
namespace art {
InternTable::InternTable()
- : images_added_to_intern_table_(false),
- log_new_roots_(false),
+ : log_new_roots_(false),
weak_intern_condition_("New intern condition", *Locks::intern_table_lock_),
weak_root_state_(gc::kWeakRootStateNormal) {
}
@@ -181,57 +180,8 @@
const ImageSection& section = header->GetImageSection(ImageHeader::kSectionInternedStrings);
if (section.Size() > 0) {
AddTableFromMemoryLocked(image_space->Begin() + section.Offset());
- } else {
- // TODO: Delete this logic?
- mirror::Object* root = header->GetImageRoot(ImageHeader::kDexCaches);
- mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
- for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
- mirror::DexCache* dex_cache = dex_caches->Get(i);
- const size_t num_strings = dex_cache->NumStrings();
- for (size_t j = 0; j < num_strings; ++j) {
- mirror::String* image_string = dex_cache->GetResolvedString(j);
- if (image_string != nullptr) {
- mirror::String* found = LookupStrongLocked(image_string);
- if (found == nullptr) {
- InsertStrong(image_string);
- } else {
- DCHECK_EQ(found, image_string);
- }
- }
- }
- }
}
}
- images_added_to_intern_table_ = true;
-}
-
-mirror::String* InternTable::LookupStringFromImage(mirror::String* s) {
- DCHECK(!images_added_to_intern_table_);
- const std::vector<gc::space::ImageSpace*>& image_spaces =
- Runtime::Current()->GetHeap()->GetBootImageSpaces();
- if (image_spaces.empty()) {
- return nullptr; // No image present.
- }
- const std::string utf8 = s->ToModifiedUtf8();
- for (gc::space::ImageSpace* image_space : image_spaces) {
- mirror::Object* root = image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
- mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
- for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
- mirror::DexCache* dex_cache = dex_caches->Get(i);
- const DexFile* dex_file = dex_cache->GetDexFile();
- // Binary search the dex file for the string index.
- const DexFile::StringId* string_id = dex_file->FindStringId(utf8.c_str());
- if (string_id != nullptr) {
- uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
- // GetResolvedString() contains a RB.
- mirror::String* image_string = dex_cache->GetResolvedString(string_idx);
- if (image_string != nullptr) {
- return image_string;
- }
- }
- }
- }
- return nullptr;
}
void InternTable::BroadcastForNewInterns() {
@@ -303,13 +253,6 @@
}
return weak;
}
- // Check the image for a match.
- if (!images_added_to_intern_table_) {
- mirror::String* const image_string = LookupStringFromImage(s);
- if (image_string != nullptr) {
- return is_strong ? InsertStrong(image_string) : InsertWeak(image_string);
- }
- }
// No match in the strong table or the weak table. Insert into the strong / weak table.
return is_strong ? InsertStrong(s) : InsertWeak(s);
}
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 184fbdc..30ff55d 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -238,8 +238,6 @@
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
// Transaction rollback access.
- mirror::String* LookupStringFromImage(mirror::String* s)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
mirror::String* InsertStrongFromTransaction(mirror::String* s)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
mirror::String* InsertWeakFromTransaction(mirror::String* s)
@@ -260,7 +258,6 @@
void WaitUntilAccessible(Thread* self)
REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
- bool images_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_);
ConditionVariable weak_intern_condition_ GUARDED_BY(Locks::intern_table_lock_);
// Since this contains (strong) roots, they need a read barrier to
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 46b9e80..4a3654b 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -1186,13 +1186,13 @@
// This allows statically initializing ConcurrentHashMap and SynchronousQueue.
void UnstartedRuntime::UnstartedReferenceGetReferent(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
- mirror::Reference* const ref = down_cast<mirror::Reference*>(
+ ObjPtr<mirror::Reference> const ref = down_cast<mirror::Reference*>(
shadow_frame->GetVRegReference(arg_offset));
if (ref == nullptr) {
AbortTransactionOrFail(self, "Reference.getReferent() with null object");
return;
}
- mirror::Object* const referent =
+ ObjPtr<mirror::Object> const referent =
Runtime::Current()->GetHeap()->GetReferenceProcessor()->GetReferent(self, ref);
result->SetL(referent);
}
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index c5f95eb..f2bda05 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -422,14 +422,14 @@
tracing_enabled_(runtime_options.Exists(RuntimeArgumentMap::JniTrace)
|| VLOG_IS_ON(third_party_jni)),
trace_(runtime_options.GetOrDefault(RuntimeArgumentMap::JniTrace)),
- globals_lock_("JNI global reference table lock"),
globals_(gGlobalsInitial, gGlobalsMax, kGlobal),
libraries_(new Libraries),
unchecked_functions_(&gJniInvokeInterface),
- weak_globals_lock_("JNI weak global reference table lock", kJniWeakGlobalsLock),
weak_globals_(kWeakGlobalsInitial, kWeakGlobalsMax, kWeakGlobal),
allow_accessing_weak_globals_(true),
- weak_globals_add_condition_("weak globals add condition", weak_globals_lock_),
+ weak_globals_add_condition_("weak globals add condition",
+ (CHECK(Locks::jni_weak_globals_lock_ != nullptr),
+ *Locks::jni_weak_globals_lock_)),
env_hooks_() {
functions = unchecked_functions_;
SetCheckJniEnabled(runtime_options.Exists(RuntimeArgumentMap::CheckJni));
@@ -537,7 +537,7 @@
if (obj == nullptr) {
return nullptr;
}
- WriterMutexLock mu(self, globals_lock_);
+ WriterMutexLock mu(self, *Locks::jni_globals_lock_);
IndirectRef ref = globals_.Add(IRT_FIRST_SEGMENT, obj);
return reinterpret_cast<jobject>(ref);
}
@@ -546,7 +546,7 @@
if (obj == nullptr) {
return nullptr;
}
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
while (UNLIKELY(!MayAccessWeakGlobals(self))) {
weak_globals_add_condition_.WaitHoldingLocks(self);
}
@@ -558,7 +558,7 @@
if (obj == nullptr) {
return;
}
- WriterMutexLock mu(self, globals_lock_);
+ WriterMutexLock mu(self, *Locks::jni_globals_lock_);
if (!globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
LOG(WARNING) << "JNI WARNING: DeleteGlobalRef(" << obj << ") "
<< "failed to find entry";
@@ -569,7 +569,7 @@
if (obj == nullptr) {
return;
}
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
if (!weak_globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
LOG(WARNING) << "JNI WARNING: DeleteWeakGlobalRef(" << obj << ") "
<< "failed to find entry";
@@ -597,11 +597,11 @@
}
Thread* self = Thread::Current();
{
- ReaderMutexLock mu(self, globals_lock_);
+ ReaderMutexLock mu(self, *Locks::jni_globals_lock_);
os << "; globals=" << globals_.Capacity();
}
{
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
if (weak_globals_.Capacity() > 0) {
os << " (plus " << weak_globals_.Capacity() << " weak)";
}
@@ -617,7 +617,7 @@
void JavaVMExt::DisallowNewWeakGlobals() {
CHECK(!kUseReadBarrier);
Thread* const self = Thread::Current();
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
// DisallowNewWeakGlobals is only called by CMS during the pause. It is required to have the
// mutator lock exclusively held so that we don't have any threads in the middle of
// DecodeWeakGlobal.
@@ -628,7 +628,7 @@
void JavaVMExt::AllowNewWeakGlobals() {
CHECK(!kUseReadBarrier);
Thread* self = Thread::Current();
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
allow_accessing_weak_globals_.StoreSequentiallyConsistent(true);
weak_globals_add_condition_.Broadcast(self);
}
@@ -636,7 +636,7 @@
void JavaVMExt::BroadcastForNewWeakGlobals() {
CHECK(kUseReadBarrier);
Thread* self = Thread::Current();
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
weak_globals_add_condition_.Broadcast(self);
}
@@ -645,7 +645,7 @@
}
void JavaVMExt::UpdateGlobal(Thread* self, IndirectRef ref, ObjPtr<mirror::Object> result) {
- WriterMutexLock mu(self, globals_lock_);
+ WriterMutexLock mu(self, *Locks::jni_globals_lock_);
globals_.Update(ref, result);
}
@@ -671,13 +671,13 @@
if (LIKELY(MayAccessWeakGlobalsUnlocked(self))) {
return weak_globals_.SynchronizedGet(ref);
}
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
return DecodeWeakGlobalLocked(self, ref);
}
ObjPtr<mirror::Object> JavaVMExt::DecodeWeakGlobalLocked(Thread* self, IndirectRef ref) {
if (kDebugLocking) {
- weak_globals_lock_.AssertHeld(self);
+ Locks::jni_weak_globals_lock_->AssertHeld(self);
}
while (UNLIKELY(!MayAccessWeakGlobals(self))) {
weak_globals_add_condition_.WaitHoldingLocks(self);
@@ -700,7 +700,7 @@
bool JavaVMExt::IsWeakGlobalCleared(Thread* self, IndirectRef ref) {
DCHECK_EQ(GetIndirectRefKind(ref), kWeakGlobal);
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
while (UNLIKELY(!MayAccessWeakGlobals(self))) {
weak_globals_add_condition_.WaitHoldingLocks(self);
}
@@ -712,18 +712,18 @@
}
void JavaVMExt::UpdateWeakGlobal(Thread* self, IndirectRef ref, ObjPtr<mirror::Object> result) {
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
weak_globals_.Update(ref, result);
}
void JavaVMExt::DumpReferenceTables(std::ostream& os) {
Thread* self = Thread::Current();
{
- ReaderMutexLock mu(self, globals_lock_);
+ ReaderMutexLock mu(self, *Locks::jni_globals_lock_);
globals_.Dump(os);
}
{
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
weak_globals_.Dump(os);
}
}
@@ -920,7 +920,7 @@
}
void JavaVMExt::SweepJniWeakGlobals(IsMarkedVisitor* visitor) {
- MutexLock mu(Thread::Current(), weak_globals_lock_);
+ MutexLock mu(Thread::Current(), *Locks::jni_weak_globals_lock_);
Runtime* const runtime = Runtime::Current();
for (auto* entry : weak_globals_) {
// Need to skip null here to distinguish between null entries and cleared weak ref entries.
@@ -937,13 +937,13 @@
}
void JavaVMExt::TrimGlobals() {
- WriterMutexLock mu(Thread::Current(), globals_lock_);
+ WriterMutexLock mu(Thread::Current(), *Locks::jni_globals_lock_);
globals_.Trim();
}
void JavaVMExt::VisitRoots(RootVisitor* visitor) {
Thread* self = Thread::Current();
- ReaderMutexLock mu(self, globals_lock_);
+ ReaderMutexLock mu(self, *Locks::jni_globals_lock_);
globals_.VisitRoots(visitor, RootInfo(kRootJNIGlobal));
// The weak_globals table is visited by the GC itself (because it mutates the table).
}
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index 2e59a9d..05717f4 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -109,72 +109,81 @@
REQUIRES_SHARED(Locks::mutator_lock_);
void DumpForSigQuit(std::ostream& os)
- REQUIRES(!Locks::jni_libraries_lock_, !globals_lock_, !weak_globals_lock_);
+ REQUIRES(!Locks::jni_libraries_lock_,
+ !Locks::jni_globals_lock_,
+ !Locks::jni_weak_globals_lock_);
void DumpReferenceTables(std::ostream& os)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!globals_lock_, !weak_globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_globals_lock_, !Locks::jni_weak_globals_lock_);
bool SetCheckJniEnabled(bool enabled);
void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!globals_lock_);
+ REQUIRES(!Locks::jni_globals_lock_);
- void DisallowNewWeakGlobals() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
- void AllowNewWeakGlobals() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
- void BroadcastForNewWeakGlobals() REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!weak_globals_lock_);
+ void DisallowNewWeakGlobals()
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_weak_globals_lock_);
+ void AllowNewWeakGlobals()
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_weak_globals_lock_);
+ void BroadcastForNewWeakGlobals()
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_weak_globals_lock_);
jobject AddGlobalRef(Thread* self, ObjPtr<mirror::Object> obj)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_globals_lock_);
jweak AddWeakGlobalRef(Thread* self, ObjPtr<mirror::Object> obj)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_weak_globals_lock_);
- void DeleteGlobalRef(Thread* self, jobject obj) REQUIRES(!globals_lock_);
+ void DeleteGlobalRef(Thread* self, jobject obj) REQUIRES(!Locks::jni_globals_lock_);
- void DeleteWeakGlobalRef(Thread* self, jweak obj) REQUIRES(!weak_globals_lock_);
+ void DeleteWeakGlobalRef(Thread* self, jweak obj) REQUIRES(!Locks::jni_weak_globals_lock_);
void SweepJniWeakGlobals(IsMarkedVisitor* visitor)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_weak_globals_lock_);
ObjPtr<mirror::Object> DecodeGlobal(IndirectRef ref)
REQUIRES_SHARED(Locks::mutator_lock_);
void UpdateGlobal(Thread* self, IndirectRef ref, ObjPtr<mirror::Object> result)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_globals_lock_);
ObjPtr<mirror::Object> DecodeWeakGlobal(Thread* self, IndirectRef ref)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!weak_globals_lock_);
+ REQUIRES(!Locks::jni_weak_globals_lock_);
ObjPtr<mirror::Object> DecodeWeakGlobalLocked(Thread* self, IndirectRef ref)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(weak_globals_lock_);
+ REQUIRES(Locks::jni_weak_globals_lock_);
// Like DecodeWeakGlobal() but to be used only during a runtime shutdown where self may be
// null.
ObjPtr<mirror::Object> DecodeWeakGlobalDuringShutdown(Thread* self, IndirectRef ref)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!weak_globals_lock_);
+ REQUIRES(!Locks::jni_weak_globals_lock_);
// Checks if the weak global ref has been cleared by the GC without decode (read barrier.)
bool IsWeakGlobalCleared(Thread* self, IndirectRef ref)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!weak_globals_lock_);
-
- Mutex& WeakGlobalsLock() RETURN_CAPABILITY(weak_globals_lock_) {
- return weak_globals_lock_;
- }
+ REQUIRES(!Locks::jni_weak_globals_lock_);
void UpdateWeakGlobal(Thread* self, IndirectRef ref, ObjPtr<mirror::Object> result)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_weak_globals_lock_);
const JNIInvokeInterface* GetUncheckedFunctions() const {
return unchecked_functions_;
}
void TrimGlobals() REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!globals_lock_);
+ REQUIRES(!Locks::jni_globals_lock_);
jint HandleGetEnv(/*out*/void** env, jint version);
@@ -187,7 +196,7 @@
bool MayAccessWeakGlobalsUnlocked(Thread* self) const REQUIRES_SHARED(Locks::mutator_lock_);
bool MayAccessWeakGlobals(Thread* self) const
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(weak_globals_lock_);
+ REQUIRES(Locks::jni_weak_globals_lock_);
Runtime* const runtime_;
@@ -203,8 +212,6 @@
// Extra diagnostics.
const std::string trace_;
- // JNI global references.
- ReaderWriterMutex globals_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
// Not guarded by globals_lock since we sometimes use SynchronizedGet in Thread::DecodeJObject.
IndirectReferenceTable globals_;
@@ -215,8 +222,6 @@
// Used by -Xcheck:jni.
const JNIInvokeInterface* const unchecked_functions_;
- // JNI weak global references.
- Mutex weak_globals_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
// Since weak_globals_ contain weak roots, be careful not to
// directly access the object references in it. Use Get() with the
// read barrier enabled.
@@ -224,7 +229,7 @@
IndirectReferenceTable weak_globals_;
// Not guarded by weak_globals_lock since we may use SynchronizedGet in DecodeWeakGlobal.
Atomic<bool> allow_accessing_weak_globals_;
- ConditionVariable weak_globals_add_condition_ GUARDED_BY(weak_globals_lock_);
+ ConditionVariable weak_globals_add_condition_ GUARDED_BY(Locks::jni_weak_globals_lock_);
// TODO Maybe move this to Runtime.
std::vector<GetEnvHook> env_hooks_;
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index d18781a..3789081 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -150,8 +150,11 @@
};
template <bool kIsInstrumented, bool kFillUsable>
-inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count,
- size_t component_size_shift, gc::AllocatorType allocator_type) {
+inline Array* Array::Alloc(Thread* self,
+ ObjPtr<Class> array_class,
+ int32_t component_count,
+ size_t component_size_shift,
+ gc::AllocatorType allocator_type) {
DCHECK(allocator_type != gc::kAllocatorTypeLOS);
DCHECK(array_class != nullptr);
DCHECK(array_class->IsArrayClass());
@@ -204,7 +207,9 @@
template<typename T>
inline PrimitiveArray<T>* PrimitiveArray<T>::Alloc(Thread* self, size_t length) {
- Array* raw_array = Array::Alloc<true>(self, GetArrayClass(), length,
+ Array* raw_array = Array::Alloc<true>(self,
+ GetArrayClass(),
+ length,
ComponentSizeShiftWidth(sizeof(T)),
Runtime::Current()->GetHeap()->GetCurrentAllocator());
return down_cast<PrimitiveArray<T>*>(raw_array);
@@ -275,7 +280,9 @@
}
template<class T>
-inline void PrimitiveArray<T>::Memmove(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos,
+inline void PrimitiveArray<T>::Memmove(int32_t dst_pos,
+ ObjPtr<PrimitiveArray<T>> src,
+ int32_t src_pos,
int32_t count) {
if (UNLIKELY(count == 0)) {
return;
@@ -335,7 +342,9 @@
}
template<class T>
-inline void PrimitiveArray<T>::Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos,
+inline void PrimitiveArray<T>::Memcpy(int32_t dst_pos,
+ ObjPtr<PrimitiveArray<T>> src,
+ int32_t src_pos,
int32_t count) {
if (UNLIKELY(count == 0)) {
return;
@@ -415,6 +424,13 @@
}
}
+template<typename T>
+inline void PrimitiveArray<T>::SetArrayClass(ObjPtr<Class> array_class) {
+ CHECK(array_class_.IsNull());
+ CHECK(array_class != nullptr);
+ array_class_ = GcRoot<Class>(array_class);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 04d02f7..994e9b2 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -39,13 +39,19 @@
// least component_count size, however, if there's usable space at the end of the allocation the
// array will fill it.
template <bool kIsInstrumented, bool kFillUsable = false>
- ALWAYS_INLINE static Array* Alloc(Thread* self, Class* array_class, int32_t component_count,
- size_t component_size_shift, gc::AllocatorType allocator_type)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ ALWAYS_INLINE static Array* Alloc(Thread* self,
+ ObjPtr<Class> array_class,
+ int32_t component_count,
+ size_t component_size_shift,
+ gc::AllocatorType allocator_type)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
- static Array* CreateMultiArray(Thread* self, Handle<Class> element_class,
+ static Array* CreateMultiArray(Thread* self,
+ Handle<Class> element_class,
Handle<IntArray> dimensions)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
@@ -147,7 +153,7 @@
* smaller than element size copies). Arguments are assumed to be within the bounds of the array
* and the arrays non-null.
*/
- void Memmove(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count)
+ void Memmove(int32_t dst_pos, ObjPtr<PrimitiveArray<T>> src, int32_t src_pos, int32_t count)
REQUIRES_SHARED(Locks::mutator_lock_);
/*
@@ -155,14 +161,10 @@
* smaller than element size copies). Arguments are assumed to be within the bounds of the array
* and the arrays non-null.
*/
- void Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count)
+ void Memcpy(int32_t dst_pos, ObjPtr<PrimitiveArray<T>> src, int32_t src_pos, int32_t count)
REQUIRES_SHARED(Locks::mutator_lock_);
- static void SetArrayClass(Class* array_class) {
- CHECK(array_class_.IsNull());
- CHECK(array_class != nullptr);
- array_class_ = GcRoot<Class>(array_class);
- }
+ static void SetArrayClass(ObjPtr<Class> array_class);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
static Class* GetArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/mirror/class_loader-inl.h b/runtime/mirror/class_loader-inl.h
index cc910b0..f5ecdae 100644
--- a/runtime/mirror/class_loader-inl.h
+++ b/runtime/mirror/class_loader-inl.h
@@ -21,6 +21,7 @@
#include "base/mutex-inl.h"
#include "class_table-inl.h"
+#include "obj_ptr-inl.h"
namespace art {
namespace mirror {
@@ -29,7 +30,7 @@
VerifyObjectFlags kVerifyFlags,
ReadBarrierOption kReadBarrierOption,
typename Visitor>
-inline void ClassLoader::VisitReferences(mirror::Class* klass, const Visitor& visitor) {
+inline void ClassLoader::VisitReferences(ObjPtr<mirror::Class> klass, const Visitor& visitor) {
// Visit instance fields first.
VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
if (kVisitClasses) {
diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h
index 407678a..a62a460 100644
--- a/runtime/mirror/class_loader.h
+++ b/runtime/mirror/class_loader.h
@@ -67,7 +67,7 @@
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void VisitReferences(mirror::Class* klass, const Visitor& visitor)
+ void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::classlinker_classes_lock_);
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index cf45748..86b5e7a 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -27,6 +27,7 @@
#include "mirror/class.h"
#include "mirror/method_type.h"
#include "runtime.h"
+#include "obj_ptr.h"
#include <atomic>
@@ -72,7 +73,7 @@
return GetResolvedTypes()[type_idx].Read();
}
-inline void DexCache::SetResolvedType(uint32_t type_idx, Class* resolved) {
+inline void DexCache::SetResolvedType(uint32_t type_idx, ObjPtr<Class> resolved) {
DCHECK_LT(type_idx, NumResolvedTypes()); // NOTE: Unchecked, i.e. not throwing AIOOB.
// TODO default transaction support.
GetResolvedTypes()[type_idx] = GcRoot<Class>(resolved);
@@ -162,7 +163,7 @@
VerifyObjectFlags kVerifyFlags,
ReadBarrierOption kReadBarrierOption,
typename Visitor>
-inline void DexCache::VisitReferences(mirror::Class* klass, const Visitor& visitor) {
+inline void DexCache::VisitReferences(ObjPtr<Class> klass, const Visitor& visitor) {
// Visit instance fields first.
VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
// Visit arrays after.
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 7538c20..c3c7ab4 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -224,7 +224,8 @@
Class* GetResolvedType(uint32_t type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetResolvedType(uint32_t type_idx, Class* resolved) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetResolvedType(uint32_t type_idx, ObjPtr<Class> resolved)
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -341,7 +342,7 @@
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void VisitReferences(mirror::Class* klass, const Visitor& visitor)
+ void VisitReferences(ObjPtr<mirror::Class> klass, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
HeapReference<Object> dex_;
diff --git a/runtime/mirror/executable.cc b/runtime/mirror/executable.cc
index 33ebd81..17c16a2 100644
--- a/runtime/mirror/executable.cc
+++ b/runtime/mirror/executable.cc
@@ -32,14 +32,10 @@
return true;
}
-template bool Executable::CreateFromArtMethod<PointerSize::k32, false>(
- ArtMethod* method);
-template bool Executable::CreateFromArtMethod<PointerSize::k32, true>(
- ArtMethod* method);
-template bool Executable::CreateFromArtMethod<PointerSize::k64, false>(
- ArtMethod* method);
-template bool Executable::CreateFromArtMethod<PointerSize::k64, true>(
- ArtMethod* method);
+template bool Executable::CreateFromArtMethod<PointerSize::k32, false>(ArtMethod* method);
+template bool Executable::CreateFromArtMethod<PointerSize::k32, true>(ArtMethod* method);
+template bool Executable::CreateFromArtMethod<PointerSize::k64, false>(ArtMethod* method);
+template bool Executable::CreateFromArtMethod<PointerSize::k64, true>(ArtMethod* method);
ArtMethod* Executable::GetArtMethod() {
return reinterpret_cast<ArtMethod*>(GetField64(ArtMethodOffset()));
diff --git a/runtime/mirror/field-inl.h b/runtime/mirror/field-inl.h
index adc5107..c03f20a 100644
--- a/runtime/mirror/field-inl.h
+++ b/runtime/mirror/field-inl.h
@@ -79,10 +79,15 @@
}
template<bool kTransactionActive>
-void Field::SetDeclaringClass(ObjPtr<mirror::Class> c) {
+inline void Field::SetDeclaringClass(ObjPtr<mirror::Class> c) {
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), c);
}
+template<bool kTransactionActive>
+inline void Field::SetType(ObjPtr<mirror::Class> type) {
+ SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, type_), type);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/field.cc b/runtime/mirror/field.cc
index 65f6b16..f6b6489 100644
--- a/runtime/mirror/field.cc
+++ b/runtime/mirror/field.cc
@@ -27,7 +27,7 @@
GcRoot<Class> Field::static_class_;
GcRoot<Class> Field::array_class_;
-void Field::SetClass(Class* klass) {
+void Field::SetClass(ObjPtr<Class> klass) {
CHECK(static_class_.IsNull()) << static_class_.Read() << " " << klass;
CHECK(klass != nullptr);
static_class_ = GcRoot<Class>(klass);
@@ -38,7 +38,7 @@
static_class_ = GcRoot<Class>(nullptr);
}
-void Field::SetArrayClass(Class* klass) {
+void Field::SetArrayClass(ObjPtr<Class> klass) {
CHECK(array_class_.IsNull()) << array_class_.Read() << " " << klass;
CHECK(klass != nullptr);
array_class_ = GcRoot<Class>(klass);
diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h
index c5357c9..222d709 100644
--- a/runtime/mirror/field.h
+++ b/runtime/mirror/field.h
@@ -83,10 +83,10 @@
return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, offset_));
}
- static void SetClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void SetClass(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
- static void SetArrayClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void SetArrayClass(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
static void ResetArrayClass() REQUIRES_SHARED(Locks::mutator_lock_);
static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -113,9 +113,7 @@
void SetDeclaringClass(ObjPtr<mirror::Class> c) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetType(mirror::Class* type) REQUIRES_SHARED(Locks::mutator_lock_) {
- SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, type_), type);
- }
+ void SetType(ObjPtr<mirror::Class> type) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
void SetAccessFlags(uint32_t flags) REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/mirror/iftable-inl.h b/runtime/mirror/iftable-inl.h
index b465d07..d6191c2 100644
--- a/runtime/mirror/iftable-inl.h
+++ b/runtime/mirror/iftable-inl.h
@@ -18,11 +18,12 @@
#define ART_RUNTIME_MIRROR_IFTABLE_INL_H_
#include "iftable.h"
+#include "obj_ptr-inl.h"
namespace art {
namespace mirror {
-inline void IfTable::SetInterface(int32_t i, Class* interface) {
+inline void IfTable::SetInterface(int32_t i, ObjPtr<Class> interface) {
DCHECK(interface != nullptr);
DCHECK(interface->IsInterface());
const size_t idx = i * kMax + kInterface;
@@ -30,6 +31,13 @@
SetWithoutChecks<false>(idx, interface);
}
+inline void IfTable::SetMethodArray(int32_t i, ObjPtr<PointerArray> arr) {
+ DCHECK(arr != nullptr);
+ auto idx = i * kMax + kMethodArray;
+ DCHECK(Get(idx) == nullptr);
+ Set<false>(idx, arr);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index a1a2f98..296c163 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -31,7 +31,7 @@
return interface;
}
- ALWAYS_INLINE void SetInterface(int32_t i, Class* interface)
+ ALWAYS_INLINE void SetInterface(int32_t i, ObjPtr<Class> interface)
REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -51,12 +51,7 @@
return method_array == nullptr ? 0u : method_array->GetLength();
}
- void SetMethodArray(int32_t i, PointerArray* arr) REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(arr != nullptr);
- auto idx = i * kMax + kMethodArray;
- DCHECK(Get(idx) == nullptr);
- Set<false>(idx, arr);
- }
+ void SetMethodArray(int32_t i, ObjPtr<PointerArray> arr) REQUIRES_SHARED(Locks::mutator_lock_);
size_t Count() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetLength() / kMax;
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 3e7bca7..f555c80 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -392,8 +392,8 @@
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline bool Object::IsIntArray() {
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
- mirror::Class* klass = GetClass<kVerifyFlags, kReadBarrierOption>();
- mirror::Class* component_type = klass->GetComponentType<kVerifyFlags, kReadBarrierOption>();
+ ObjPtr<Class> klass = GetClass<kVerifyFlags, kReadBarrierOption>();
+ ObjPtr<Class> component_type = klass->GetComponentType<kVerifyFlags, kReadBarrierOption>();
return component_type != nullptr && component_type->template IsPrimitiveInt<kNewFlags>();
}
@@ -406,8 +406,8 @@
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline bool Object::IsLongArray() {
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
- mirror::Class* klass = GetClass<kVerifyFlags, kReadBarrierOption>();
- mirror::Class* component_type = klass->GetComponentType<kVerifyFlags, kReadBarrierOption>();
+ ObjPtr<Class> klass = GetClass<kVerifyFlags, kReadBarrierOption>();
+ ObjPtr<Class> component_type = klass->GetComponentType<kVerifyFlags, kReadBarrierOption>();
return component_type != nullptr && component_type->template IsPrimitiveLong<kNewFlags>();
}
@@ -1117,7 +1117,7 @@
// There is no reference offset bitmap. In the non-static case, walk up the class
// inheritance hierarchy and find reference offsets the hard way. In the static case, just
// consider this class.
- for (mirror::Class* klass = kIsStatic
+ for (ObjPtr<Class> klass = kIsStatic
? AsClass<kVerifyFlags, kReadBarrierOption>()
: GetClass<kVerifyFlags, kReadBarrierOption>();
klass != nullptr;
@@ -1146,13 +1146,13 @@
}
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, typename Visitor>
-inline void Object::VisitInstanceFieldsReferences(mirror::Class* klass, const Visitor& visitor) {
+inline void Object::VisitInstanceFieldsReferences(ObjPtr<Class> klass, const Visitor& visitor) {
VisitFieldsReferences<false, kVerifyFlags, kReadBarrierOption>(
klass->GetReferenceInstanceOffsets<kVerifyFlags>(), visitor);
}
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, typename Visitor>
-inline void Object::VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor) {
+inline void Object::VisitStaticFieldsReferences(ObjPtr<Class> klass, const Visitor& visitor) {
DCHECK(!klass->IsTemp());
klass->VisitFieldsReferences<true, kVerifyFlags, kReadBarrierOption>(0, visitor);
}
@@ -1186,7 +1186,7 @@
typename JavaLangRefVisitor>
inline void Object::VisitReferences(const Visitor& visitor,
const JavaLangRefVisitor& ref_visitor) {
- mirror::Class* klass = GetClass<kVerifyFlags, kReadBarrierOption>();
+ ObjPtr<Class> klass = GetClass<kVerifyFlags, kReadBarrierOption>();
visitor(this, ClassOffset(), false);
const uint32_t class_flags = klass->GetClassFlags<kVerifyNone>();
if (LIKELY(class_flags == kClassFlagNormal)) {
@@ -1201,7 +1201,7 @@
DCHECK(!klass->IsStringClass());
if (class_flags == kClassFlagClass) {
DCHECK((klass->IsClassClass<kVerifyFlags, kReadBarrierOption>()));
- mirror::Class* as_klass = AsClass<kVerifyNone, kReadBarrierOption>();
+ ObjPtr<Class> as_klass = AsClass<kVerifyNone, kReadBarrierOption>();
as_klass->VisitReferences<kVisitNativeRoots, kVerifyFlags, kReadBarrierOption>(klass,
visitor);
} else if (class_flags == kClassFlagObjectArray) {
@@ -1228,7 +1228,7 @@
// actual string instances.
if (!klass->IsStringClass()) {
size_t total_reference_instance_fields = 0;
- mirror::Class* super_class = klass;
+ ObjPtr<Class> super_class = klass;
do {
total_reference_instance_fields += super_class->NumReferenceInstanceFields();
super_class = super_class->GetSuperClass<kVerifyFlags, kReadBarrierOption>();
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index daee727..7e92c53 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -49,7 +49,7 @@
void operator()(ObjPtr<Object> obj, MemberOffset offset, bool /* is_static */) const
ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
// GetFieldObject() contains a RB.
- Object* ref = obj->GetFieldObject<Object>(offset);
+ ObjPtr<Object> ref = obj->GetFieldObject<Object>(offset);
// No WB here as a large object space does not have a card table
// coverage. Instead, cards will be marked separately.
dest_obj_->SetFieldObjectWithoutWriteBarrier<false, false>(offset, ref);
@@ -118,7 +118,7 @@
}
gc::Heap* heap = Runtime::Current()->GetHeap();
// Perform write barriers on copied object references.
- Class* c = src->GetClass();
+ ObjPtr<Class> c = src->GetClass();
if (c->IsArrayClass()) {
if (!c->GetComponentType()->IsPrimitive()) {
ObjectArray<Object>* array = dest->AsObjectArray<Object>();
@@ -182,8 +182,8 @@
hash_code_seed.StoreRelaxed(new_seed);
}
-int32_t Object::IdentityHashCode() const {
- mirror::Object* current_this = const_cast<mirror::Object*>(this);
+int32_t Object::IdentityHashCode() {
+ ObjPtr<Object> current_this = this; // The this pointer may get invalidated by thread suspension.
while (true) {
LockWord lw = current_this->GetLockWord(false);
switch (lw.GetState()) {
@@ -192,7 +192,7 @@
// loop iteration.
LockWord hash_word = LockWord::FromHashCode(GenerateIdentityHashCode(), lw.GCState());
DCHECK_EQ(hash_word.GetState(), LockWord::kHashCode);
- if (const_cast<Object*>(this)->CasLockWordWeakRelaxed(lw, hash_word)) {
+ if (current_this->CasLockWordWeakRelaxed(lw, hash_word)) {
return hash_word.GetHashCode();
}
break;
@@ -227,13 +227,13 @@
}
void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, ObjPtr<Object> new_value) {
- Class* c = GetClass();
+ ObjPtr<Class> c = GetClass();
Runtime* runtime = Runtime::Current();
if (runtime->GetClassLinker() == nullptr || !runtime->IsStarted() ||
!runtime->GetHeap()->IsObjectValidationEnabled() || !c->IsResolved()) {
return;
}
- for (Class* cur = c; cur != nullptr; cur = cur->GetSuperClass()) {
+ for (ObjPtr<Class> cur = c; cur != nullptr; cur = cur->GetSuperClass()) {
for (ArtField& field : cur->GetIFields()) {
StackHandleScope<1> hs(Thread::Current());
Handle<Object> h_object(hs.NewHandle(new_value));
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 84aa96c..13f4028 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -130,9 +130,10 @@
Object* Clone(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
- int32_t IdentityHashCode() const
+ int32_t IdentityHashCode()
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_,
+ !Locks::thread_suspend_count_lock_);
static MemberOffset MonitorOffset() {
return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
@@ -578,12 +579,12 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void VisitInstanceFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR
+ void VisitInstanceFieldsReferences(ObjPtr<mirror::Class> klass, const Visitor& visitor) HOT_ATTR
REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR
+ void VisitStaticFieldsReferences(ObjPtr<mirror::Class> klass, const Visitor& visitor) HOT_ATTR
REQUIRES_SHARED(Locks::mutator_lock_);
private:
diff --git a/runtime/mirror/reference-inl.h b/runtime/mirror/reference-inl.h
index 039989b..a449b41 100644
--- a/runtime/mirror/reference-inl.h
+++ b/runtime/mirror/reference-inl.h
@@ -19,6 +19,8 @@
#include "reference.h"
+#include "obj_ptr-inl.h"
+
namespace art {
namespace mirror {
@@ -27,6 +29,24 @@
return Class::ComputeClassSize(false, vtable_entries, 2, 0, 0, 0, 0, pointer_size);
}
+template<bool kTransactionActive>
+inline void Reference::SetReferent(ObjPtr<Object> referent) {
+ SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent);
+}
+
+inline void Reference::SetPendingNext(ObjPtr<Reference> pending_next) {
+ if (Runtime::Current()->IsActiveTransaction()) {
+ SetFieldObject<true>(PendingNextOffset(), pending_next);
+ } else {
+ SetFieldObject<false>(PendingNextOffset(), pending_next);
+ }
+}
+
+template<bool kTransactionActive>
+inline void FinalizerReference::SetZombie(ObjPtr<Object> zombie) {
+ return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/reference.cc b/runtime/mirror/reference.cc
index 3c7f8c8..1d0b4c5 100644
--- a/runtime/mirror/reference.cc
+++ b/runtime/mirror/reference.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "reference.h"
+#include "reference-inl.h"
#include "art_method.h"
#include "gc_root-inl.h"
@@ -24,7 +24,7 @@
GcRoot<Class> Reference::java_lang_ref_Reference_;
-void Reference::SetClass(Class* java_lang_ref_Reference) {
+void Reference::SetClass(ObjPtr<Class> java_lang_ref_Reference) {
CHECK(java_lang_ref_Reference_.IsNull());
CHECK(java_lang_ref_Reference != nullptr);
java_lang_ref_Reference_ = GcRoot<Class>(java_lang_ref_Reference);
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 6a8b32b..f2fa589 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -20,6 +20,7 @@
#include "base/enums.h"
#include "class.h"
#include "gc_root.h"
+#include "obj_ptr.h"
#include "object.h"
#include "object_callbacks.h"
#include "read_barrier_option.h"
@@ -69,9 +70,7 @@
ReferentOffset());
}
template<bool kTransactionActive>
- void SetReferent(Object* referent) REQUIRES_SHARED(Locks::mutator_lock_) {
- SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent);
- }
+ void SetReferent(ObjPtr<Object> referent) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
void ClearReferent() REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
@@ -82,14 +81,7 @@
return GetFieldObject<Reference, kDefaultVerifyFlags, kReadBarrierOption>(PendingNextOffset());
}
- void SetPendingNext(Reference* pending_next)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (Runtime::Current()->IsActiveTransaction()) {
- SetFieldObject<true>(PendingNextOffset(), pending_next);
- } else {
- SetFieldObject<false>(PendingNextOffset(), pending_next);
- }
- }
+ void SetPendingNext(ObjPtr<Reference> pending_next) REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if the reference's pendingNext is null, indicating it is
// okay to process this reference.
@@ -112,7 +104,7 @@
DCHECK(!java_lang_ref_Reference_.IsNull());
return java_lang_ref_Reference_.Read<kReadBarrierOption>();
}
- static void SetClass(Class* klass);
+ static void SetClass(ObjPtr<Class> klass);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -144,9 +136,8 @@
}
template<bool kTransactionActive>
- void SetZombie(Object* zombie) REQUIRES_SHARED(Locks::mutator_lock_) {
- return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
- }
+ void SetZombie(ObjPtr<Object> zombie) REQUIRES_SHARED(Locks::mutator_lock_);
+
Object* GetZombie() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObjectVolatile<Object>(ZombieOffset());
}
diff --git a/runtime/mirror/stack_trace_element.cc b/runtime/mirror/stack_trace_element.cc
index e2050cc..c00cf91 100644
--- a/runtime/mirror/stack_trace_element.cc
+++ b/runtime/mirror/stack_trace_element.cc
@@ -28,7 +28,7 @@
GcRoot<Class> StackTraceElement::java_lang_StackTraceElement_;
-void StackTraceElement::SetClass(Class* java_lang_StackTraceElement) {
+void StackTraceElement::SetClass(ObjPtr<Class> java_lang_StackTraceElement) {
CHECK(java_lang_StackTraceElement_.IsNull());
CHECK(java_lang_StackTraceElement != nullptr);
java_lang_StackTraceElement_ = GcRoot<Class>(java_lang_StackTraceElement);
@@ -39,30 +39,34 @@
java_lang_StackTraceElement_ = GcRoot<Class>(nullptr);
}
-StackTraceElement* StackTraceElement::Alloc(Thread* self, Handle<String> declaring_class,
- Handle<String> method_name, Handle<String> file_name,
+StackTraceElement* StackTraceElement::Alloc(Thread* self,
+ Handle<String> declaring_class,
+ Handle<String> method_name,
+ Handle<String> file_name,
int32_t line_number) {
ObjPtr<StackTraceElement> trace =
ObjPtr<StackTraceElement>::DownCast(GetStackTraceElement()->AllocObject(self));
if (LIKELY(trace != nullptr)) {
if (Runtime::Current()->IsActiveTransaction()) {
- trace->Init<true>(declaring_class, method_name, file_name, line_number);
+ trace->Init<true>(declaring_class.Get(), method_name.Get(), file_name.Get(), line_number);
} else {
- trace->Init<false>(declaring_class, method_name, file_name, line_number);
+ trace->Init<false>(declaring_class.Get(), method_name.Get(), file_name.Get(), line_number);
}
}
return trace.Ptr();
}
template<bool kTransactionActive>
-void StackTraceElement::Init(Handle<String> declaring_class, Handle<String> method_name,
- Handle<String> file_name, int32_t line_number) {
+void StackTraceElement::Init(ObjPtr<String> declaring_class,
+ ObjPtr<String> method_name,
+ ObjPtr<String> file_name,
+ int32_t line_number) {
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_),
- declaring_class.Get());
+ declaring_class);
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_),
- method_name.Get());
+ method_name);
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_),
- file_name.Get());
+ file_name);
SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_),
line_number);
}
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index 4b3d9d0..d32d8dc 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -47,12 +47,14 @@
return GetField32(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_));
}
- static StackTraceElement* Alloc(Thread* self, Handle<String> declaring_class,
- Handle<String> method_name, Handle<String> file_name,
+ static StackTraceElement* Alloc(Thread* self,
+ Handle<String> declaring_class,
+ Handle<String> method_name,
+ Handle<String> file_name,
int32_t line_number)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- static void SetClass(Class* java_lang_StackTraceElement);
+ static void SetClass(ObjPtr<Class> java_lang_StackTraceElement);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -69,7 +71,9 @@
int32_t line_number_;
template<bool kTransactionActive>
- void Init(Handle<String> declaring_class, Handle<String> method_name, Handle<String> file_name,
+ void Init(ObjPtr<String> declaring_class,
+ ObjPtr<String> method_name,
+ ObjPtr<String> file_name,
int32_t line_number)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index 46caa4d..ed1103f 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -48,7 +48,7 @@
}
}
-void String::SetClass(Class* java_lang_String) {
+void String::SetClass(ObjPtr<Class> java_lang_String) {
CHECK(java_lang_String_.IsNull());
CHECK(java_lang_String != nullptr);
CHECK(java_lang_String->IsStringClass());
@@ -93,12 +93,13 @@
int32_t length = string->GetLength();
int32_t length2 = string2->GetLength();
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
- const bool compressible = kUseStringCompression && (string->IsCompressed() && string2->IsCompressed());
- const int32_t length_with_flag = (compressible) ? String::GetFlaggedCount(length + length2)
- : (length + length2);
+ const bool compressible = kUseStringCompression &&
+ (string->IsCompressed() && string2->IsCompressed());
+ const int32_t length_with_flag = compressible ? String::GetFlaggedCount(length + length2)
+ : (length + length2);
SetStringCountVisitor visitor(length_with_flag);
- String* new_string = Alloc<true>(self, length_with_flag, allocator_type, visitor);
+ ObjPtr<String> new_string = Alloc<true>(self, length_with_flag, allocator_type, visitor);
if (UNLIKELY(new_string == nullptr)) {
return nullptr;
}
@@ -123,7 +124,7 @@
memcpy(new_value + length, string2->GetValue(), length2 * sizeof(uint16_t));
}
}
- return new_string;
+ return new_string.Ptr();
}
String* String::AllocFromUtf16(Thread* self, int32_t utf16_length, const uint16_t* utf16_data_in) {
@@ -134,7 +135,7 @@
int32_t length_with_flag = (compressible) ? String::GetFlaggedCount(utf16_length)
: utf16_length;
SetStringCountVisitor visitor(length_with_flag);
- String* string = Alloc<true>(self, length_with_flag, allocator_type, visitor);
+ ObjPtr<String> string = Alloc<true>(self, length_with_flag, allocator_type, visitor);
if (UNLIKELY(string == nullptr)) {
return nullptr;
}
@@ -146,7 +147,7 @@
uint16_t* array = string->GetValue();
memcpy(array, utf16_data_in, utf16_length * sizeof(uint16_t));
}
- return string;
+ return string.Ptr();
}
String* String::AllocFromModifiedUtf8(Thread* self, const char* utf) {
@@ -156,18 +157,22 @@
return AllocFromModifiedUtf8(self, char_count, utf, byte_count);
}
-String* String::AllocFromModifiedUtf8(Thread* self, int32_t utf16_length, const char* utf8_data_in) {
+String* String::AllocFromModifiedUtf8(Thread* self,
+ int32_t utf16_length,
+ const char* utf8_data_in) {
return AllocFromModifiedUtf8(self, utf16_length, utf8_data_in, strlen(utf8_data_in));
}
-String* String::AllocFromModifiedUtf8(Thread* self, int32_t utf16_length,
- const char* utf8_data_in, int32_t utf8_length) {
+String* String::AllocFromModifiedUtf8(Thread* self,
+ int32_t utf16_length,
+ const char* utf8_data_in,
+ int32_t utf8_length) {
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
const bool compressible = kUseStringCompression && (utf16_length == utf8_length);
const int32_t utf16_length_with_flag = (compressible) ? String::GetFlaggedCount(utf16_length)
: utf16_length;
SetStringCountVisitor visitor(utf16_length_with_flag);
- String* string = Alloc<true>(self, utf16_length_with_flag, allocator_type, visitor);
+ ObjPtr<String> string = Alloc<true>(self, utf16_length_with_flag, allocator_type, visitor);
if (UNLIKELY(string == nullptr)) {
return nullptr;
}
@@ -177,10 +182,10 @@
uint16_t* utf16_data_out = string->GetValue();
ConvertModifiedUtf8ToUtf16(utf16_data_out, utf16_length, utf8_data_in, utf8_length);
}
- return string;
+ return string.Ptr();
}
-bool String::Equals(String* that) {
+bool String::Equals(ObjPtr<String> that) {
if (this == that) {
// Quick reference equality test
return true;
@@ -281,9 +286,9 @@
return result;
}
-int32_t String::CompareTo(String* rhs) {
+int32_t String::CompareTo(ObjPtr<String> rhs) {
// Quick test for comparison of a string with itself.
- String* lhs = this;
+ ObjPtr<String> lhs = this;
if (lhs == rhs) {
return 0;
}
@@ -298,7 +303,9 @@
int32_t countDiff = lhsCount - rhsCount;
int32_t minCount = (countDiff < 0) ? lhsCount : rhsCount;
if (lhs->IsCompressed() && rhs->IsCompressed()) {
- int32_t comparison = memcmp(lhs->GetValueCompressed(), rhs->GetValueCompressed(), minCount * sizeof(uint8_t));
+ int32_t comparison = memcmp(lhs->GetValueCompressed(),
+ rhs->GetValueCompressed(),
+ minCount * sizeof(uint8_t));
if (comparison != 0) {
return comparison;
}
@@ -326,7 +333,7 @@
CharArray* String::ToCharArray(Thread* self) {
StackHandleScope<1> hs(self);
Handle<String> string(hs.NewHandle(this));
- CharArray* result = CharArray::Alloc(self, GetLength());
+ ObjPtr<CharArray> result = CharArray::Alloc(self, GetLength());
if (result != nullptr) {
if (string->IsCompressed()) {
int32_t length = string->GetLength();
@@ -339,7 +346,7 @@
} else {
self->AssertPendingOOMException();
}
- return result;
+ return result.Ptr();
}
void String::GetChars(int32_t start, int32_t end, Handle<CharArray> array, int32_t index) {
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index a18692f..cfb1153 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -146,7 +146,7 @@
bool Equals(const StringPiece& modified_utf8)
REQUIRES_SHARED(Locks::mutator_lock_);
- bool Equals(String* that) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool Equals(ObjPtr<String> that) REQUIRES_SHARED(Locks::mutator_lock_);
// Compare UTF-16 code point values not in a locale-sensitive manner
int Compare(int32_t utf16_length, const char* utf8_data_in);
@@ -165,7 +165,7 @@
int32_t FastIndexOf(MemoryType* chars, int32_t ch, int32_t start)
REQUIRES_SHARED(Locks::mutator_lock_);
- int32_t CompareTo(String* other) REQUIRES_SHARED(Locks::mutator_lock_);
+ int32_t CompareTo(ObjPtr<String> other) REQUIRES_SHARED(Locks::mutator_lock_);
CharArray* ToCharArray(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
@@ -200,7 +200,7 @@
return java_lang_String_.Read();
}
- static void SetClass(Class* java_lang_String) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void SetClass(ObjPtr<Class> java_lang_String) REQUIRES_SHARED(Locks::mutator_lock_);
static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index 8f3ed84..7aff3de 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -33,7 +33,7 @@
GcRoot<Class> Throwable::java_lang_Throwable_;
-void Throwable::SetDetailMessage(String* new_detail_message) {
+void Throwable::SetDetailMessage(ObjPtr<String> new_detail_message) {
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_), new_detail_message);
} else {
@@ -42,7 +42,7 @@
}
}
-void Throwable::SetCause(Throwable* cause) {
+void Throwable::SetCause(ObjPtr<Throwable> cause) {
CHECK(cause != nullptr);
CHECK(cause != this);
Throwable* current_cause = GetFieldObject<Throwable>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_));
@@ -54,7 +54,7 @@
}
}
-void Throwable::SetStackState(Object* state) REQUIRES_SHARED(Locks::mutator_lock_) {
+void Throwable::SetStackState(ObjPtr<Object> state) REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(state != nullptr);
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObjectVolatile<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, backtrace_), state);
@@ -71,11 +71,11 @@
}
int32_t Throwable::GetStackDepth() {
- Object* stack_state = GetStackState();
+ ObjPtr<Object> stack_state = GetStackState();
if (stack_state == nullptr || !stack_state->IsObjectArray()) {
return -1;
}
- mirror::ObjectArray<mirror::Object>* const trace = stack_state->AsObjectArray<mirror::Object>();
+ ObjPtr<mirror::ObjectArray<Object>> const trace = stack_state->AsObjectArray<Object>();
const int32_t array_len = trace->GetLength();
DCHECK_GT(array_len, 0);
// See method BuildInternalStackTraceVisitor::Init for the format.
@@ -85,22 +85,21 @@
std::string Throwable::Dump() {
std::string result(PrettyTypeOf(this));
result += ": ";
- String* msg = GetDetailMessage();
+ ObjPtr<String> msg = GetDetailMessage();
if (msg != nullptr) {
result += msg->ToModifiedUtf8();
}
result += "\n";
- Object* stack_state = GetStackState();
+ ObjPtr<Object> stack_state = GetStackState();
// check stack state isn't missing or corrupt
if (stack_state != nullptr && stack_state->IsObjectArray()) {
- mirror::ObjectArray<mirror::Object>* object_array =
- stack_state->AsObjectArray<mirror::Object>();
+ ObjPtr<ObjectArray<Object>> object_array = stack_state->AsObjectArray<Object>();
// Decode the internal stack trace into the depth and method trace
// See method BuildInternalStackTraceVisitor::Init for the format.
DCHECK_GT(object_array->GetLength(), 0);
- mirror::Object* methods_and_dex_pcs = object_array->Get(0);
+ ObjPtr<Object> methods_and_dex_pcs = object_array->Get(0);
DCHECK(methods_and_dex_pcs->IsIntArray() || methods_and_dex_pcs->IsLongArray());
- mirror::PointerArray* method_trace = down_cast<mirror::PointerArray*>(methods_and_dex_pcs);
+ ObjPtr<PointerArray> method_trace = ObjPtr<PointerArray>::DownCast(methods_and_dex_pcs);
const int32_t array_len = method_trace->GetLength();
CHECK_EQ(array_len % 2, 0);
const auto depth = array_len / 2;
@@ -118,11 +117,12 @@
}
}
} else {
- Object* stack_trace = GetStackTrace();
+ ObjPtr<Object> stack_trace = GetStackTrace();
if (stack_trace != nullptr && stack_trace->IsObjectArray()) {
CHECK_EQ(stack_trace->GetClass()->GetComponentType(),
StackTraceElement::GetStackTraceElement());
- auto* ste_array = down_cast<ObjectArray<StackTraceElement>*>(stack_trace);
+ ObjPtr<ObjectArray<StackTraceElement>> ste_array =
+ ObjPtr<ObjectArray<StackTraceElement>>::DownCast(stack_trace);
if (ste_array->GetLength() == 0) {
result += "(Throwable with empty stack trace)";
} else {
@@ -142,7 +142,7 @@
result += "(Throwable with no stack trace)";
}
}
- Throwable* cause = GetFieldObject<Throwable>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_));
+ ObjPtr<Throwable> cause = GetFieldObject<Throwable>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_));
if (cause != nullptr && cause != this) { // Constructor makes cause == this by default.
result += "Caused by: ";
result += cause->Dump();
@@ -150,7 +150,7 @@
return result;
}
-void Throwable::SetClass(Class* java_lang_Throwable) {
+void Throwable::SetClass(ObjPtr<Class> java_lang_Throwable) {
CHECK(java_lang_Throwable_.IsNull());
CHECK(java_lang_Throwable != nullptr);
java_lang_Throwable_ = GcRoot<Class>(java_lang_Throwable);
diff --git a/runtime/mirror/throwable.h b/runtime/mirror/throwable.h
index 76824cb..0a4ab6f 100644
--- a/runtime/mirror/throwable.h
+++ b/runtime/mirror/throwable.h
@@ -31,7 +31,7 @@
// C++ mirror of java.lang.Throwable
class MANAGED Throwable : public Object {
public:
- void SetDetailMessage(String* new_detail_message) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetDetailMessage(ObjPtr<String> new_detail_message) REQUIRES_SHARED(Locks::mutator_lock_);
String* GetDetailMessage() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_));
@@ -42,8 +42,8 @@
// This is a runtime version of initCause, you shouldn't use it if initCause may have been
// overridden. Also it asserts rather than throwing exceptions. Currently this is only used
// in cases like the verifier where the checks cannot fail and initCause isn't overridden.
- void SetCause(Throwable* cause) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetStackState(Object* state) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetCause(ObjPtr<Throwable> cause) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetStackState(ObjPtr<Object> state) REQUIRES_SHARED(Locks::mutator_lock_);
bool IsCheckedException() REQUIRES_SHARED(Locks::mutator_lock_);
static Class* GetJavaLangThrowable() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -53,7 +53,7 @@
int32_t GetStackDepth() REQUIRES_SHARED(Locks::mutator_lock_);
- static void SetClass(Class* java_lang_Throwable);
+ static void SetClass(ObjPtr<Class> java_lang_Throwable);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/monitor_android.cc b/runtime/monitor_android.cc
index 0d1839b..1dd60f8 100644
--- a/runtime/monitor_android.cc
+++ b/runtime/monitor_android.cc
@@ -21,7 +21,7 @@
#include <sys/stat.h>
#include <sys/types.h>
-#include <android/log.h>
+#include <log/log.h>
#define EVENT_LOG_TAG_dvm_lock_sample 20003
diff --git a/runtime/native/java_lang_ref_Reference.cc b/runtime/native/java_lang_ref_Reference.cc
index 95f6d51..bedca10 100644
--- a/runtime/native/java_lang_ref_Reference.cc
+++ b/runtime/native/java_lang_ref_Reference.cc
@@ -28,8 +28,8 @@
static jobject Reference_getReferent(JNIEnv* env, jobject javaThis) {
ScopedFastNativeObjectAccess soa(env);
ObjPtr<mirror::Reference> ref = soa.Decode<mirror::Reference>(javaThis);
- mirror::Object* const referent =
- Runtime::Current()->GetHeap()->GetReferenceProcessor()->GetReferent(soa.Self(), ref.Ptr());
+ ObjPtr<mirror::Object> const referent =
+ Runtime::Current()->GetHeap()->GetReferenceProcessor()->GetReferent(soa.Self(), ref);
return soa.AddLocalReference<jobject>(referent);
}
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index f164a92..c14b616 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -959,7 +959,7 @@
DCHECK(!error_msg->empty());
return false;
}
- bool loaded = elf_file_->Load(executable, low_4gb, error_msg);
+ bool loaded = elf_file_->Load(file, executable, low_4gb, error_msg);
DCHECK(loaded || !error_msg->empty());
return loaded;
}
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index 05da585..ac8d5e1 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -476,7 +476,8 @@
}
static jvmtiError GetLoadedClasses(jvmtiEnv* env, jint* class_count_ptr, jclass** classes_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ HeapUtil heap_util(&gObjectTagTable);
+ return heap_util.GetLoadedClasses(env, class_count_ptr, classes_ptr);
}
static jvmtiError GetClassLoaderClasses(jvmtiEnv* env,
diff --git a/runtime/openjdkjvmti/heap.cc b/runtime/openjdkjvmti/heap.cc
index 95d9a1d..859941c 100644
--- a/runtime/openjdkjvmti/heap.cc
+++ b/runtime/openjdkjvmti/heap.cc
@@ -19,7 +19,10 @@
#include "art_jvmti.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "class_linker.h"
#include "gc/heap.h"
+#include "java_vm_ext.h"
+#include "jni_env_ext.h"
#include "mirror/class.h"
#include "object_callbacks.h"
#include "object_tagging.h"
@@ -163,4 +166,49 @@
return ERR(NONE);
}
+jvmtiError HeapUtil::GetLoadedClasses(jvmtiEnv* env,
+ jint* class_count_ptr,
+ jclass** classes_ptr) {
+ if (class_count_ptr == nullptr || classes_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ class ReportClassVisitor : public art::ClassVisitor {
+ public:
+ explicit ReportClassVisitor(art::Thread* self) : self_(self) {}
+
+ bool operator()(art::mirror::Class* klass) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::JNIEnvExt* jni_env = self_->GetJniEnv();
+ classes_.push_back(reinterpret_cast<jclass>(jni_env->vm->AddGlobalRef(self_, klass)));
+ return true;
+ }
+
+ art::Thread* self_;
+ std::vector<jclass> classes_;
+ };
+
+ art::Thread* self = art::Thread::Current();
+ ReportClassVisitor rcv(self);
+ {
+ art::ScopedObjectAccess soa(self);
+ art::Runtime::Current()->GetClassLinker()->VisitClasses(&rcv);
+ }
+
+ size_t size = rcv.classes_.size();
+ jclass* classes = nullptr;
+ jvmtiError alloc_ret = env->Allocate(static_cast<jlong>(size * sizeof(jclass)),
+ reinterpret_cast<unsigned char**>(&classes));
+ if (alloc_ret != ERR(NONE)) {
+ return alloc_ret;
+ }
+
+ for (size_t i = 0; i < size; ++i) {
+ classes[i] = rcv.classes_[i];
+ }
+ *classes_ptr = classes;
+ *class_count_ptr = static_cast<jint>(size);
+
+ return ERR(NONE);
+}
+
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/heap.h b/runtime/openjdkjvmti/heap.h
index fb9a216..b6becb9 100644
--- a/runtime/openjdkjvmti/heap.h
+++ b/runtime/openjdkjvmti/heap.h
@@ -28,6 +28,8 @@
explicit HeapUtil(ObjectTagTable* tags) : tags_(tags) {
}
+ jvmtiError GetLoadedClasses(jvmtiEnv* env, jint* class_count_ptr, jclass** classes_ptr);
+
jvmtiError IterateThroughHeap(jvmtiEnv* env,
jint heap_filter,
jclass klass,
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 9c0d2db..6e15c38 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -147,6 +147,10 @@
#include "verifier/method_verifier.h"
#include "well_known_classes.h"
+#ifdef ART_TARGET_ANDROID
+#include <android/set_abort_message.h>
+#endif
+
namespace art {
// If a signal isn't handled properly, enable a handler that attempts to dump the Java stack.
@@ -495,7 +499,7 @@
bool Runtime::ParseOptions(const RuntimeOptions& raw_options,
bool ignore_unrecognized,
RuntimeArgumentMap* runtime_options) {
- InitLogging(/* argv */ nullptr); // Calls Locks::Init() as a side effect.
+ InitLogging(/* argv */ nullptr, Aborter); // Calls Locks::Init() as a side effect.
bool parsed = ParsedOptions::Parse(raw_options, ignore_unrecognized, runtime_options);
if (!parsed) {
LOG(ERROR) << "Failed to parse options";
@@ -829,7 +833,7 @@
if (file.get() == nullptr) {
return false;
}
- std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.release(),
+ std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.get(),
false /* writable */,
false /* program_header_only */,
false /* low_4gb */,
@@ -1165,10 +1169,6 @@
ScopedTrace trace2("AddImageStringsToTable");
GetInternTable()->AddImagesStringsToTable(heap_->GetBootImageSpaces());
}
- {
- ScopedTrace trace2("MoveImageClassesToClassTable");
- GetClassLinker()->AddBootImageClassesToClassTable();
- }
} else {
std::vector<std::string> dex_filenames;
Split(boot_class_path_string_, ':', &dex_filenames);
@@ -2099,4 +2099,12 @@
}
}
+NO_RETURN
+void Runtime::Aborter(const char* abort_message) {
+#ifdef __ANDROID__
+ android_set_abort_message(abort_message);
+#endif
+ Runtime::Abort(abort_message);
+}
+
} // namespace art
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 66fd058..e2ba262 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -662,6 +662,9 @@
void AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
void RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
+ NO_RETURN
+ static void Aborter(const char* abort_message);
+
private:
static void InitPlatformSignalHandlers();
diff --git a/test/907-get-loaded-classes/build b/test/907-get-loaded-classes/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/907-get-loaded-classes/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/907-get-loaded-classes/expected.txt b/test/907-get-loaded-classes/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/907-get-loaded-classes/expected.txt
diff --git a/test/907-get-loaded-classes/get_loaded_classes.cc b/test/907-get-loaded-classes/get_loaded_classes.cc
new file mode 100644
index 0000000..e752bcb
--- /dev/null
+++ b/test/907-get-loaded-classes/get_loaded_classes.cc
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "get_loaded_classes.h"
+
+#include <iostream>
+#include <pthread.h>
+#include <stdio.h>
+#include <vector>
+
+#include "base/macros.h"
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+#include "ScopedLocalRef.h"
+#include "ScopedUtfChars.h"
+
+#include "ti-agent/common_load.h"
+
+namespace art {
+namespace Test907GetLoadedClasses {
+
+static jstring GetClassName(JNIEnv* jni_env, jclass cls) {
+ ScopedLocalRef<jclass> class_class(jni_env, jni_env->GetObjectClass(cls));
+ jmethodID mid = jni_env->GetMethodID(class_class.get(), "getName", "()Ljava/lang/String;");
+ return reinterpret_cast<jstring>(jni_env->CallObjectMethod(cls, mid));
+}
+
+extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getLoadedClasses(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
+ jint count = -1;
+ jclass* classes = nullptr;
+ jvmtiError result = jvmti_env->GetLoadedClasses(&count, &classes);
+ if (result != JVMTI_ERROR_NONE) {
+ char* err;
+ jvmti_env->GetErrorName(result, &err);
+ printf("Failure running GetLoadedClasses: %s\n", err);
+ return nullptr;
+ }
+
+ ScopedLocalRef<jclass> obj_class(env, env->FindClass("java/lang/String"));
+ if (obj_class.get() == nullptr) {
+ return nullptr;
+ }
+
+ jobjectArray ret = env->NewObjectArray(count, obj_class.get(), nullptr);
+ if (ret == nullptr) {
+ return ret;
+ }
+
+ for (size_t i = 0; i < static_cast<size_t>(count); ++i) {
+ jstring class_name = GetClassName(env, classes[i]);
+ env->SetObjectArrayElement(ret, static_cast<jint>(i), class_name);
+ env->DeleteLocalRef(class_name);
+ }
+
+ // Need to:
+ // 1) Free the global references.
+ // 2) Deallocate.
+ for (size_t i = 0; i < static_cast<size_t>(count); ++i) {
+ env->DeleteGlobalRef(classes[i]);
+ }
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(classes));
+
+ return ret;
+}
+
+} // namespace Test907GetLoadedClasses
+} // namespace art
diff --git a/test/907-get-loaded-classes/get_loaded_classes.h b/test/907-get-loaded-classes/get_loaded_classes.h
new file mode 100644
index 0000000..4d27f89
--- /dev/null
+++ b/test/907-get-loaded-classes/get_loaded_classes.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_907_GET_LOADED_CLASSES_GET_LOADED_CLASSES_H_
+#define ART_TEST_907_GET_LOADED_CLASSES_GET_LOADED_CLASSES_H_
+
+#include <jni.h>
+
+namespace art {
+namespace Test907GetLoadedClasses {
+
+jint OnLoad(JavaVM* vm, char* options, void* reserved);
+
+} // namespace Test907GetLoadedClasses
+} // namespace art
+
+#endif // ART_TEST_907_GET_LOADED_CLASSES_GET_LOADED_CLASSES_H_
diff --git a/test/907-get-loaded-classes/info.txt b/test/907-get-loaded-classes/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/907-get-loaded-classes/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/907-get-loaded-classes/run b/test/907-get-loaded-classes/run
new file mode 100755
index 0000000..3e135a3
--- /dev/null
+++ b/test/907-get-loaded-classes/run
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if [[ "$@" == *"-O"* ]]; then
+ agent=libtiagent.so
+ plugin=libopenjdkjvmti.so
+ lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+ arg="jvm"
+else
+ arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+ other_args=" -Xcompiler-option --debuggable "
+else
+ other_args=""
+fi
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --runtime-option -agentpath:${agent}=907-get-loaded-classes,${arg} \
+ --android-runtime-option -Xplugin:${plugin} \
+ ${other_args} \
+ --args ${lib}
diff --git a/test/907-get-loaded-classes/src/Main.java b/test/907-get-loaded-classes/src/Main.java
new file mode 100644
index 0000000..468d037
--- /dev/null
+++ b/test/907-get-loaded-classes/src/Main.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[1]);
+
+ doTest();
+ }
+
+ public static void doTest() throws Exception {
+ // Ensure some classes are loaded.
+ A a = new A();
+ B b = new B();
+ A[] aArray = new A[5];
+
+ String[] classes = getLoadedClasses();
+ HashSet<String> classesSet = new HashSet<>(Arrays.asList(classes));
+
+ String[] shouldBeLoaded = new String[] {
+ "java.lang.Object", "java.lang.Class", "java.lang.String", "Main$A", "Main$B", "[LMain$A;"
+ };
+
+ boolean error = false;
+ for (String s : shouldBeLoaded) {
+ if (!classesSet.contains(s)) {
+ System.out.println("Did not find " + s);
+ error = true;
+ }
+ }
+
+ if (error) {
+ System.out.println(Arrays.toString(classes));
+ }
+ }
+
+ static class A {
+ }
+
+ static class B {
+ }
+
+ private static native String[] getLoadedClasses();
+}
diff --git a/test/978-virtual-interface/build b/test/978-virtual-interface/build
new file mode 100755
index 0000000..14230c2
--- /dev/null
+++ b/test/978-virtual-interface/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+./default-build "$@" --experimental default-methods
diff --git a/test/978-virtual-interface/expected.txt b/test/978-virtual-interface/expected.txt
new file mode 100644
index 0000000..99071b1
--- /dev/null
+++ b/test/978-virtual-interface/expected.txt
@@ -0,0 +1 @@
+Recieved expected ICCE error!
diff --git a/test/978-virtual-interface/info.txt b/test/978-virtual-interface/info.txt
new file mode 100644
index 0000000..0b8a39f
--- /dev/null
+++ b/test/978-virtual-interface/info.txt
@@ -0,0 +1,7 @@
+Smali-based regression test for b/32201623
+
+This test cannot be run with --jvm.
+
+This test checks that we correctly detect when one attempts to invoke an
+interface method via the invoke-virtual opcode and that correct exceptions are
+sent.
diff --git a/test/978-virtual-interface/smali/Iface.smali b/test/978-virtual-interface/smali/Iface.smali
new file mode 100644
index 0000000..9c3ef7a
--- /dev/null
+++ b/test/978-virtual-interface/smali/Iface.smali
@@ -0,0 +1,110 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# // Methods are sorted in alphabetical order in dex file. We need 10 padding
+# // methods to ensure the 11'th target lines up to the same vtable slot as the
+# // first Subtype virtual method (the other 10 are the java/lang/Object;
+# // methods).
+# interface Iface {
+# public default void fakeMethod_A() {}
+# public default void fakeMethod_B() {}
+# public default void fakeMethod_C() {}
+# public default void fakeMethod_D() {}
+# public default void fakeMethod_E() {}
+# public default void fakeMethod_F() {}
+# public default void fakeMethod_G() {}
+# public default void fakeMethod_H() {}
+# public default void fakeMethod_I() {}
+# public default void fakeMethod_J() {}
+# public default void fakeMethod_K() {}
+# public default void fakeMethod_Target() {}
+# }
+
+.class public abstract interface LIface;
+
+.super Ljava/lang/Object;
+
+# // 1
+.method public fakeMethod_A()V
+ .locals 0
+ return-void
+.end method
+
+# // 2
+.method public fakeMethod_B()V
+ .locals 0
+ return-void
+.end method
+
+# // 3
+.method public fakeMethod_C()V
+ .locals 0
+ return-void
+.end method
+
+# // 4
+.method public fakeMethod_D()V
+ .locals 0
+ return-void
+.end method
+
+# // 5
+.method public fakeMethod_E()V
+ .locals 0
+ return-void
+.end method
+
+# // 6
+.method public fakeMethod_F()V
+ .locals 0
+ return-void
+.end method
+
+# // 7
+.method public fakeMethod_G()V
+ .locals 0
+ return-void
+.end method
+
+# // 8
+.method public fakeMethod_H()V
+ .locals 0
+ return-void
+.end method
+
+# // 9
+.method public fakeMethod_I()V
+ .locals 0
+ return-void
+.end method
+
+# // 10
+.method public fakeMethod_J()V
+ .locals 0
+ return-void
+.end method
+
+# // 11
+.method public fakeMethod_K()V
+ .locals 0
+ return-void
+.end method
+
+# // 12
+.method public fakeMethod_Target()V
+ .locals 0
+ return-void
+.end method
diff --git a/test/978-virtual-interface/smali/Main.smali b/test/978-virtual-interface/smali/Main.smali
new file mode 100644
index 0000000..61b82f3
--- /dev/null
+++ b/test/978-virtual-interface/smali/Main.smali
@@ -0,0 +1,50 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public class Main {
+# public static void main(String[] s) {
+# Subtype s = new Subtype();
+# try {
+# s.callPackage();
+# System.out.println("No error thrown!");
+# } catch (IncompatibleClassChangeError e) {
+# System.out.println("Recieved expected ICCE error!");
+# }
+# }
+# }
+
+.class public LMain;
+
+.super Ljava/lang/Object;
+
+.method public static main([Ljava/lang/String;)V
+ .locals 3
+
+ new-instance v0, LSubtype;
+ invoke-direct {v0}, LSubtype;-><init>()V
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ :try_start
+ invoke-virtual {v0}, LSubtype;->callPackage()V
+ const-string v1, "No error thrown!"
+ invoke-virtual {v2, v1}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
+ return-void
+ :try_end
+ .catch Ljava/lang/IncompatibleClassChangeError; {:try_start .. :try_end} :error_start
+ :error_start
+ const-string v1, "Recieved expected ICCE error!"
+ invoke-virtual {v2, v1}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
+ return-void
+.end method
diff --git a/test/978-virtual-interface/smali/Subtype.smali b/test/978-virtual-interface/smali/Subtype.smali
new file mode 100644
index 0000000..f876cf9
--- /dev/null
+++ b/test/978-virtual-interface/smali/Subtype.smali
@@ -0,0 +1,40 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public class Subtype extends pkg.Target implements Iface{
+# public void callPackage() {
+# // Fake into a virtual call.
+# // ((Iface)this).fakeMethod_Target();
+# }
+# }
+
+.class public LSubtype;
+
+.super Lpkg/Target;
+
+.implements LIface;
+
+.method public constructor <init>()V
+ .locals 0
+ invoke-direct {p0}, Lpkg/Target;-><init>()V
+ return-void
+.end method
+
+.method public callPackage()V
+ .locals 0
+ invoke-virtual {p0}, LIface;->fakeMethod_Target()V
+ return-void
+.end method
diff --git a/test/978-virtual-interface/smali/Target.smali b/test/978-virtual-interface/smali/Target.smali
new file mode 100644
index 0000000..70108fb
--- /dev/null
+++ b/test/978-virtual-interface/smali/Target.smali
@@ -0,0 +1,40 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# package pkg;
+# public class Target {
+# public void packageMethod() {
+# System.out.println("Package method called!");
+# }
+# }
+
+.class public Lpkg/Target;
+
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+ .locals 0
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method packageMethod()V
+ .locals 2
+ const-string v1, "Package method called!"
+ sget-object v0, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ invoke-virtual {v0, v1}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
+ return-void
+.end method
diff --git a/test/Android.bp b/test/Android.bp
index 45673f5..8496ffd 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -249,6 +249,7 @@
"904-object-allocation/tracking.cc",
"905-object-free/tracking_free.cc",
"906-iterate-heap/iterate_heap.cc",
+ "907-get-loaded-classes/get_loaded_classes.cc",
],
shared_libs: [
"libbase",
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 64ff5ba..7a5dab0 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -263,7 +263,7 @@
# 147-stripped-dex-fallback isn't supported on device because --strip-dex
# requires the zip command.
# 569-checker-pattern-replacement tests behaviour present only on host.
-# 90{2,3,4,5,6} are not supported in current form due to linker
+# 90{2,3,4,5,6,7} are not supported in current form due to linker
# restrictions. See b/31681198
TEST_ART_BROKEN_TARGET_TESTS := \
147-stripped-dex-fallback \
@@ -273,6 +273,7 @@
904-object-allocation \
905-object-free \
906-iterate-heap \
+ 907-get-loaded-classes \
ifneq (,$(filter target,$(TARGET_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \