am c63a505a: Fix concurrent start bytes race
* commit 'c63a505ae9992cc6ad154179734f078594c72d01':
Fix concurrent start bytes race
diff --git a/build/Android.executable.mk b/build/Android.executable.mk
index d887acd..02252ab 100644
--- a/build/Android.executable.mk
+++ b/build/Android.executable.mk
@@ -54,7 +54,7 @@
include $(CLEAR_VARS)
LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
LOCAL_MODULE_TAGS := optional
- LOCAL_SRC_FILES := $$(art_source)
+ LOCAL_SRC_FILES := $$(art_source) ../sigchainlib/sigchain.cc
LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime $$(art_c_includes)
LOCAL_SHARED_LIBRARIES += $$(art_shared_libraries)
@@ -65,9 +65,15 @@
endif
LOCAL_CFLAGS := $(ART_EXECUTABLES_CFLAGS)
+ # Mac OS linker doesn't understand --export-dynamic/--version-script.
+ ifneq ($$(HOST_OS)-$$(art_target_or_host),darwin-host)
+ LOCAL_LDFLAGS := -Wl,--version-script,art/sigchainlib/version-script.txt -Wl,--export-dynamic
+ endif
+
ifeq ($$(art_target_or_host),target)
$(call set-target-local-clang-vars)
$(call set-target-local-cflags-vars,$(6))
+ LOCAL_SHARED_LIBRARIES += libdl
else # host
LOCAL_CLANG := $(ART_HOST_CLANG)
LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
@@ -76,7 +82,7 @@
else
LOCAL_CFLAGS += $(ART_HOST_NON_DEBUG_CFLAGS)
endif
- LOCAL_LDLIBS += -lpthread
+ LOCAL_LDLIBS += -lpthread -ldl
endif
ifeq ($$(art_ndebug_or_debug),ndebug)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index a7d852b..bf9e9ec 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -278,7 +278,7 @@
.PHONY: $$(gtest_rule)
$$(gtest_rule): $$(gtest_exe) $$(ART_GTEST_$(1)_HOST_DEPS) $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX)) $$(gtest_deps)
- $(hide) ($$(call ART_TEST_SKIP,$$@) && LD_PRELOAD=libsigchain$$(ART_HOST_SHLIB_EXTENSION) $$< && $$(call ART_TEST_PASSED,$$@)) \
+ $(hide) ($$(call ART_TEST_SKIP,$$@) && $$< && $$(call ART_TEST_PASSED,$$@)) \
|| $$(call ART_TEST_FAILED,$$@)
ART_TEST_HOST_GTEST$$($(2)ART_PHONY_TEST_HOST_SUFFIX)_RULES += $$(gtest_rule)
@@ -326,7 +326,7 @@
LOCAL_MODULE_TAGS := tests
endif
LOCAL_CPP_EXTENSION := $$(ART_CPP_EXTENSION)
- LOCAL_SRC_FILES := $$(art_gtest_filename)
+ LOCAL_SRC_FILES := $$(art_gtest_filename) sigchainlib/sigchain.cc
LOCAL_C_INCLUDES += $$(ART_C_INCLUDES) art/runtime $$(art_gtest_extra_c_includes)
LOCAL_SHARED_LIBRARIES += libartd $$(art_gtest_extra_shared_libraries) libart-gtest
diff --git a/dalvikvm/Android.mk b/dalvikvm/Android.mk
index a06b5c5..0bab429 100644
--- a/dalvikvm/Android.mk
+++ b/dalvikvm/Android.mk
@@ -24,10 +24,11 @@
LOCAL_MODULE := dalvikvm
LOCAL_MODULE_TAGS := optional
LOCAL_CPP_EXTENSION := cc
-LOCAL_SRC_FILES := dalvikvm.cc
+LOCAL_SRC_FILES := dalvikvm.cc ../sigchainlib/sigchain.cc
LOCAL_CFLAGS := $(dalvikvm_cflags)
LOCAL_C_INCLUDES := art/runtime
-LOCAL_SHARED_LIBRARIES := libdl libnativehelper
+LOCAL_SHARED_LIBRARIES := libdl liblog libnativehelper
+LOCAL_LDFLAGS := -Wl,--version-script,art/sigchainlib/version-script.txt -Wl,--export-dynamic
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.common.mk
LOCAL_MULTILIB := both
@@ -50,11 +51,15 @@
LOCAL_MODULE_TAGS := optional
LOCAL_CLANG := true
LOCAL_CPP_EXTENSION := cc
-LOCAL_SRC_FILES := dalvikvm.cc
+LOCAL_SRC_FILES := dalvikvm.cc ../sigchainlib/sigchain.cc
LOCAL_CFLAGS := $(dalvikvm_cflags)
LOCAL_C_INCLUDES := art/runtime
LOCAL_SHARED_LIBRARIES := libnativehelper
LOCAL_LDFLAGS := -ldl -lpthread
+# Mac OS linker doesn't understand --export-dynamic.
+ifneq ($(HOST_OS),darwin)
+ LOCAL_LDFLAGS += -Wl,--export-dynamic
+endif
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.common.mk
LOCAL_IS_HOST_MODULE := true
diff --git a/runtime/arch/mips/asm_support_mips.h b/runtime/arch/mips/asm_support_mips.h
index 4db5ea6..6add93b 100644
--- a/runtime/arch/mips/asm_support_mips.h
+++ b/runtime/arch/mips/asm_support_mips.h
@@ -22,9 +22,9 @@
// Offset of field Thread::tls32_.state_and_flags verified in InitCpu
#define THREAD_FLAGS_OFFSET 0
// Offset of field Thread::tlsPtr_.card_table verified in InitCpu
-#define THREAD_CARD_TABLE_OFFSET 112
+#define THREAD_CARD_TABLE_OFFSET 120
// Offset of field Thread::tlsPtr_.exception verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 116
+#define THREAD_EXCEPTION_OFFSET 124
#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 64
#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 64
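
For reference, a minimal standalone sketch of how offset macros like these are kept honest against the C++ layout, in the spirit of the "verified in InitCpu" comments above. The struct is a hypothetical 32-bit-style stand-in, not ART's real Thread class; only the offsets 0/120/124 come from the hunk.

    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-in for the start of the Thread TLS block the macros describe.
    struct FakeThreadTls32 {
      uint32_t state_and_flags;  // THREAD_FLAGS_OFFSET == 0
      char filler[116];          // assumed padding so card_table lands at the new offset
      uint32_t card_table;       // THREAD_CARD_TABLE_OFFSET == 120 (32-bit pointer stand-in)
      uint32_t exception;        // THREAD_EXCEPTION_OFFSET == 124
    };

    static_assert(offsetof(FakeThreadTls32, state_and_flags) == 0, "flags offset drifted");
    static_assert(offsetof(FakeThreadTls32, card_table) == 120, "card table offset drifted");
    static_assert(offsetof(FakeThreadTls32, exception) == 124, "exception offset drifted");

    int main() { return 0; }
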
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 9b97411..495ea5c 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -647,7 +647,13 @@
void ReaderWriterMutex::Dump(std::ostream& os) const {
os << name_
<< " level=" << static_cast<int>(level_)
- << " owner=" << GetExclusiveOwnerTid() << " ";
+ << " owner=" << GetExclusiveOwnerTid()
+#if ART_USE_FUTEXES
+ << " state=" << state_.LoadSequentiallyConsistent()
+ << " num_pending_writers=" << num_pending_writers_.LoadSequentiallyConsistent()
+ << " num_pending_readers=" << num_pending_readers_.LoadSequentiallyConsistent()
+#endif
+ << " ";
DumpContention(os);
}
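
For reference, a standalone sketch of the diagnostic pattern the mutex.cc hunk adds: when futexes are in use, the dump also prints the lock state and pending reader/writer counts, read with sequentially consistent loads. The types below are simplified stand-ins, not ART's Atomic or ReaderWriterMutex.

    #include <atomic>
    #include <cstdint>
    #include <iostream>
    #include <string>

    struct RwMutexStats {
      std::string name;
      std::atomic<int32_t> state{0};                // -1: writer held, 0: free, >0: reader count
      std::atomic<int32_t> num_pending_writers{0};
      std::atomic<int32_t> num_pending_readers{0};

      void Dump(std::ostream& os) const {
        os << name
           << " state=" << state.load(std::memory_order_seq_cst)
           << " num_pending_writers=" << num_pending_writers.load(std::memory_order_seq_cst)
           << " num_pending_readers=" << num_pending_readers.load(std::memory_order_seq_cst)
           << "\n";
      }
    };

    int main() {
      RwMutexStats stats;
      stats.name = "mutator lock";
      stats.state.store(2);  // pretend two readers currently hold the lock
      stats.Dump(std::cout);
      return 0;
    }
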
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index e074fc1..dc845c7 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -2460,298 +2460,325 @@
return JDWP::ERR_NONE;
}
-JDWP::JdwpError Dbg::GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
- JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
- struct GetLocalVisitor : public StackVisitor {
- GetLocalVisitor(const ScopedObjectAccessUnchecked& soa, Thread* thread, Context* context,
- JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, context), soa_(soa), frame_id_(frame_id), slot_(slot), tag_(tag),
- buf_(buf), width_(width), error_(JDWP::ERR_NONE) {}
+// Walks the stack until we find the frame with the given FrameId.
+class FindFrameVisitor FINAL : public StackVisitor {
+ public:
+ FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : StackVisitor(thread, context), frame_id_(frame_id), error_(JDWP::ERR_INVALID_FRAMEID) {}
- // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
- // annotalysis.
- bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
- if (GetFrameId() != frame_id_) {
- return true; // Not our frame, carry on.
- }
- // TODO: check that the tag is compatible with the actual type of the slot!
- // TODO: check slot is valid for this method or return INVALID_SLOT error.
- mirror::ArtMethod* m = GetMethod();
- if (m->IsNative()) {
- // We can't read local value from native method.
- error_ = JDWP::ERR_OPAQUE_FRAME;
- return false;
- }
- uint16_t reg = DemangleSlot(slot_, m);
- constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
- switch (tag_) {
- case JDWP::JT_BOOLEAN: {
- CHECK_EQ(width_, 1U);
- uint32_t intVal;
- if (GetVReg(m, reg, kIntVReg, &intVal)) {
- VLOG(jdwp) << "get boolean local " << reg << " = " << intVal;
- JDWP::Set1(buf_+1, intVal != 0);
- } else {
- VLOG(jdwp) << "failed to get boolean local " << reg;
- error_ = kFailureErrorCode;
- }
- break;
- }
- case JDWP::JT_BYTE: {
- CHECK_EQ(width_, 1U);
- uint32_t intVal;
- if (GetVReg(m, reg, kIntVReg, &intVal)) {
- VLOG(jdwp) << "get byte local " << reg << " = " << intVal;
- JDWP::Set1(buf_+1, intVal);
- } else {
- VLOG(jdwp) << "failed to get byte local " << reg;
- error_ = kFailureErrorCode;
- }
- break;
- }
- case JDWP::JT_SHORT:
- case JDWP::JT_CHAR: {
- CHECK_EQ(width_, 2U);
- uint32_t intVal;
- if (GetVReg(m, reg, kIntVReg, &intVal)) {
- VLOG(jdwp) << "get short/char local " << reg << " = " << intVal;
- JDWP::Set2BE(buf_+1, intVal);
- } else {
- VLOG(jdwp) << "failed to get short/char local " << reg;
- error_ = kFailureErrorCode;
- }
- break;
- }
- case JDWP::JT_INT: {
- CHECK_EQ(width_, 4U);
- uint32_t intVal;
- if (GetVReg(m, reg, kIntVReg, &intVal)) {
- VLOG(jdwp) << "get int local " << reg << " = " << intVal;
- JDWP::Set4BE(buf_+1, intVal);
- } else {
- VLOG(jdwp) << "failed to get int local " << reg;
- error_ = kFailureErrorCode;
- }
- break;
- }
- case JDWP::JT_FLOAT: {
- CHECK_EQ(width_, 4U);
- uint32_t intVal;
- if (GetVReg(m, reg, kFloatVReg, &intVal)) {
- VLOG(jdwp) << "get float local " << reg << " = " << intVal;
- JDWP::Set4BE(buf_+1, intVal);
- } else {
- VLOG(jdwp) << "failed to get float local " << reg;
- error_ = kFailureErrorCode;
- }
- break;
- }
- case JDWP::JT_ARRAY:
- case JDWP::JT_CLASS_LOADER:
- case JDWP::JT_CLASS_OBJECT:
- case JDWP::JT_OBJECT:
- case JDWP::JT_STRING:
- case JDWP::JT_THREAD:
- case JDWP::JT_THREAD_GROUP: {
- CHECK_EQ(width_, sizeof(JDWP::ObjectId));
- uint32_t intVal;
- if (GetVReg(m, reg, kReferenceVReg, &intVal)) {
- mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal);
- VLOG(jdwp) << "get " << tag_ << " object local " << reg << " = " << o;
- if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
- LOG(FATAL) << "Register " << reg << " expected to hold " << tag_ << " object: " << o;
- }
- tag_ = TagFromObject(soa_, o);
- JDWP::SetObjectId(buf_+1, gRegistry->Add(o));
- } else {
- VLOG(jdwp) << "failed to get " << tag_ << " object local " << reg;
- error_ = kFailureErrorCode;
- }
- break;
- }
- case JDWP::JT_DOUBLE: {
- CHECK_EQ(width_, 8U);
- uint64_t longVal;
- if (GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) {
- VLOG(jdwp) << "get double local " << reg << " = " << longVal;
- JDWP::Set8BE(buf_+1, longVal);
- } else {
- VLOG(jdwp) << "failed to get double local " << reg;
- error_ = kFailureErrorCode;
- }
- break;
- }
- case JDWP::JT_LONG: {
- CHECK_EQ(width_, 8U);
- uint64_t longVal;
- if (GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg, &longVal)) {
- VLOG(jdwp) << "get long local " << reg << " = " << longVal;
- JDWP::Set8BE(buf_+1, longVal);
- } else {
- VLOG(jdwp) << "failed to get long local " << reg;
- error_ = kFailureErrorCode;
- }
- break;
- }
- default:
- LOG(FATAL) << "Unknown tag " << tag_;
- break;
- }
-
- // Prepend tag, which may have been updated.
- JDWP::Set1(buf_, tag_);
- return false;
+ // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
+ // annotalysis.
+ bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
+ if (GetFrameId() != frame_id_) {
+ return true; // Not our frame, carry on.
}
- const ScopedObjectAccessUnchecked& soa_;
- const JDWP::FrameId frame_id_;
- const int slot_;
- JDWP::JdwpTag tag_;
- uint8_t* const buf_;
- const size_t width_;
- JDWP::JdwpError error_;
- };
+ mirror::ArtMethod* m = GetMethod();
+ if (m->IsNative()) {
+ // We can't read/write local value from/into native method.
+ error_ = JDWP::ERR_OPAQUE_FRAME;
+ } else {
+ // We found our frame.
+ error_ = JDWP::ERR_NONE;
+ }
+ return false;
+ }
+
+ JDWP::JdwpError GetError() const {
+ return error_;
+ }
+
+ private:
+ const JDWP::FrameId frame_id_;
+ JDWP::JdwpError error_;
+};
+
+JDWP::JdwpError Dbg::GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply) {
+ JDWP::ObjectId thread_id = request->ReadThreadId();
+ JDWP::FrameId frame_id = request->ReadFrameId();
ScopedObjectAccessUnchecked soa(Thread::Current());
- MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread;
- JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
- if (error != JDWP::ERR_NONE) {
- return error;
+ {
+ MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
+ JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
}
- // TODO check thread is suspended by the debugger ?
+ // Find the frame with the given frame_id.
std::unique_ptr<Context> context(Context::Create());
- GetLocalVisitor visitor(soa, thread, context.get(), frame_id, slot, tag, buf, width);
+ FindFrameVisitor visitor(thread, context.get(), frame_id);
visitor.WalkStack();
- return visitor.error_;
+ if (visitor.GetError() != JDWP::ERR_NONE) {
+ return visitor.GetError();
+ }
+
+ // Read the values from visitor's context.
+ int32_t slot_count = request->ReadSigned32("slot count");
+ expandBufAdd4BE(pReply, slot_count); /* "int values" */
+ for (int32_t i = 0; i < slot_count; ++i) {
+ uint32_t slot = request->ReadUnsigned32("slot");
+ JDWP::JdwpTag reqSigByte = request->ReadTag();
+
+ VLOG(jdwp) << " --> slot " << slot << " " << reqSigByte;
+
+ size_t width = Dbg::GetTagWidth(reqSigByte);
+ uint8_t* ptr = expandBufAddSpace(pReply, width+1);
+ JDWP::JdwpError error = Dbg::GetLocalValue(visitor, soa, slot, reqSigByte, ptr, width);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
+ }
+ return JDWP::ERR_NONE;
}
-JDWP::JdwpError Dbg::SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
- JDWP::JdwpTag tag, uint64_t value, size_t width) {
- struct SetLocalVisitor : public StackVisitor {
- SetLocalVisitor(Thread* thread, Context* context,
- JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint64_t value,
- size_t width)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, context),
- frame_id_(frame_id), slot_(slot), tag_(tag), value_(value), width_(width),
- error_(JDWP::ERR_NONE) {}
-
- // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
- // annotalysis.
- bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
- if (GetFrameId() != frame_id_) {
- return true; // Not our frame, carry on.
+JDWP::JdwpError Dbg::GetLocalValue(const StackVisitor& visitor, ScopedObjectAccessUnchecked& soa,
+ int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
+ mirror::ArtMethod* m = visitor.GetMethod();
+ uint16_t reg = DemangleSlot(slot, m);
+ // TODO: check that the tag is compatible with the actual type of the slot!
+ // TODO: check slot is valid for this method or return INVALID_SLOT error.
+ constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
+ switch (tag) {
+ case JDWP::JT_BOOLEAN: {
+ CHECK_EQ(width, 1U);
+ uint32_t intVal;
+ if (visitor.GetVReg(m, reg, kIntVReg, &intVal)) {
+ VLOG(jdwp) << "get boolean local " << reg << " = " << intVal;
+ JDWP::Set1(buf + 1, intVal != 0);
+ } else {
+ VLOG(jdwp) << "failed to get boolean local " << reg;
+ return kFailureErrorCode;
}
- // TODO: check that the tag is compatible with the actual type of the slot!
- // TODO: check slot is valid for this method or return INVALID_SLOT error.
- mirror::ArtMethod* m = GetMethod();
- if (m->IsNative()) {
- // We can't read local value from native method.
- error_ = JDWP::ERR_OPAQUE_FRAME;
- return false;
- }
- uint16_t reg = DemangleSlot(slot_, m);
- constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
- switch (tag_) {
- case JDWP::JT_BOOLEAN:
- case JDWP::JT_BYTE:
- CHECK_EQ(width_, 1U);
- if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
- VLOG(jdwp) << "failed to set boolean/byte local " << reg << " = "
- << static_cast<uint32_t>(value_);
- error_ = kFailureErrorCode;
- }
- break;
- case JDWP::JT_SHORT:
- case JDWP::JT_CHAR:
- CHECK_EQ(width_, 2U);
- if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
- VLOG(jdwp) << "failed to set short/char local " << reg << " = "
- << static_cast<uint32_t>(value_);
- error_ = kFailureErrorCode;
- }
- break;
- case JDWP::JT_INT:
- CHECK_EQ(width_, 4U);
- if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
- VLOG(jdwp) << "failed to set int local " << reg << " = "
- << static_cast<uint32_t>(value_);
- error_ = kFailureErrorCode;
- }
- break;
- case JDWP::JT_FLOAT:
- CHECK_EQ(width_, 4U);
- if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kFloatVReg)) {
- VLOG(jdwp) << "failed to set float local " << reg << " = "
- << static_cast<uint32_t>(value_);
- error_ = kFailureErrorCode;
- }
- break;
- case JDWP::JT_ARRAY:
- case JDWP::JT_CLASS_LOADER:
- case JDWP::JT_CLASS_OBJECT:
- case JDWP::JT_OBJECT:
- case JDWP::JT_STRING:
- case JDWP::JT_THREAD:
- case JDWP::JT_THREAD_GROUP: {
- CHECK_EQ(width_, sizeof(JDWP::ObjectId));
- mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value_));
- if (o == ObjectRegistry::kInvalidObject) {
- VLOG(jdwp) << tag_ << " object " << o << " is an invalid object";
- error_ = JDWP::ERR_INVALID_OBJECT;
- } else if (!SetVReg(m, reg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)),
- kReferenceVReg)) {
- VLOG(jdwp) << "failed to set " << tag_ << " object local " << reg << " = " << o;
- error_ = kFailureErrorCode;
- }
- break;
- }
- case JDWP::JT_DOUBLE: {
- CHECK_EQ(width_, 8U);
- bool success = SetVRegPair(m, reg, value_, kDoubleLoVReg, kDoubleHiVReg);
- if (!success) {
- VLOG(jdwp) << "failed to set double local " << reg << " = " << value_;
- error_ = kFailureErrorCode;
- }
- break;
- }
- case JDWP::JT_LONG: {
- CHECK_EQ(width_, 8U);
- bool success = SetVRegPair(m, reg, value_, kLongLoVReg, kLongHiVReg);
- if (!success) {
- VLOG(jdwp) << "failed to set double local " << reg << " = " << value_;
- error_ = kFailureErrorCode;
- }
- break;
- }
- default:
- LOG(FATAL) << "Unknown tag " << tag_;
- break;
- }
- return false;
+ break;
}
+ case JDWP::JT_BYTE: {
+ CHECK_EQ(width, 1U);
+ uint32_t intVal;
+ if (visitor.GetVReg(m, reg, kIntVReg, &intVal)) {
+ VLOG(jdwp) << "get byte local " << reg << " = " << intVal;
+ JDWP::Set1(buf + 1, intVal);
+ } else {
+ VLOG(jdwp) << "failed to get byte local " << reg;
+ return kFailureErrorCode;
+ }
+ break;
+ }
+ case JDWP::JT_SHORT:
+ case JDWP::JT_CHAR: {
+ CHECK_EQ(width, 2U);
+ uint32_t intVal;
+ if (visitor.GetVReg(m, reg, kIntVReg, &intVal)) {
+ VLOG(jdwp) << "get short/char local " << reg << " = " << intVal;
+ JDWP::Set2BE(buf + 1, intVal);
+ } else {
+ VLOG(jdwp) << "failed to get short/char local " << reg;
+ return kFailureErrorCode;
+ }
+ break;
+ }
+ case JDWP::JT_INT: {
+ CHECK_EQ(width, 4U);
+ uint32_t intVal;
+ if (visitor.GetVReg(m, reg, kIntVReg, &intVal)) {
+ VLOG(jdwp) << "get int local " << reg << " = " << intVal;
+ JDWP::Set4BE(buf + 1, intVal);
+ } else {
+ VLOG(jdwp) << "failed to get int local " << reg;
+ return kFailureErrorCode;
+ }
+ break;
+ }
+ case JDWP::JT_FLOAT: {
+ CHECK_EQ(width, 4U);
+ uint32_t intVal;
+ if (visitor.GetVReg(m, reg, kFloatVReg, &intVal)) {
+ VLOG(jdwp) << "get float local " << reg << " = " << intVal;
+ JDWP::Set4BE(buf + 1, intVal);
+ } else {
+ VLOG(jdwp) << "failed to get float local " << reg;
+ return kFailureErrorCode;
+ }
+ break;
+ }
+ case JDWP::JT_ARRAY:
+ case JDWP::JT_CLASS_LOADER:
+ case JDWP::JT_CLASS_OBJECT:
+ case JDWP::JT_OBJECT:
+ case JDWP::JT_STRING:
+ case JDWP::JT_THREAD:
+ case JDWP::JT_THREAD_GROUP: {
+ CHECK_EQ(width, sizeof(JDWP::ObjectId));
+ uint32_t intVal;
+ if (visitor.GetVReg(m, reg, kReferenceVReg, &intVal)) {
+ mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal);
+ VLOG(jdwp) << "get " << tag << " object local " << reg << " = " << o;
+ if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
+ LOG(FATAL) << "Register " << reg << " expected to hold " << tag << " object: " << o;
+ }
+ tag = TagFromObject(soa, o);
+ JDWP::SetObjectId(buf + 1, gRegistry->Add(o));
+ } else {
+ VLOG(jdwp) << "failed to get " << tag << " object local " << reg;
+ return kFailureErrorCode;
+ }
+ break;
+ }
+ case JDWP::JT_DOUBLE: {
+ CHECK_EQ(width, 8U);
+ uint64_t longVal;
+ if (visitor.GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) {
+ VLOG(jdwp) << "get double local " << reg << " = " << longVal;
+ JDWP::Set8BE(buf + 1, longVal);
+ } else {
+ VLOG(jdwp) << "failed to get double local " << reg;
+ return kFailureErrorCode;
+ }
+ break;
+ }
+ case JDWP::JT_LONG: {
+ CHECK_EQ(width, 8U);
+ uint64_t longVal;
+ if (visitor.GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg, &longVal)) {
+ VLOG(jdwp) << "get long local " << reg << " = " << longVal;
+ JDWP::Set8BE(buf + 1, longVal);
+ } else {
+ VLOG(jdwp) << "failed to get long local " << reg;
+ return kFailureErrorCode;
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unknown tag " << tag;
+ break;
+ }
- const JDWP::FrameId frame_id_;
- const int slot_;
- const JDWP::JdwpTag tag_;
- const uint64_t value_;
- const size_t width_;
- JDWP::JdwpError error_;
- };
+ // Prepend tag, which may have been updated.
+ JDWP::Set1(buf, tag);
+ return JDWP::ERR_NONE;
+}
+
+JDWP::JdwpError Dbg::SetLocalValues(JDWP::Request* request) {
+ JDWP::ObjectId thread_id = request->ReadThreadId();
+ JDWP::FrameId frame_id = request->ReadFrameId();
ScopedObjectAccessUnchecked soa(Thread::Current());
- MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread;
- JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
- if (error != JDWP::ERR_NONE) {
- return error;
+ {
+ MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
+ JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
}
- // TODO check thread is suspended by the debugger ?
+ // Find the frame with the given frame_id.
std::unique_ptr<Context> context(Context::Create());
- SetLocalVisitor visitor(thread, context.get(), frame_id, slot, tag, value, width);
+ FindFrameVisitor visitor(thread, context.get(), frame_id);
visitor.WalkStack();
- return visitor.error_;
+ if (visitor.GetError() != JDWP::ERR_NONE) {
+ return visitor.GetError();
+ }
+
+ // Writes the values into visitor's context.
+ int32_t slot_count = request->ReadSigned32("slot count");
+ for (int32_t i = 0; i < slot_count; ++i) {
+ uint32_t slot = request->ReadUnsigned32("slot");
+ JDWP::JdwpTag sigByte = request->ReadTag();
+ size_t width = Dbg::GetTagWidth(sigByte);
+ uint64_t value = request->ReadValue(width);
+
+ VLOG(jdwp) << " --> slot " << slot << " " << sigByte << " " << value;
+ JDWP::JdwpError error = Dbg::SetLocalValue(visitor, slot, sigByte, value, width);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
+ }
+ return JDWP::ERR_NONE;
+}
+
+JDWP::JdwpError Dbg::SetLocalValue(StackVisitor& visitor, int slot, JDWP::JdwpTag tag,
+ uint64_t value, size_t width) {
+ mirror::ArtMethod* m = visitor.GetMethod();
+ uint16_t reg = DemangleSlot(slot, m);
+ // TODO: check that the tag is compatible with the actual type of the slot!
+ // TODO: check slot is valid for this method or return INVALID_SLOT error.
+ constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
+ switch (tag) {
+ case JDWP::JT_BOOLEAN:
+ case JDWP::JT_BYTE:
+ CHECK_EQ(width, 1U);
+ if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(value), kIntVReg)) {
+ VLOG(jdwp) << "failed to set boolean/byte local " << reg << " = "
+ << static_cast<uint32_t>(value);
+ return kFailureErrorCode;
+ }
+ break;
+ case JDWP::JT_SHORT:
+ case JDWP::JT_CHAR:
+ CHECK_EQ(width, 2U);
+ if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(value), kIntVReg)) {
+ VLOG(jdwp) << "failed to set short/char local " << reg << " = "
+ << static_cast<uint32_t>(value);
+ return kFailureErrorCode;
+ }
+ break;
+ case JDWP::JT_INT:
+ CHECK_EQ(width, 4U);
+ if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(value), kIntVReg)) {
+ VLOG(jdwp) << "failed to set int local " << reg << " = "
+ << static_cast<uint32_t>(value);
+ return kFailureErrorCode;
+ }
+ break;
+ case JDWP::JT_FLOAT:
+ CHECK_EQ(width, 4U);
+ if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(value), kFloatVReg)) {
+ VLOG(jdwp) << "failed to set float local " << reg << " = "
+ << static_cast<uint32_t>(value);
+ return kFailureErrorCode;
+ }
+ break;
+ case JDWP::JT_ARRAY:
+ case JDWP::JT_CLASS_LOADER:
+ case JDWP::JT_CLASS_OBJECT:
+ case JDWP::JT_OBJECT:
+ case JDWP::JT_STRING:
+ case JDWP::JT_THREAD:
+ case JDWP::JT_THREAD_GROUP: {
+ CHECK_EQ(width, sizeof(JDWP::ObjectId));
+ mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value));
+ if (o == ObjectRegistry::kInvalidObject) {
+ VLOG(jdwp) << tag << " object " << o << " is an invalid object";
+ return JDWP::ERR_INVALID_OBJECT;
+ } else if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)),
+ kReferenceVReg)) {
+ VLOG(jdwp) << "failed to set " << tag << " object local " << reg << " = " << o;
+ return kFailureErrorCode;
+ }
+ break;
+ }
+ case JDWP::JT_DOUBLE: {
+ CHECK_EQ(width, 8U);
+ if (!visitor.SetVRegPair(m, reg, value, kDoubleLoVReg, kDoubleHiVReg)) {
+ VLOG(jdwp) << "failed to set double local " << reg << " = " << value;
+ return kFailureErrorCode;
+ }
+ break;
+ }
+ case JDWP::JT_LONG: {
+ CHECK_EQ(width, 8U);
+ if (!visitor.SetVRegPair(m, reg, value, kLongLoVReg, kLongHiVReg)) {
+        VLOG(jdwp) << "failed to set long local " << reg << " = " << value;
+ return kFailureErrorCode;
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unknown tag " << tag;
+ break;
+ }
+ return JDWP::ERR_NONE;
}
static void SetEventLocation(JDWP::EventLocation* location, mirror::ArtMethod* m, uint32_t dex_pc)
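
For reference, a standalone sketch (hypothetical types, not the real JDWP/ART API) of the control flow the hunk above introduces: walk the stack once with a FindFrameVisitor-style search, then service each requested slot against the frame that was found, returning early on the first error.

    #include <cstdint>
    #include <iostream>
    #include <vector>

    enum class JdwpError { kNone, kInvalidFrameId, kOpaqueFrame, kAbsentInformation };

    struct Frame {
      uint64_t id;
      bool is_native;
      std::vector<int32_t> registers;  // pretend vregs
    };

    // Mirrors FindFrameVisitor: stop at the matching frame, reject native frames.
    JdwpError FindFrame(const std::vector<Frame>& stack, uint64_t frame_id, const Frame** out) {
      for (const Frame& f : stack) {
        if (f.id != frame_id) {
          continue;  // not our frame, carry on
        }
        if (f.is_native) {
          return JdwpError::kOpaqueFrame;  // can't read locals of a native frame
        }
        *out = &f;
        return JdwpError::kNone;
      }
      return JdwpError::kInvalidFrameId;
    }

    // Mirrors GetLocalValues: one walk, then per-slot reads against the found frame.
    JdwpError GetLocalValues(const std::vector<Frame>& stack, uint64_t frame_id,
                             const std::vector<size_t>& slots, std::vector<int32_t>* reply) {
      const Frame* frame = nullptr;
      JdwpError error = FindFrame(stack, frame_id, &frame);
      if (error != JdwpError::kNone) {
        return error;
      }
      for (size_t slot : slots) {
        if (slot >= frame->registers.size()) {
          return JdwpError::kAbsentInformation;  // first failure aborts the whole reply
        }
        reply->push_back(frame->registers[slot]);
      }
      return JdwpError::kNone;
    }

    int main() {
      std::vector<Frame> stack = {{1, false, {10, 20, 30}}, {2, true, {}}};
      std::vector<int32_t> reply;
      JdwpError error = GetLocalValues(stack, 1, {0, 2}, &reply);
      std::cout << "error=" << static_cast<int>(error) << " values=" << reply.size() << "\n";
      return 0;
    }
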
@@ -4305,11 +4332,7 @@
Thread* self = Thread::Current();
- // To allow the Walk/InspectAll() below to exclusively-lock the
- // mutator lock, temporarily release the shared access to the
- // mutator lock here by transitioning to the suspended state.
Locks::mutator_lock_->AssertSharedHeld(self);
- self->TransitionFromRunnableToSuspended(kSuspended);
// Send a series of heap segment chunks.
HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
@@ -4323,32 +4346,39 @@
gc::Heap* heap = Runtime::Current()->GetHeap();
for (const auto& space : heap->GetContinuousSpaces()) {
if (space->IsDlMallocSpace()) {
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
// dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
// allocation then the first sizeof(size_t) may belong to it.
context.SetChunkOverhead(sizeof(size_t));
space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
} else if (space->IsRosAllocSpace()) {
context.SetChunkOverhead(0);
- space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+ // Need to acquire the mutator lock before the heap bitmap lock with exclusive access since
+ // RosAlloc's internal logic doesn't know to release and reacquire the heap bitmap lock.
+ self->TransitionFromRunnableToSuspended(kSuspended);
+ ThreadList* tl = Runtime::Current()->GetThreadList();
+ tl->SuspendAll();
+ {
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+ }
+ tl->ResumeAll();
+ self->TransitionFromSuspendedToRunnable();
} else if (space->IsBumpPointerSpace()) {
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
context.SetChunkOverhead(0);
- ReaderMutexLock mu(self, *Locks::mutator_lock_);
- WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
} else {
UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
}
context.ResetStartOfNextChunk();
}
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
// Walk the large objects, these are not in the AllocSpace.
context.SetChunkOverhead(0);
heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
}
- // Shared-lock the mutator lock back.
- self->TransitionFromSuspendedToRunnable();
- Locks::mutator_lock_->AssertSharedHeld(self);
-
// Finally, send a heap end chunk.
Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
}
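
For reference, a simplified standalone sketch (hypothetical lock and space types, not ART's) of the locking discipline the heap-segment hunk above switches to: walk most spaces under a shared heap-bitmap lock, but for the RosAlloc space first suspend all threads, because RosAlloc's walk cannot release and reacquire the bitmap lock on its own.

    #include <iostream>
    #include <mutex>
    #include <shared_mutex>

    std::shared_mutex heap_bitmap_lock;  // stand-in for Locks::heap_bitmap_lock_
    std::mutex suspend_all_lock;         // stand-in for ThreadList::SuspendAll/ResumeAll

    void WalkDlMallocSpace() { std::cout << "walk dlmalloc space\n"; }
    void WalkRosAllocSpace() { std::cout << "walk rosalloc space\n"; }

    void DumpHeapSegments() {
      {
        // Ordinary spaces: a shared (reader) hold on the bitmap lock is enough.
        std::shared_lock<std::shared_mutex> mu(heap_bitmap_lock);
        WalkDlMallocSpace();
      }
      {
        // RosAlloc: suspend everything first, then take the bitmap lock for the walk.
        std::lock_guard<std::mutex> suspend(suspend_all_lock);  // "SuspendAll()"
        std::shared_lock<std::shared_mutex> mu(heap_bitmap_lock);
        WalkRosAllocSpace();
      }                                                         // "ResumeAll()"
    }

    int main() {
      DumpHeapSegments();
      return 0;
    }
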
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 2c81c8d..eaab1f4 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -45,6 +45,7 @@
class AllocRecord;
class ObjectRegistry;
class ScopedObjectAccessUnchecked;
+class StackVisitor;
class Thread;
class ThrowLocation;
@@ -475,12 +476,10 @@
LOCKS_EXCLUDED(Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static JDWP::JdwpError GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
- JDWP::JdwpTag tag, uint8_t* buf, size_t expectedLen)
+ static JDWP::JdwpError GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply)
LOCKS_EXCLUDED(Locks::thread_list_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static JDWP::JdwpError SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
- JDWP::JdwpTag tag, uint64_t value, size_t width)
+ static JDWP::JdwpError SetLocalValues(JDWP::Request* request)
LOCKS_EXCLUDED(Locks::thread_list_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -641,6 +640,16 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
+ static JDWP::JdwpError GetLocalValue(const StackVisitor& visitor,
+ ScopedObjectAccessUnchecked& soa, int slot,
+ JDWP::JdwpTag tag, uint8_t* buf, size_t width)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static JDWP::JdwpError SetLocalValue(StackVisitor& visitor, int slot, JDWP::JdwpTag tag,
+ uint64_t value, size_t width)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
static void DdmBroadcast(bool connect) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void PostThreadStartOrStop(Thread*, uint32_t)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/handle.h b/runtime/handle.h
index f70faf4..f9864dc 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -58,7 +58,7 @@
}
ALWAYS_INLINE T* Get() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return reference_->AsMirrorPtr();
+ return down_cast<T*>(reference_->AsMirrorPtr());
}
ALWAYS_INLINE jobject ToJObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -70,25 +70,25 @@
}
protected:
- StackReference<T>* reference_;
-
template<typename S>
explicit ConstHandle(StackReference<S>* reference)
- : reference_(reinterpret_cast<StackReference<T>*>(reference)) {
+ : reference_(reference) {
}
template<typename S>
explicit ConstHandle(const ConstHandle<S>& handle)
- : reference_(reinterpret_cast<StackReference<T>*>(handle.reference_)) {
+ : reference_(handle.reference_) {
}
- StackReference<T>* GetReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ StackReference<mirror::Object>* GetReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
return reference_;
}
- ALWAYS_INLINE const StackReference<T>* GetReference() const
+ ALWAYS_INLINE const StackReference<mirror::Object>* GetReference() const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return reference_;
}
+ StackReference<mirror::Object>* reference_;
+
private:
friend class BuildGenericJniFrameVisitor;
template<class S> friend class ConstHandle;
@@ -120,8 +120,8 @@
}
ALWAYS_INLINE T* Assign(T* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- StackReference<T>* ref = ConstHandle<T>::GetReference();
- T* const old = ref->AsMirrorPtr();
+ StackReference<mirror::Object>* ref = Handle<T>::GetReference();
+ T* old = down_cast<T*>(ref->AsMirrorPtr());
ref->Assign(reference);
return old;
}
@@ -131,7 +131,6 @@
: ConstHandle<T>(handle) {
}
- protected:
template<typename S>
explicit Handle(StackReference<S>* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: ConstHandle<T>(reference) {
@@ -152,7 +151,7 @@
}
private:
- StackReference<T> null_ref_;
+ StackReference<mirror::Object> null_ref_;
};
} // namespace art
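
For reference, a minimal sketch of the storage change in handle.h: the handle now stores a StackReference to the root mirror::Object type and narrows back to T only on access, so differently-typed handles share one underlying representation. Types here are hypothetical stand-ins, not ART's.

    #include <iostream>
    #include <string>

    struct Object { virtual ~Object() = default; };
    struct String : Object { std::string value; };

    // Stand-in for StackReference<mirror::Object>.
    struct StackRef {
      Object* ptr = nullptr;
      Object* AsMirrorPtr() const { return ptr; }
      void Assign(Object* p) { ptr = p; }
    };

    // Like the patched ConstHandle/Handle: the member is always base-typed,
    // and Get()/Assign() down-cast back to T.
    template <typename T>
    class Handle {
     public:
      explicit Handle(StackRef* reference) : reference_(reference) {}
      T* Get() const { return static_cast<T*>(reference_->AsMirrorPtr()); }  // "down_cast"
      T* Assign(T* obj) {
        T* old = static_cast<T*>(reference_->AsMirrorPtr());
        reference_->Assign(obj);
        return old;
      }
     private:
      StackRef* reference_;  // base-typed, as in the patch
    };

    int main() {
      String s;
      s.value = "hello";
      StackRef slot;
      slot.Assign(&s);
      Handle<String> handle(&slot);
      std::cout << handle.Get()->value << "\n";
      return 0;
    }
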
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 42ef779..0c9986e 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -150,7 +150,7 @@
}
private:
- T** obj_;
+ T** const obj_;
};
// Scoped handle storage of a fixed size that is usually stack allocated.
@@ -160,31 +160,10 @@
explicit StackHandleScope(Thread* self);
~StackHandleScope();
- // Currently unused, using this GetReference instead of the one in HandleScope is preferred to
- // avoid compiler optimizations incorrectly optimizing out of bound array accesses.
- // TODO: Remove this when it is un-necessary.
- mirror::Object* GetReference(size_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- ALWAYS_INLINE {
- DCHECK_LT(i, number_of_references_);
- return references_storage_[i].AsMirrorPtr();
- }
-
- Handle<mirror::Object> GetHandle(size_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- ALWAYS_INLINE {
- DCHECK_LT(i, number_of_references_);
- return Handle<mirror::Object>(&references_storage_[i]);
- }
-
- void SetReference(size_t i, mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- ALWAYS_INLINE {
- DCHECK_LT(i, number_of_references_);
- references_storage_[i].Assign(object);
- }
-
template<class T>
Handle<T> NewHandle(T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
SetReference(pos_, object);
- Handle<T> h(GetHandle(pos_));
+ Handle<T> h(GetHandle<T>(pos_));
pos_++;
return h;
}
@@ -192,14 +171,26 @@
template<class T>
HandleWrapper<T> NewHandleWrapper(T** object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
SetReference(pos_, *object);
- Handle<T> h(GetHandle(pos_));
+ Handle<T> h(GetHandle<T>(pos_));
pos_++;
return HandleWrapper<T>(object, h);
}
private:
- // References_storage_ needs to be first so that it appears in the same location as
- // HandleScope::references_.
+ template<class T>
+ ALWAYS_INLINE Handle<T> GetHandle(size_t i)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK_LT(i, kNumReferences);
+ return Handle<T>(&references_storage_[i]);
+ }
+
+ ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK_LT(i, kNumReferences);
+ references_storage_[i].Assign(object);
+ }
+
+ // Reference storage needs to be first as expected by the HandleScope layout.
StackReference<mirror::Object> references_storage_[kNumReferences];
// The thread that the stack handle scope is a linked list upon. The stack handle scope will
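
For reference, a standalone sketch (simplified, hypothetical types) of the StackHandleScope shape after this hunk: the slot accessors become private, the handle getter is templated on T, bounds are checked against the compile-time kNumReferences, and the reference storage stays first.

    #include <cassert>
    #include <cstddef>
    #include <iostream>

    struct Object { int id = 0; };

    template <typename T>
    struct Handle {
      explicit Handle(Object** slot) : slot_(slot) {}
      T* Get() const { return static_cast<T*>(*slot_); }
      Object** slot_;
    };

    template <size_t kNumReferences>
    class StackHandleScope {
     public:
      template <typename T>
      Handle<T> NewHandle(T* object) {
        SetReference(pos_, object);
        Handle<T> h(GetSlot(pos_));
        ++pos_;
        return h;
      }

     private:
      Object** GetSlot(size_t i) {
        assert(i < kNumReferences);  // DCHECK_LT(i, kNumReferences) in the patch
        return &references_storage_[i];
      }
      void SetReference(size_t i, Object* object) {
        assert(i < kNumReferences);
        references_storage_[i] = object;
      }

      Object* references_storage_[kNumReferences] = {};  // storage stays first, as the comment notes
      size_t pos_ = 0;
    };

    int main() {
      Object o;
      o.id = 42;
      StackHandleScope<3> scope;
      Handle<Object> h = scope.NewHandle(&o);
      std::cout << h.Get()->id << "\n";
      return 0;
    }
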
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index c826716..2bf6ab9 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -46,7 +46,7 @@
AbortIfNoCheckJNI();
return false;
}
- if (UNLIKELY(table_[idx].IsNull())) {
+ if (UNLIKELY(table_[idx].GetReference()->IsNull())) {
LOG(ERROR) << "JNI ERROR (app bug): accessed deleted " << kind_ << " " << iref;
AbortIfNoCheckJNI();
return false;
@@ -76,10 +76,10 @@
return kInvalidIndirectRefObject;
}
uint32_t idx = ExtractIndex(iref);
- mirror::Object* obj = table_[idx].Read<kWithoutReadBarrier>();
+ mirror::Object* obj = table_[idx].GetReference()->Read<kWithoutReadBarrier>();
if (LIKELY(obj != kClearedJniWeakGlobal)) {
// The read barrier or VerifyObject won't handle kClearedJniWeakGlobal.
- obj = table_[idx].Read();
+ obj = table_[idx].GetReference()->Read();
VerifyObject(obj);
}
return obj;
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 9b2b82e..2b1a257 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -63,34 +63,22 @@
}
IndirectReferenceTable::IndirectReferenceTable(size_t initialCount,
- size_t maxCount, IndirectRefKind desiredKind) {
+ size_t maxCount, IndirectRefKind desiredKind)
+ : kind_(desiredKind),
+ max_entries_(maxCount) {
CHECK_GT(initialCount, 0U);
CHECK_LE(initialCount, maxCount);
CHECK_NE(desiredKind, kHandleScopeOrInvalid);
std::string error_str;
- const size_t initial_bytes = initialCount * sizeof(const mirror::Object*);
- const size_t table_bytes = maxCount * sizeof(const mirror::Object*);
+ const size_t table_bytes = maxCount * sizeof(IrtEntry);
table_mem_map_.reset(MemMap::MapAnonymous("indirect ref table", nullptr, table_bytes,
PROT_READ | PROT_WRITE, false, &error_str));
CHECK(table_mem_map_.get() != nullptr) << error_str;
CHECK_EQ(table_mem_map_->Size(), table_bytes);
-
- table_ = reinterpret_cast<GcRoot<mirror::Object>*>(table_mem_map_->Begin());
+ table_ = reinterpret_cast<IrtEntry*>(table_mem_map_->Begin());
CHECK(table_ != nullptr);
- memset(table_, 0xd1, initial_bytes);
-
- const size_t slot_bytes = maxCount * sizeof(IndirectRefSlot);
- slot_mem_map_.reset(MemMap::MapAnonymous("indirect ref table slots", nullptr, slot_bytes,
- PROT_READ | PROT_WRITE, false, &error_str));
- CHECK(slot_mem_map_.get() != nullptr) << error_str;
- slot_data_ = reinterpret_cast<IndirectRefSlot*>(slot_mem_map_->Begin());
- CHECK(slot_data_ != nullptr);
-
segment_state_.all = IRT_FIRST_SEGMENT;
- alloc_entries_ = initialCount;
- max_entries_ = maxCount;
- kind_ = desiredKind;
}
IndirectReferenceTable::~IndirectReferenceTable() {
@@ -104,24 +92,12 @@
CHECK(obj != NULL);
VerifyObject(obj);
DCHECK(table_ != NULL);
- DCHECK_LE(alloc_entries_, max_entries_);
DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
- if (topIndex == alloc_entries_) {
- // reached end of allocated space; did we hit buffer max?
- if (topIndex == max_entries_) {
- LOG(FATAL) << "JNI ERROR (app bug): " << kind_ << " table overflow "
- << "(max=" << max_entries_ << ")\n"
- << MutatorLockedDumpable<IndirectReferenceTable>(*this);
- }
-
- size_t newSize = alloc_entries_ * 2;
- if (newSize > max_entries_) {
- newSize = max_entries_;
- }
- DCHECK_GT(newSize, alloc_entries_);
-
- alloc_entries_ = newSize;
+ if (topIndex == max_entries_) {
+ LOG(FATAL) << "JNI ERROR (app bug): " << kind_ << " table overflow "
+ << "(max=" << max_entries_ << ")\n"
+ << MutatorLockedDumpable<IndirectReferenceTable>(*this);
}
// We know there's enough room in the table. Now we just need to find
@@ -129,27 +105,26 @@
// add to the end of the list.
IndirectRef result;
int numHoles = segment_state_.parts.numHoles - prevState.parts.numHoles;
+ size_t index;
if (numHoles > 0) {
DCHECK_GT(topIndex, 1U);
// Find the first hole; likely to be near the end of the list.
- GcRoot<mirror::Object>* pScan = &table_[topIndex - 1];
- DCHECK(!pScan->IsNull());
+ IrtEntry* pScan = &table_[topIndex - 1];
+ DCHECK(!pScan->GetReference()->IsNull());
--pScan;
- while (!pScan->IsNull()) {
+ while (!pScan->GetReference()->IsNull()) {
DCHECK_GE(pScan, table_ + prevState.parts.topIndex);
--pScan;
}
- UpdateSlotAdd(obj, pScan - table_);
- result = ToIndirectRef(pScan - table_);
- *pScan = GcRoot<mirror::Object>(obj);
+ index = pScan - table_;
segment_state_.parts.numHoles--;
} else {
// Add to the end.
- UpdateSlotAdd(obj, topIndex);
- result = ToIndirectRef(topIndex);
- table_[topIndex++] = GcRoot<mirror::Object>(obj);
+ index = topIndex++;
segment_state_.parts.topIndex = topIndex;
}
+ table_[index].Add(obj);
+ result = ToIndirectRef(index);
if (false) {
LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.parts.topIndex
<< " holes=" << segment_state_.parts.numHoles;
@@ -182,7 +157,6 @@
int bottomIndex = prevState.parts.topIndex;
DCHECK(table_ != NULL);
- DCHECK_LE(alloc_entries_, max_entries_);
DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
int idx = ExtractIndex(iref);
@@ -192,7 +166,6 @@
LOG(WARNING) << "Attempt to remove local handle scope entry from IRT, ignoring";
return true;
}
-
if (idx < bottomIndex) {
// Wrong segment.
LOG(WARNING) << "Attempt to remove index outside index area (" << idx
@@ -206,23 +179,23 @@
return false;
}
- if (idx == topIndex-1) {
+ if (idx == topIndex - 1) {
// Top-most entry. Scan up and consume holes.
if (!CheckEntry("remove", iref, idx)) {
return false;
}
- table_[idx] = GcRoot<mirror::Object>(nullptr);
+ *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
int numHoles = segment_state_.parts.numHoles - prevState.parts.numHoles;
if (numHoles != 0) {
while (--topIndex > bottomIndex && numHoles != 0) {
if (false) {
- LOG(INFO) << "+++ checking for hole at " << topIndex-1
+ LOG(INFO) << "+++ checking for hole at " << topIndex - 1
<< " (cookie=" << cookie << ") val="
- << table_[topIndex - 1].Read<kWithoutReadBarrier>();
+ << table_[topIndex - 1].GetReference()->Read<kWithoutReadBarrier>();
}
- if (!table_[topIndex-1].IsNull()) {
+ if (!table_[topIndex - 1].GetReference()->IsNull()) {
break;
}
if (false) {
@@ -242,7 +215,7 @@
// Not the top-most entry. This creates a hole. We NULL out the
// entry to prevent somebody from deleting it twice and screwing up
// the hole count.
- if (table_[idx].IsNull()) {
+ if (table_[idx].GetReference()->IsNull()) {
LOG(INFO) << "--- WEIRD: removing null entry " << idx;
return false;
}
@@ -250,7 +223,7 @@
return false;
}
- table_[idx] = GcRoot<mirror::Object>(nullptr);
+ *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
segment_state_.parts.numHoles++;
if (false) {
LOG(INFO) << "+++ left hole at " << idx << ", holes=" << segment_state_.parts.numHoles;
@@ -272,7 +245,7 @@
os << kind_ << " table dump:\n";
ReferenceTable::Table entries;
for (size_t i = 0; i < Capacity(); ++i) {
- mirror::Object* obj = table_[i].Read<kWithoutReadBarrier>();
+ mirror::Object* obj = table_[i].GetReference()->Read<kWithoutReadBarrier>();
if (UNLIKELY(obj == nullptr)) {
// Remove NULLs.
} else if (UNLIKELY(obj == kClearedJniWeakGlobal)) {
@@ -280,7 +253,7 @@
// while the read barrier won't.
entries.push_back(GcRoot<mirror::Object>(obj));
} else {
- obj = table_[i].Read();
+ obj = table_[i].GetReference()->Read();
entries.push_back(GcRoot<mirror::Object>(obj));
}
}
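
For reference, a simplified standalone sketch of the Add() path after this hunk: compute the target index first (either the nearest hole below the top, or the top itself), then hand the object to the entry and derive the indirect ref from that single index. The types are illustrative only.

    #include <cstddef>
    #include <iostream>
    #include <vector>

    struct Entry {
      const void* ref = nullptr;
      bool IsNull() const { return ref == nullptr; }
      void Add(const void* obj) { ref = obj; }
    };

    // Returns the slot used for the new object, mirroring the numHoles/topIndex logic.
    size_t AddToTable(std::vector<Entry>& table, size_t& top_index, size_t& num_holes,
                      const void* obj) {
      size_t index;
      if (num_holes > 0) {
        // Find the first hole, scanning down from just below the top (likely near the end).
        size_t scan = top_index - 1;
        while (!table[scan].IsNull()) {
          --scan;
        }
        index = scan;
        --num_holes;
      } else {
        // No holes: append at the top.
        index = top_index++;
      }
      table[index].Add(obj);
      return index;
    }

    int main() {
      std::vector<Entry> table(8);
      size_t top = 0, holes = 0;
      int a = 1, b = 2, c = 3;
      AddToTable(table, top, holes, &a);
      AddToTable(table, top, holes, &b);
      table[0].ref = nullptr;  // simulate removing a non-top entry, leaving a hole
      holes = 1;
      size_t idx = AddToTable(table, top, holes, &c);
      std::cout << "reused slot " << idx << " top=" << top << "\n";  // reused slot 0 top=2
      return 0;
    }
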
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index fb910e2..5a178ea 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -127,16 +127,6 @@
return static_cast<IndirectRefKind>(reinterpret_cast<uintptr_t>(iref) & 0x03);
}
-/*
- * Extended debugging structure. We keep a parallel array of these, one
- * per slot in the table.
- */
-static const size_t kIRTPrevCount = 4;
-struct IndirectRefSlot {
- uint32_t serial;
- const mirror::Object* previous[kIRTPrevCount];
-};
-
/* use as initial value for "cookie", and when table has only one segment */
static const uint32_t IRT_FIRST_SEGMENT = 0;
@@ -203,9 +193,35 @@
} parts;
};
+// Try to choose kIRTPrevCount so that sizeof(IrtEntry) is a power of 2.
+// Contains multiple entries but only one active one. This helps us detect use-after-free errors
+// since the serial stored in the indirect ref won't match.
+static const size_t kIRTPrevCount = kIsDebugBuild ? 7 : 3;
+class PACKED(4) IrtEntry {
+ public:
+ void Add(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ++serial_;
+ if (serial_ == kIRTPrevCount) {
+ serial_ = 0;
+ }
+ references_[serial_] = GcRoot<mirror::Object>(obj);
+ }
+ GcRoot<mirror::Object>* GetReference() {
+ DCHECK_LT(serial_, kIRTPrevCount);
+ return &references_[serial_];
+ }
+ uint32_t GetSerial() const {
+ return serial_;
+ }
+
+ private:
+ uint32_t serial_;
+ GcRoot<mirror::Object> references_[kIRTPrevCount];
+};
+
class IrtIterator {
public:
- explicit IrtIterator(GcRoot<mirror::Object>* table, size_t i, size_t capacity)
+ explicit IrtIterator(IrtEntry* table, size_t i, size_t capacity)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: table_(table), i_(i), capacity_(capacity) {
SkipNullsAndTombstones();
@@ -219,7 +235,7 @@
mirror::Object** operator*() {
// This does not have a read barrier as this is used to visit roots.
- return table_[i_].AddressWithoutBarrier();
+ return table_[i_].GetReference()->AddressWithoutBarrier();
}
bool equals(const IrtIterator& rhs) const {
@@ -230,13 +246,13 @@
void SkipNullsAndTombstones() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// We skip NULLs and tombstones. Clients don't want to see implementation details.
while (i_ < capacity_ &&
- (table_[i_].IsNull() ||
- table_[i_].Read<kWithoutReadBarrier>() == kClearedJniWeakGlobal)) {
+ (table_[i_].GetReference()->IsNull() ||
+ table_[i_].GetReference()->Read<kWithoutReadBarrier>() == kClearedJniWeakGlobal)) {
++i_;
}
}
- GcRoot<mirror::Object>* const table_;
+ IrtEntry* const table_;
size_t i_;
size_t capacity_;
};
@@ -329,9 +345,7 @@
}
private:
- /*
- * Extract the table index from an indirect reference.
- */
+ // Extract the table index from an indirect reference.
static uint32_t ExtractIndex(IndirectRef iref) {
uintptr_t uref = reinterpret_cast<uintptr_t>(iref);
return (uref >> 2) & 0xffff;
@@ -343,25 +357,11 @@
*/
IndirectRef ToIndirectRef(uint32_t tableIndex) const {
DCHECK_LT(tableIndex, 65536U);
- uint32_t serialChunk = slot_data_[tableIndex].serial;
- uintptr_t uref = serialChunk << 20 | (tableIndex << 2) | kind_;
+ uint32_t serialChunk = table_[tableIndex].GetSerial();
+ uintptr_t uref = (serialChunk << 20) | (tableIndex << 2) | kind_;
return reinterpret_cast<IndirectRef>(uref);
}
- /*
- * Update extended debug info when an entry is added.
- *
- * We advance the serial number, invalidating any outstanding references to
- * this slot.
- */
- void UpdateSlotAdd(const mirror::Object* obj, int slot) {
- if (slot_data_ != NULL) {
- IndirectRefSlot* pSlot = &slot_data_[slot];
- pSlot->serial++;
- pSlot->previous[pSlot->serial % kIRTPrevCount] = obj;
- }
- }
-
// Abort if check_jni is not enabled.
static void AbortIfNoCheckJNI();
@@ -374,19 +374,13 @@
// Mem map where we store the indirect refs.
std::unique_ptr<MemMap> table_mem_map_;
- // Mem map where we store the extended debugging info.
- std::unique_ptr<MemMap> slot_mem_map_;
// bottom of the stack. Do not directly access the object references
// in this as they are roots. Use Get() that has a read barrier.
- GcRoot<mirror::Object>* table_;
+ IrtEntry* table_;
/* bit mask, ORed into all irefs */
- IndirectRefKind kind_;
- /* extended debugging info */
- IndirectRefSlot* slot_data_;
- /* #of entries we have space for */
- size_t alloc_entries_;
+ const IndirectRefKind kind_;
/* max #of entries allowed */
- size_t max_entries_;
+ const size_t max_entries_;
};
} // namespace art
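
For reference, a minimal standalone sketch of the IrtEntry idea introduced above: each slot keeps a small ring of references plus a serial, the serial is folded into the indirect ref, and a stale ref whose serial no longer matches the slot's current serial can be rejected. Constants and types here are illustrative, not ART's.

    #include <cstdint>
    #include <cstdio>

    static const uint32_t kPrevCount = 3;  // analogous to kIRTPrevCount (release value)

    struct IrtEntry {
      uint32_t serial = 0;
      const void* references[kPrevCount] = {};

      void Add(const void* obj) {
        serial = (serial + 1) % kPrevCount;  // advancing the serial invalidates old refs
        references[serial] = obj;
      }
      const void* Current() const { return references[serial]; }
    };

    // Mirrors ToIndirectRef(): serial in the high bits, index shifted left by 2, kind in the low bits.
    uint32_t ToIndirectRef(const IrtEntry* table, uint32_t index, uint32_t kind) {
      return (table[index].serial << 20) | (index << 2) | kind;
    }

    bool SerialMatches(const IrtEntry* table, uint32_t iref) {
      uint32_t index = (iref >> 2) & 0xffff;
      uint32_t serial = iref >> 20;
      return serial == table[index].serial;
    }

    int main() {
      IrtEntry table[4];
      int a = 0, b = 0;
      table[1].Add(&a);
      uint32_t ref_a = ToIndirectRef(table, 1, /*kind=*/1);
      table[1].Add(&b);  // slot reused: serial advances, ref_a is now stale
      uint32_t ref_b = ToIndirectRef(table, 1, /*kind=*/1);
      std::printf("ref_a valid: %d, ref_b valid: %d\n", SerialMatches(table, ref_a),
                  SerialMatches(table, ref_b));  // ref_a valid: 0, ref_b valid: 1
      return 0;
    }
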
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 6888241..e6d7e7b 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -1385,26 +1385,7 @@
*/
static JdwpError SF_GetValues(JdwpState*, Request& request, ExpandBuf* pReply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ObjectId thread_id = request.ReadThreadId();
- FrameId frame_id = request.ReadFrameId();
- int32_t slot_count = request.ReadSigned32("slot count");
-
- expandBufAdd4BE(pReply, slot_count); /* "int values" */
- for (int32_t i = 0; i < slot_count; ++i) {
- uint32_t slot = request.ReadUnsigned32("slot");
- JDWP::JdwpTag reqSigByte = request.ReadTag();
-
- VLOG(jdwp) << " --> slot " << slot << " " << reqSigByte;
-
- size_t width = Dbg::GetTagWidth(reqSigByte);
- uint8_t* ptr = expandBufAddSpace(pReply, width+1);
- JdwpError error = Dbg::GetLocalValue(thread_id, frame_id, slot, reqSigByte, ptr, width);
- if (error != ERR_NONE) {
- return error;
- }
- }
-
- return ERR_NONE;
+ return Dbg::GetLocalValues(&request, pReply);
}
/*
@@ -1412,24 +1393,7 @@
*/
static JdwpError SF_SetValues(JdwpState*, Request& request, ExpandBuf*)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ObjectId thread_id = request.ReadThreadId();
- FrameId frame_id = request.ReadFrameId();
- int32_t slot_count = request.ReadSigned32("slot count");
-
- for (int32_t i = 0; i < slot_count; ++i) {
- uint32_t slot = request.ReadUnsigned32("slot");
- JDWP::JdwpTag sigByte = request.ReadTag();
- size_t width = Dbg::GetTagWidth(sigByte);
- uint64_t value = request.ReadValue(width);
-
- VLOG(jdwp) << " --> slot " << slot << " " << sigByte << " " << value;
- JdwpError error = Dbg::SetLocalValue(thread_id, frame_id, slot, sigByte, value, width);
- if (error != ERR_NONE) {
- return error;
- }
- }
-
- return ERR_NONE;
+ return Dbg::SetLocalValues(&request);
}
static JdwpError SF_ThisObject(JdwpState*, Request& request, ExpandBuf* reply)
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 43ac98d..03a8563 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -498,7 +498,7 @@
// The size of java.lang.Class.class.
static uint32_t ClassClassSize() {
// The number of vtable entries in java.lang.Class.
- uint32_t vtable_entries = Object::kVTableLength + 64;
+ uint32_t vtable_entries = Object::kVTableLength + 66;
return ComputeClassSize(true, vtable_entries, 0, 1, 0);
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index bcb9e0c..9848382 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -172,9 +172,6 @@
BackgroundMethodSamplingProfiler::Shutdown();
}
- // Shutdown the fault manager if it was initialized.
- fault_manager.Shutdown();
-
Trace::Shutdown();
// Make sure to let the GC complete if it is running.
@@ -187,6 +184,10 @@
// Make sure all other non-daemon threads have terminated, and all daemon threads are suspended.
delete thread_list_;
+
+ // Shutdown the fault manager if it was initialized.
+ fault_manager.Shutdown();
+
delete monitor_list_;
delete monitor_pool_;
delete class_linker_;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 6b65f12..45d799d 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -978,10 +978,36 @@
}
void Thread::DumpJavaStack(std::ostream& os) const {
+ // Dumping the Java stack involves the verifier for locks. The verifier operates under the
+ // assumption that there is no exception pending on entry. Thus, stash any pending exception.
+ // TODO: Find a way to avoid const_cast.
+ StackHandleScope<3> scope(const_cast<Thread*>(this));
+ Handle<mirror::Throwable> exc;
+ Handle<mirror::Object> throw_location_this_object;
+ Handle<mirror::ArtMethod> throw_location_method;
+ uint32_t throw_location_dex_pc;
+ bool have_exception = false;
+ if (IsExceptionPending()) {
+ ThrowLocation exc_location;
+ exc = scope.NewHandle(GetException(&exc_location));
+ throw_location_this_object = scope.NewHandle(exc_location.GetThis());
+ throw_location_method = scope.NewHandle(exc_location.GetMethod());
+ throw_location_dex_pc = exc_location.GetDexPc();
+ const_cast<Thread*>(this)->ClearException();
+ have_exception = true;
+ }
+
std::unique_ptr<Context> context(Context::Create());
StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(),
!tls32_.throwing_OutOfMemoryError);
dumper.WalkStack();
+
+ if (have_exception) {
+ ThrowLocation exc_location(throw_location_this_object.Get(),
+ throw_location_method.Get(),
+ throw_location_dex_pc);
+ const_cast<Thread*>(this)->SetException(exc_location, exc.Get());
+ }
}
void Thread::DumpStack(std::ostream& os) const {
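
For reference, a condensed standalone sketch (hypothetical thread type) of the pattern added to DumpJavaStack above: if an exception is pending, stash it and clear it before walking the stack, then restore it afterwards so the caller still sees the original pending exception.

    #include <iostream>
    #include <string>

    struct FakeThread {
      std::string pending_exception;  // empty means no exception pending

      bool IsExceptionPending() const { return !pending_exception.empty(); }
      std::string GetException() const { return pending_exception; }
      void ClearException() { pending_exception.clear(); }
      void SetException(const std::string& e) { pending_exception = e; }

      void DumpJavaStack(std::ostream& os) {
        // Stash any pending exception; the dump machinery assumes none is set.
        std::string stashed;
        bool have_exception = false;
        if (IsExceptionPending()) {
          stashed = GetException();
          ClearException();
          have_exception = true;
        }

        os << "  at Example.main(Example.java:1)\n";  // stands in for the StackDumpVisitor walk

        if (have_exception) {
          SetException(stashed);  // restore so the caller still observes it
        }
      }
    };

    int main() {
      FakeThread thread;
      thread.SetException("java.lang.NullPointerException");
      thread.DumpJavaStack(std::cout);
      std::cout << "still pending: " << thread.pending_exception << "\n";
      return 0;
    }
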
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index b48fcd4..011bf96 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -34,6 +34,7 @@
#include "monitor.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
+#include "trace.h"
#include "utils.h"
#include "well_known_classes.h"
@@ -166,6 +167,8 @@
Runtime* runtime = Runtime::Current();
std::ostringstream ss;
ss << "Thread suspend timeout\n";
+ Locks::mutator_lock_->Dump(ss);
+ ss << "\n";
runtime->GetThreadList()->DumpLocked(ss);
LOG(FATAL) << ss.str();
exit(0);
@@ -836,6 +839,9 @@
// suspend and so on, must happen at this point, and not in ~Thread.
self->Destroy();
+ // If tracing, remember thread id and name before thread exits.
+ Trace::StoreExitingThreadInfo(self);
+
uint32_t thin_lock_id = self->GetThreadId();
while (self != nullptr) {
// Remove and delete the Thread* while holding the thread_list_lock_ and
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 4bb388f..4c5e909 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -706,9 +706,21 @@
void Trace::DumpThreadList(std::ostream& os) {
Thread* self = Thread::Current();
+ for (auto it : exited_threads_) {
+ os << it.first << "\t" << it.second << "\n";
+ }
Locks::thread_list_lock_->AssertNotHeld(self);
MutexLock mu(self, *Locks::thread_list_lock_);
Runtime::Current()->GetThreadList()->ForEach(DumpThread, &os);
}
+void Trace::StoreExitingThreadInfo(Thread* thread) {
+ MutexLock mu(thread, *Locks::trace_lock_);
+ if (the_trace_ != nullptr) {
+ std::string name;
+ thread->GetThreadName(name);
+ the_trace_->exited_threads_.Put(thread->GetTid(), name);
+ }
+}
+
} // namespace art
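
For reference, a minimal sketch (simplified types, std::map instead of SafeMap) of the new bookkeeping in trace.cc and thread_list.cc: when a traced thread exits, its tid and name are recorded under the trace lock, and the thread-list dump prints those remembered entries alongside the still-live threads.

    #include <iostream>
    #include <map>
    #include <mutex>
    #include <string>

    class Trace {
     public:
      void StoreExitingThreadInfo(int tid, const std::string& name) {
        std::lock_guard<std::mutex> mu(lock_);  // stands in for Locks::trace_lock_
        exited_threads_[tid] = name;            // SafeMap::Put in the patch
      }

      void DumpThreadList(std::ostream& os) {
        std::lock_guard<std::mutex> mu(lock_);
        for (const auto& it : exited_threads_) {
          os << it.first << "\t" << it.second << "\n";
        }
        // ...live threads would be appended here via the thread list...
      }

     private:
      std::mutex lock_;
      std::map<int, std::string> exited_threads_;
    };

    int main() {
      Trace trace;
      trace.StoreExitingThreadInfo(1234, "pool-1-thread-7");
      trace.DumpThreadList(std::cout);
      return 0;
    }
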
diff --git a/runtime/trace.h b/runtime/trace.h
index 45a02da..ead1c29 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -104,6 +104,8 @@
static std::vector<mirror::ArtMethod*>* AllocStackTrace();
// Clear and store an old stack trace for later use.
static void FreeStackTrace(std::vector<mirror::ArtMethod*>* stack_trace);
+ // Save id and name of a thread before it exits.
+ static void StoreExitingThreadInfo(Thread* thread);
private:
explicit Trace(File* trace_file, int buffer_size, int flags, bool sampling_enabled);
@@ -166,6 +168,9 @@
// Did we overflow the buffer recording traces?
bool overflow_;
+ // Map of thread ids and names that have already exited.
+ SafeMap<pid_t, std::string> exited_threads_;
+
DISALLOW_COPY_AND_ASSIGN(Trace);
};
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 1720e18..5adce49 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -506,9 +506,9 @@
}
}
failures_.push_back(error);
- std::string location(StringPrintf("%s: [0x%X]", PrettyMethod(dex_method_idx_, *dex_file_).c_str(),
+ std::string location(StringPrintf("%s: [0x%X] ", PrettyMethod(dex_method_idx_, *dex_file_).c_str(),
work_insn_idx_));
- std::ostringstream* failure_message = new std::ostringstream(location);
+ std::ostringstream* failure_message = new std::ostringstream(location, std::ostringstream::ate);
failure_messages_.push_back(failure_message);
return *failure_message;
}
@@ -523,7 +523,7 @@
DCHECK_NE(failure_num, 0U);
std::ostringstream* last_fail_message = failure_messages_[failure_num - 1];
prepend += last_fail_message->str();
- failure_messages_[failure_num - 1] = new std::ostringstream(prepend);
+ failure_messages_[failure_num - 1] = new std::ostringstream(prepend, std::ostringstream::ate);
delete last_fail_message;
}
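
For reference, a tiny standalone example of why the two hunks above add std::ostringstream::ate: without it, an ostringstream seeded with an initial string starts writing at position 0 and clobbers the seed; with ate the write position starts at the end, so the location prefix is preserved. The strings below are made up.

    #include <iostream>
    #include <sstream>
    #include <string>

    int main() {
      std::string location = "void Foo.bar(): [0x1A] ";

      std::ostringstream overwritten(location);                        // old behaviour
      overwritten << "register v1 has type Conflict";
      std::cout << overwritten.str() << "\n";  // seed string overwritten

      std::ostringstream appended(location, std::ostringstream::ate);  // patched behaviour
      appended << "register v1 has type Conflict";
      std::cout << appended.str() << "\n";     // "void Foo.bar(): [0x1A] register v1 has type Conflict"
      return 0;
    }
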
diff --git a/sigchainlib/Android.mk b/sigchainlib/Android.mk
index d86735d..e52adfc 100644
--- a/sigchainlib/Android.mk
+++ b/sigchainlib/Android.mk
@@ -22,10 +22,10 @@
LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
LOCAL_MODULE_TAGS := optional
LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
-LOCAL_SRC_FILES := sigchain.cc
+LOCAL_SRC_FILES := sigchain_dummy.cc
LOCAL_CLANG = $(ART_TARGET_CLANG)
LOCAL_MODULE:= libsigchain
-LOCAL_SHARED_LIBRARIES := liblog libdl
+LOCAL_SHARED_LIBRARIES := liblog
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.common_build.mk
include $(BUILD_SHARED_LIBRARY)
@@ -37,7 +37,7 @@
LOCAL_IS_HOST_MODULE := true
LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
LOCAL_CLANG = $(ART_HOST_CLANG)
-LOCAL_SRC_FILES := sigchain.cc
+LOCAL_SRC_FILES := sigchain_dummy.cc
LOCAL_MODULE:= libsigchain
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
LOCAL_LDLIBS = -ldl
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index 4991891..4f16c7f 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -99,21 +99,20 @@
}
}
-
// Claim a signal chain for a particular signal.
-void ClaimSignalChain(int signal, struct sigaction* oldaction) {
+extern "C" void ClaimSignalChain(int signal, struct sigaction* oldaction) {
CheckSignalValid(signal);
user_sigactions[signal].Claim(*oldaction);
}
-void UnclaimSignalChain(int signal) {
+extern "C" void UnclaimSignalChain(int signal) {
CheckSignalValid(signal);
user_sigactions[signal].Unclaim(signal);
}
// Invoke the user's signal handler.
-void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context) {
+extern "C" void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context) {
// Check the arguments.
CheckSignalValid(sig);
@@ -140,7 +139,7 @@
}
}
-void EnsureFrontOfChain(int signal, struct sigaction* expected_action) {
+extern "C" void EnsureFrontOfChain(int signal, struct sigaction* expected_action) {
CheckSignalValid(signal);
// Read the current action without looking at the chain, it should be the expected action.
SigActionFnPtr linked_sigaction = reinterpret_cast<SigActionFnPtr>(linked_sigaction_sym);
@@ -155,10 +154,9 @@
}
}
-extern "C" {
// These functions are C linkage since they replace the functions in libc.
-int sigaction(int signal, const struct sigaction* new_action, struct sigaction* old_action) {
+extern "C" int sigaction(int signal, const struct sigaction* new_action, struct sigaction* old_action) {
// If this signal has been claimed as a signal chain, record the user's
// action but don't pass it on to the kernel.
// Note that we check that the signal number is in range here. An out of range signal
@@ -192,7 +190,7 @@
return linked_sigaction(signal, new_action, old_action);
}
-int sigprocmask(int how, const sigset_t* bionic_new_set, sigset_t* bionic_old_set) {
+extern "C" int sigprocmask(int how, const sigset_t* bionic_new_set, sigset_t* bionic_old_set) {
const sigset_t* new_set_ptr = bionic_new_set;
sigset_t tmpset;
if (bionic_new_set != NULL) {
@@ -224,9 +222,8 @@
SigProcMask linked_sigprocmask= reinterpret_cast<SigProcMask>(linked_sigprocmask_sym);
return linked_sigprocmask(how, new_set_ptr, bionic_old_set);
}
-} // extern "C"
-void InitializeSignalChain() {
+extern "C" void InitializeSignalChain() {
// Warning.
// Don't call this from within a signal context as it makes calls to
// dlsym. Calling into the dynamic linker will result in locks being
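
For reference, a standalone sketch (Linux, illustrative only) of the two mechanics these sigchainlib changes depend on: giving the interposing functions C linkage so their symbol names match what libc callers look up, and forwarding to the real libc implementation located with dlsym(RTLD_NEXT, ...). The interposer here is deliberately named my_sigaction to keep the sketch self-contained; in the patch it really is extern "C" sigaction, exported from the executable via --export-dynamic and the new version-script.txt. Something like "g++ sketch.cc -ldl" should build it.

    #include <dlfcn.h>
    #include <signal.h>
    #include <cstdio>

    using SigActionFnPtr = int (*)(int, const struct sigaction*, struct sigaction*);

    // extern "C" keeps the symbol unmangled; the real interposer must be named "sigaction".
    extern "C" int my_sigaction(int signal, const struct sigaction* new_action,
                                struct sigaction* old_action) {
      std::printf("sigchain saw sigaction(%d)\n", signal);
      // If this signal were claimed, the user action would be recorded here instead of installed.
      void* sym = dlsym(RTLD_NEXT, "sigaction");  // the real libc sigaction
      if (sym == nullptr) {
        std::printf("dlsym failed: %s\n", dlerror());
        return -1;
      }
      return reinterpret_cast<SigActionFnPtr>(sym)(signal, new_action, old_action);
    }

    static void Handler(int) {}

    int main() {
      struct sigaction action = {};
      action.sa_handler = Handler;
      return my_sigaction(SIGUSR1, &action, nullptr);
    }
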
diff --git a/sigchainlib/sigchain.h b/sigchainlib/sigchain.h
index be69ca4..59a1f1e 100644
--- a/sigchainlib/sigchain.h
+++ b/sigchainlib/sigchain.h
@@ -19,18 +19,14 @@
#include <signal.h>
-namespace art {
+extern "C" void InitializeSignalChain();
-void InitializeSignalChain();
+extern "C" void ClaimSignalChain(int signal, struct sigaction* oldaction);
-void ClaimSignalChain(int signal, struct sigaction* oldaction);
+extern "C" void EnsureFrontOfChain(int signal, struct sigaction* expected_action);
-void EnsureFrontOfChain(int signal, struct sigaction* expected_action);
+extern "C" void UnclaimSignalChain(int signal);
-void UnclaimSignalChain(int signal);
-
-void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context);
-
-} // namespace art
+extern "C" void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context);
#endif // ART_SIGCHAINLIB_SIGCHAIN_H_
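
With the declarations now plain extern "C" functions rather than members of namespace art, any component can include this header and let the dynamic linker bind the calls to whichever definition the process provides. A hedged sketch of the intended calling pattern for a runtime fault handler; this is not code from the patch, and MyFaultHandler/InstallFaultHandler are invented names:

    // fault_handler_sketch.cc -- hedged illustration of how a runtime might
    // use the sigchain entry points; MyFaultHandler and InstallFaultHandler
    // are hypothetical, not code from this patch.
    #include <signal.h>
    #include <string.h>
    #include "sigchain.h"

    static void MyFaultHandler(int sig, siginfo_t* info, void* context) {
      // ... try to handle the fault in the runtime first ...
      // If it is not ours, forward it to whatever handler the application
      // had installed before the chain was claimed.
      InvokeUserSignalHandler(sig, info, context);
    }

    void InstallFaultHandler() {
      struct sigaction action;
      memset(&action, 0, sizeof(action));
      action.sa_sigaction = MyFaultHandler;
      sigemptyset(&action.sa_mask);
      action.sa_flags = SA_SIGINFO | SA_ONSTACK;

      struct sigaction old_action;
      sigaction(SIGSEGV, &action, &old_action);
      // Record the displaced handler so InvokeUserSignalHandler can reach it.
      ClaimSignalChain(SIGSEGV, &old_action);
    }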
diff --git a/sigchainlib/sigchain_dummy.cc b/sigchainlib/sigchain_dummy.cc
new file mode 100644
index 0000000..b0a6ebb
--- /dev/null
+++ b/sigchainlib/sigchain_dummy.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef HAVE_ANDROID_OS
+#include <android/log.h>
+#else
+#include <stdarg.h>
+#include <iostream>
+#endif
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "sigchain.h"
+
+static void log(const char* format, ...) {
+ char buf[256];
+ va_list ap;
+ va_start(ap, format);
+ vsnprintf(buf, sizeof(buf), format, ap);
+#ifdef HAVE_ANDROID_OS
+ __android_log_write(ANDROID_LOG_ERROR, "libsigchain", buf);
+#else
+ std::cout << buf << "\n";
+#endif
+ va_end(ap);
+}
+
+extern "C" void ClaimSignalChain(int signal, struct sigaction* oldaction) {
+ log("ClaimSignalChain is not exported by the main executable.");
+ abort();
+}
+
+extern "C" void EnsureFrontOfChain(int signal, struct sigaction* expected_action) {
+ log("EnsureFrontOfChain is not exported by the main executable.");
+ abort();
+}
+
+extern "C" void UnclaimSignalChain(int signal) {
+ log("UnclaimSignalChain is not exported by the main executable.");
+ abort();
+}
+
+extern "C" void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context) {
+ log("InvokeUserSignalHandler is not exported by the main executable.");
+ abort();
+}
+
+extern "C" void InitializeSignalChain() {
+ log("InitializeSignalChain is not exported by the main executable.");
+ abort();
+}
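
These stubs let libsigchain.so keep satisfying its old dependents while making any accidental use of the library's own copies fail loudly: real callers are expected to bind to the definitions exported by the main executable, which is built with sigchain.cc, the version script, and --export-dynamic. A hedged way to see which object actually won the binding, assuming a glibc/bionic-style dladdr(); not part of the patch:

    // which_sigchain.cc -- hypothetical diagnostic, not part of this patch.
    #include <dlfcn.h>
    #include <cstdio>
    #include "sigchain.h"

    int main() {
      Dl_info info;
      void* addr = reinterpret_cast<void*>(&InitializeSignalChain);
      if (dladdr(addr, &info) != 0 && info.dli_fname != nullptr) {
        // Expect the executable's own path here; seeing libsigchain.so would
        // mean only the aborting stubs above are reachable.
        std::printf("InitializeSignalChain bound from %s\n", info.dli_fname);
      }
      return 0;
    }

On glibc, dladdr is exposed under _GNU_SOURCE, which g++ defines by default; bionic provides it unconditionally.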
diff --git a/sigchainlib/version-script.txt b/sigchainlib/version-script.txt
new file mode 100644
index 0000000..5c72a3e
--- /dev/null
+++ b/sigchainlib/version-script.txt
@@ -0,0 +1,13 @@
+{
+global:
+ ClaimSignalChain;
+ EnsureFrontOfChain;
+ UnclaimSignalChain;
+ InvokeUserSignalHandler;
+ InitializeSignalChain;
+ sigaction;
+ signal;
+ sigprocmask;
+local:
+ *;
+};
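
The script makes the interposition surface explicit: only the sigchain entry points and the three libc wrappers stay global when an executable is linked with it together with -Wl,--export-dynamic, and "local: *" hides everything else the executable would otherwise re-export. A rough way to probe that from inside a process; SomeInternalHelper is an invented name standing in for any symbol caught by the local clause, and the exact lookup scope of a dlopen(nullptr) handle varies between linkers, so treat this as a sketch rather than a test:

    // exports_probe.cc -- hypothetical probe, not part of this patch.
    #include <dlfcn.h>
    #include <cstdio>

    int main() {
      void* self = dlopen(nullptr, RTLD_NOW);  // handle for the main program
      if (self == nullptr) return 1;

      // Listed under "global:", so this should resolve (from the executable
      // itself, or from the dummy libsigchain.so if that is all that is loaded).
      std::printf("ClaimSignalChain   -> %p\n", dlsym(self, "ClaimSignalChain"));

      // Anything swept up by "local: *" is hidden from dynamic lookup and
      // should come back null, e.g. an unlisted helper defined in the binary.
      std::printf("SomeInternalHelper -> %p\n", dlsym(self, "SomeInternalHelper"));

      dlclose(self);
      return 0;
    }

Link with -ldl on systems where dlopen/dlsym live in libdl.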
diff --git a/test/046-reflect/expected.txt b/test/046-reflect/expected.txt
index ecb3599..fa053fb 100644
--- a/test/046-reflect/expected.txt
+++ b/test/046-reflect/expected.txt
@@ -123,3 +123,17 @@
fields are .equals
methods are unique
methods are .equals
+type1 is a ParameterizedType
+type2 is a ParameterizedType
+type3 is a ParameterizedType
+type1(java.util.Set<java.lang.String>) equals type2(java.util.Set<java.lang.String>)
+type1(java.util.Set<java.lang.String>) equals type3(java.util.Set<java.lang.String>)
+type1(java.util.Set<java.lang.String>) hashCode equals type2(java.util.Set<java.lang.String>) hashCode
+type1(java.util.Set<java.lang.String>) hashCode equals type3(java.util.Set<java.lang.String>) hashCode
+type1 is a GenericArrayType
+type2 is a GenericArrayType
+type3 is a GenericArrayType
+type1(T[]) equals type2(T[])
+type1(T[]) equals type3(T[])
+type1(T[]) hashCode equals type2(T[]) hashCode
+type1(T[]) hashCode equals type3(T[]) hashCode
diff --git a/test/046-reflect/src/Main.java b/test/046-reflect/src/Main.java
index 3e6d700..11eb773 100644
--- a/test/046-reflect/src/Main.java
+++ b/test/046-reflect/src/Main.java
@@ -18,8 +18,10 @@
import java.io.IOException;
import java.util.Collections;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
import java.util.Map;
+import java.util.Set;
/**
* Reflection test.
@@ -579,6 +581,118 @@
}
}
+ public static void checkParametrizedTypeEqualsAndHashCode() {
+ Method method1;
+ Method method2;
+ Method method3;
+ try {
+ method1 = ParametrizedTypeTest.class.getDeclaredMethod("aMethod", Set.class);
+ method2 = ParametrizedTypeTest.class.getDeclaredMethod("aMethod", Set.class);
+ method3 = ParametrizedTypeTest.class.getDeclaredMethod("aMethodIdentical", Set.class);
+ } catch (NoSuchMethodException nsme) {
+ throw new RuntimeException(nsme);
+ }
+
+ List<Type> types1 = Arrays.asList(method1.getGenericParameterTypes());
+ List<Type> types2 = Arrays.asList(method2.getGenericParameterTypes());
+ List<Type> types3 = Arrays.asList(method3.getGenericParameterTypes());
+
+ Type type1 = types1.get(0);
+ Type type2 = types2.get(0);
+ Type type3 = types3.get(0);
+
+ if (type1 instanceof ParameterizedType) {
+ System.out.println("type1 is a ParameterizedType");
+ }
+ if (type2 instanceof ParameterizedType) {
+ System.out.println("type2 is a ParameterizedType");
+ }
+ if (type3 instanceof ParameterizedType) {
+ System.out.println("type3 is a ParameterizedType");
+ }
+
+ if (type1.equals(type2)) {
+ System.out.println("type1("+type1+") equals type2("+type2+")");
+ } else {
+ System.out.println("type1("+type1+") does not equal type2("+type2+")");
+ }
+
+ if (type1.equals(type3)) {
+ System.out.println("type1("+type1+") equals type3("+type3+")");
+ } else {
+ System.out.println("type1("+type1+") does not equal type3("+type3+")");
+ }
+ if (type1.hashCode() == type2.hashCode()) {
+ System.out.println("type1("+type1+") hashCode equals type2("+type2+") hashCode");
+ } else {
+ System.out.println(
+ "type1("+type1+") hashCode does not equal type2("+type2+") hashCode");
+ }
+
+ if (type1.hashCode() == type3.hashCode()) {
+ System.out.println("type1("+type1+") hashCode equals type3("+type3+") hashCode");
+ } else {
+ System.out.println(
+ "type1("+type1+") hashCode does not equal type3("+type3+") hashCode");
+ }
+ }
+
+ public static void checkGenericArrayTypeEqualsAndHashCode() {
+ Method method1;
+ Method method2;
+ Method method3;
+ try {
+ method1 = GenericArrayTypeTest.class.getDeclaredMethod("aMethod", Object[].class);
+ method2 = GenericArrayTypeTest.class.getDeclaredMethod("aMethod", Object[].class);
+ method3 = GenericArrayTypeTest.class.getDeclaredMethod("aMethodIdentical", Object[].class);
+ } catch (NoSuchMethodException nsme) {
+ throw new RuntimeException(nsme);
+ }
+
+ List<Type> types1 = Arrays.asList(method1.getGenericParameterTypes());
+ List<Type> types2 = Arrays.asList(method2.getGenericParameterTypes());
+ List<Type> types3 = Arrays.asList(method3.getGenericParameterTypes());
+
+ Type type1 = types1.get(0);
+ Type type2 = types2.get(0);
+ Type type3 = types3.get(0);
+
+ if (type1 instanceof GenericArrayType) {
+ System.out.println("type1 is a GenericArrayType");
+ }
+ if (type2 instanceof GenericArrayType) {
+ System.out.println("type2 is a GenericArrayType");
+ }
+ if (type3 instanceof GenericArrayType) {
+ System.out.println("type3 is a GenericArrayType");
+ }
+
+ if (type1.equals(type2)) {
+ System.out.println("type1("+type1+") equals type2("+type2+")");
+ } else {
+ System.out.println("type1("+type1+") does not equal type2("+type2+")");
+ }
+
+ if (type1.equals(type3)) {
+ System.out.println("type1("+type1+") equals type3("+type3+")");
+ } else {
+ System.out.println("type1("+type1+") does not equal type3("+type3+")");
+ }
+ if (type1.hashCode() == type2.hashCode()) {
+ System.out.println("type1("+type1+") hashCode equals type2("+type2+") hashCode");
+ } else {
+ System.out.println(
+ "type1("+type1+") hashCode does not equal type2("+type2+") hashCode");
+ }
+
+ if (type1.hashCode() == type3.hashCode()) {
+ System.out.println("type1("+type1+") hashCode equals type3("+type3+") hashCode");
+ } else {
+ System.out.println(
+ "type1("+type1+") hashCode does not equal type3("+type3+") hashCode");
+ }
+ }
+
public static void main(String[] args) throws Exception {
Main test = new Main();
test.run();
@@ -589,6 +703,8 @@
checkClinitForMethods();
checkGeneric();
checkUnique();
+ checkParametrizedTypeEqualsAndHashCode();
+ checkGenericArrayTypeEqualsAndHashCode();
}
}
@@ -696,3 +812,13 @@
throw new UnsupportedOperationException();
}
}
+
+class ParametrizedTypeTest {
+ public void aMethod(Set<String> names) {}
+ public void aMethodIdentical(Set<String> names) {}
+}
+
+class GenericArrayTypeTest<T> {
+ public void aMethod(T[] names) {}
+ public void aMethodIdentical(T[] names) {}
+}
diff --git a/test/etc/host-run-test-jar b/test/etc/host-run-test-jar
index 7253a2b..dd0107f 100755
--- a/test/etc/host-run-test-jar
+++ b/test/etc/host-run-test-jar
@@ -192,4 +192,4 @@
fi
cd $ANDROID_BUILD_TOP
-$mkdir_cmd && $prebuild_cmd && LD_PRELOAD=libsigchain.so $cmdline "$@"
+$mkdir_cmd && $prebuild_cmd && $cmdline "$@"