Merge "Fix mac build"
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 45dd7f0..0e46c96 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -48,6 +48,7 @@
true, // kIntrinsicMinMaxFloat
true, // kIntrinsicMinMaxDouble
true, // kIntrinsicSqrt
+ false, // kIntrinsicGet
false, // kIntrinsicCharAt
false, // kIntrinsicCompareTo
false, // kIntrinsicIsEmptyOrLength
@@ -74,6 +75,7 @@
COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxFloat], MinMaxFloat_must_be_static);
COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxDouble], MinMaxDouble_must_be_static);
COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicSqrt], Sqrt_must_be_static);
+COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicGet], Get_must_not_be_static);
COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCharAt], CharAt_must_not_be_static);
COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCompareTo], CompareTo_must_not_be_static);
COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicIsEmptyOrLength], IsEmptyOrLength_must_not_be_static);
@@ -126,6 +128,7 @@
"D", // kClassCacheDouble
"V", // kClassCacheVoid
"Ljava/lang/Object;", // kClassCacheJavaLangObject
+ "Ljava/lang/ref/Reference;", // kClassCacheJavaLangRefReference
"Ljava/lang/String;", // kClassCacheJavaLangString
"Ljava/lang/Double;", // kClassCacheJavaLangDouble
"Ljava/lang/Float;", // kClassCacheJavaLangFloat
@@ -152,6 +155,7 @@
"max", // kNameCacheMax
"min", // kNameCacheMin
"sqrt", // kNameCacheSqrt
+ "get", // kNameCacheGet
"charAt", // kNameCacheCharAt
"compareTo", // kNameCacheCompareTo
"isEmpty", // kNameCacheIsEmpty
@@ -220,6 +224,8 @@
{ kClassCacheBoolean, 0, { } },
// kProtoCache_I
{ kClassCacheInt, 0, { } },
+ // kProtoCache_Object
+ { kClassCacheJavaLangObject, 0, { } },
// kProtoCache_Thread
{ kClassCacheJavaLangThread, 0, { } },
// kProtoCacheJ_B
@@ -308,6 +314,8 @@
INTRINSIC(JavaLangMath, Sqrt, D_D, kIntrinsicSqrt, 0),
INTRINSIC(JavaLangStrictMath, Sqrt, D_D, kIntrinsicSqrt, 0),
+ INTRINSIC(JavaLangRefReference, Get, _Object, kIntrinsicGet, 0),
+
INTRINSIC(JavaLangString, CharAt, I_C, kIntrinsicCharAt, 0),
INTRINSIC(JavaLangString, CompareTo, String_I, kIntrinsicCompareTo, 0),
INTRINSIC(JavaLangString, IsEmpty, _Z, kIntrinsicIsEmptyOrLength, kIntrinsicFlagIsEmpty),
@@ -428,6 +436,8 @@
return backend->GenInlinedMinMaxFP(info, intrinsic.d.data & kIntrinsicFlagMin, true /* is_double */);
case kIntrinsicSqrt:
return backend->GenInlinedSqrt(info);
+ case kIntrinsicGet:
+ return backend->GenInlinedGet(info);
case kIntrinsicCharAt:
return backend->GenInlinedCharAt(info);
case kIntrinsicCompareTo:
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 5b3b104..cb8c165 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -107,6 +107,7 @@
kClassCacheDouble,
kClassCacheVoid,
kClassCacheJavaLangObject,
+ kClassCacheJavaLangRefReference,
kClassCacheJavaLangString,
kClassCacheJavaLangDouble,
kClassCacheJavaLangFloat,
@@ -140,6 +141,7 @@
kNameCacheMax,
kNameCacheMin,
kNameCacheSqrt,
+ kNameCacheGet,
kNameCacheCharAt,
kNameCacheCompareTo,
kNameCacheIsEmpty,
@@ -199,6 +201,7 @@
kProtoCacheString_I,
kProtoCache_Z,
kProtoCache_I,
+ kProtoCache_Object,
kProtoCache_Thread,
kProtoCacheJ_B,
kProtoCacheJ_I,
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 1dbf2ea..b9205f8 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -22,9 +22,13 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "invoke_type.h"
#include "mirror/array.h"
+#include "mirror/class-inl.h"
+#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
+#include "mirror/reference.h"
#include "mirror/string.h"
#include "mir_to_lir-inl.h"
+#include "scoped_thread_state_change.h"
#include "x86/codegen_x86.h"
namespace art {
@@ -1258,6 +1262,85 @@
return res;
}
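+// Generates an inlined fast path for java.lang.ref.Reference.get(): the referent is loaded
+// directly when the Reference class flags allow it, otherwise control falls back to the
+// non-inlined (JNI) invoke below.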
+bool Mir2Lir::GenInlinedGet(CallInfo* info) {
+ if (cu_->instruction_set == kMips) {
+ // TODO - add Mips implementation
+ return false;
+ }
+
+ // The Reference class is stored in the image dex file, which might not be the same as the cu's
+ // dex file. Query the Reference class against the image dex file, then restore the original
+ // dex file after loading the class type.
+ uint16_t type_idx = 0;
+ const DexFile* ref_dex_file = nullptr;
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ type_idx = mirror::Reference::GetJavaLangRefReference()->GetDexTypeIndex();
+ ref_dex_file = mirror::Reference::GetJavaLangRefReference()->GetDexCache()->GetDexFile();
+ }
+ CHECK(LIKELY(ref_dex_file != nullptr));
+
+ // The class address is either static within the image file, or needs to be patched up after compilation.
+ bool unused_type_initialized;
+ bool use_direct_type_ptr;
+ uintptr_t direct_type_ptr;
+ bool is_finalizable;
+ const DexFile* old_dex = cu_->dex_file;
+ cu_->dex_file = ref_dex_file;
+ if (!cu_->compiler_driver->CanEmbedTypeInCode(*ref_dex_file, type_idx, &unused_type_initialized,
+ &use_direct_type_ptr, &direct_type_ptr,
+ &is_finalizable) || is_finalizable) {
+ cu_->dex_file = old_dex;
+ // The address is not known and a post-compile patch is not possible; cannot insert the intrinsic.
+ return false;
+ }
+ if (use_direct_type_ptr) {
+ LoadConstant(TargetReg(kArg1), direct_type_ptr);
+ } else {
+ LoadClassType(type_idx, kArg1);
+ }
+ cu_->dex_file = old_dex;
+
+ // Intrinsic logic starts here.
+ RegLocation rl_obj = info->args[0];
+ rl_obj = LoadValue(rl_obj);
+
+ RegStorage reg_class = TargetReg(kArg1, cu_->target64);
+ RegStorage reg_slow_path = AllocTemp();
+ RegStorage reg_disabled = AllocTemp();
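+ // Take the slow path if either the slowPathEnabled or disableIntrinsic flag is set on the
+ // Reference class.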
+ Load32Disp(reg_class, mirror::ReferenceClass::SlowPathEnabledOffset().Int32Value(),
+ reg_slow_path);
+ Load32Disp(reg_class, mirror::ReferenceClass::DisableIntrinsicOffset().Int32Value(),
+ reg_disabled);
+ OpRegRegReg(kOpOr, reg_slow_path, reg_slow_path, reg_disabled);
+ FreeTemp(reg_disabled);
+
+ // If the slow path is required, jump to the JNI path target below.
+ LIR* slow_path_branch = OpCmpImmBranch(kCondNe, reg_slow_path, 0, nullptr);
+ FreeTemp(reg_slow_path);
+
+ // Slow path not enabled; simply load the referent of the reference object.
+ RegLocation rl_dest = InlineTarget(info);
+ RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
+ GenNullCheck(rl_obj.reg, info->opt_flags);
+ LoadRefDisp(rl_obj.reg, mirror::Reference::ReferentOffset().Int32Value(), rl_result.reg,
+ kNotVolatile);
+ MarkPossibleNullPointerException(info->opt_flags);
+ StoreValue(rl_dest, rl_result);
+ LIR* jump_finished = OpUnconditionalBranch(nullptr);
+
+ // JNI target
+ LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
+ slow_path_branch->target = slow_path_target;
+ ResetRegPool();
+ GenInvokeNoInline(info);
+
+ LIR* finished_target = NewLIR0(kPseudoTargetLabel);
+ jump_finished->target = finished_target;
+
+ return true;
+}
+
bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
if (cu_->instruction_set == kMips) {
// TODO - add Mips implementation
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 1789e25..8528dba 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -983,6 +983,7 @@
*/
RegLocation InlineTargetWide(CallInfo* info);
+ bool GenInlinedGet(CallInfo* info);
bool GenInlinedCharAt(CallInfo* info);
bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
virtual bool GenInlinedReverseBits(CallInfo* info, OpSize size);
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 3774b32..f1b5ddf 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -97,6 +97,7 @@
mirror/class.cc \
mirror/dex_cache.cc \
mirror/object.cc \
+ mirror/reference.cc \
mirror/stack_trace_element.cc \
mirror/string.cc \
mirror/throwable.cc \
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 5180e34..a957eb6 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -254,6 +254,13 @@
java_lang_String->SetObjectSize(sizeof(mirror::String));
java_lang_String->SetStatus(mirror::Class::kStatusResolved, self);
+ // Setup Reference.
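+ // Reference is allocated up front with the larger mirror::ReferenceClass footprint so that the
+ // extra intrinsic/slow-path fields exist; it is fully loaded from the dex file further below.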
+ Handle<mirror::Class> java_lang_ref_Reference(
+ hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::ReferenceClass))));
+ mirror::Reference::SetClass(down_cast<mirror::ReferenceClass*>(java_lang_ref_Reference.Get()));
+ java_lang_ref_Reference->SetObjectSize(sizeof(mirror::Reference));
+ java_lang_ref_Reference->SetStatus(mirror::Class::kStatusResolved, self);
+
// Create storage for root classes, save away our work so far (requires descriptors).
class_roots_ = mirror::ObjectArray<mirror::Class>::Alloc(self, object_array_class.Get(),
kClassRootsMax);
@@ -264,6 +271,7 @@
SetClassRoot(kObjectArrayClass, object_array_class.Get());
SetClassRoot(kCharArrayClass, char_array_class.Get());
SetClassRoot(kJavaLangString, java_lang_String.Get());
+ SetClassRoot(kJavaLangRefReference, java_lang_ref_Reference.Get());
// Setup the primitive type classes.
SetClassRoot(kPrimitiveBoolean, CreatePrimitiveClass(self, Primitive::kPrimBoolean));
@@ -452,8 +460,12 @@
SetClassRoot(kJavaLangReflectProxy, java_lang_reflect_Proxy);
// java.lang.ref classes need to be specially flagged, but otherwise are normal classes
- mirror::Class* java_lang_ref_Reference = FindSystemClass(self, "Ljava/lang/ref/Reference;");
- SetClassRoot(kJavaLangRefReference, java_lang_ref_Reference);
+ // Finish initializing the Reference class allocated above.
+ java_lang_ref_Reference->SetStatus(mirror::Class::kStatusNotReady, self);
+ mirror::Class* Reference_class = FindSystemClass(self, "Ljava/lang/ref/Reference;");
+ CHECK_EQ(java_lang_ref_Reference.Get(), Reference_class);
+ CHECK_EQ(java_lang_ref_Reference->GetObjectSize(), sizeof(mirror::Reference));
+ CHECK_EQ(java_lang_ref_Reference->GetClassSize(), sizeof(mirror::ReferenceClass));
mirror::Class* java_lang_ref_FinalizerReference =
FindSystemClass(self, "Ljava/lang/ref/FinalizerReference;");
java_lang_ref_FinalizerReference->SetAccessFlags(
@@ -538,6 +550,9 @@
CHECK(array_iftable_ != NULL);
+ // Initialize the Reference class flags, disabling the slow path for Reference.get().
+ mirror::Reference::GetJavaLangRefReference()->Init();
+
// disable the slow paths in FindClass and CreatePrimitiveClass now
// that Object, Class, and Object[] are setup
init_done_ = true;
@@ -1220,6 +1235,8 @@
array_iftable_ = GetClassRoot(kObjectArrayClass)->GetIfTable();
DCHECK(array_iftable_ == GetClassRoot(kBooleanArrayClass)->GetIfTable());
// String class root was set above
+ mirror::Reference::SetClass(down_cast<mirror::ReferenceClass*>(GetClassRoot(
+ kJavaLangRefReference)));
mirror::ArtField::SetClass(GetClassRoot(kJavaLangReflectArtField));
mirror::BooleanArray::SetArrayClass(GetClassRoot(kBooleanArrayClass));
mirror::ByteArray::SetArrayClass(GetClassRoot(kByteArrayClass));
@@ -1343,6 +1360,7 @@
ClassLinker::~ClassLinker() {
mirror::Class::ResetClass();
mirror::String::ResetClass();
+ mirror::Reference::ResetClass();
mirror::ArtField::ResetClass();
mirror::ArtMethod::ResetClass();
mirror::BooleanArray::ResetArrayClass();
@@ -1587,6 +1605,8 @@
klass.Assign(GetClassRoot(kJavaLangClass));
} else if (strcmp(descriptor, "Ljava/lang/String;") == 0) {
klass.Assign(GetClassRoot(kJavaLangString));
+ } else if (strcmp(descriptor, "Ljava/lang/ref/Reference;") == 0) {
+ klass.Assign(GetClassRoot(kJavaLangRefReference));
} else if (strcmp(descriptor, "Ljava/lang/DexCache;") == 0) {
klass.Assign(GetClassRoot(kJavaLangDexCache));
} else if (strcmp(descriptor, "Ljava/lang/reflect/ArtField;") == 0) {
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 04f6946..8fdead6 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -626,6 +626,13 @@
};
};
+struct ReferenceClassOffsets : public CheckOffsets<mirror::ReferenceClass> {
+ ReferenceClassOffsets() : CheckOffsets<mirror::ReferenceClass>(true, "Ljava/lang/ref/Reference;") {
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ReferenceClass, disable_intrinsic_), "disableIntrinsic"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ReferenceClass, slow_path_enabled_), "slowPathEnabled"));
+ };
+};
+
struct FinalizerReferenceOffsets : public CheckOffsets<mirror::FinalizerReference> {
FinalizerReferenceOffsets() : CheckOffsets<mirror::FinalizerReference>(false, "Ljava/lang/ref/FinalizerReference;") {
// alphabetical references
@@ -651,6 +658,7 @@
EXPECT_TRUE(ProxyOffsets().Check());
EXPECT_TRUE(DexCacheOffsets().Check());
EXPECT_TRUE(ReferenceOffsets().Check());
+ EXPECT_TRUE(ReferenceClassOffsets().Check());
EXPECT_TRUE(FinalizerReferenceOffsets().Check());
EXPECT_TRUE(ClassClassOffsets().Check());
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index d26635f..c66e80d 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -2112,30 +2112,40 @@
// result in occasionally not releasing pages which we could release.
byte pm = page_map_[i];
switch (pm) {
+ case kPageMapReleased:
+ // Fall through.
case kPageMapEmpty: {
- // Only lock if we have an empty page since we want to prevent other threads racing in.
+ // This is currently the start of a free page run.
+ // Acquire the lock to prevent other threads racing in and modifying the page map.
MutexLock mu(self, lock_);
// Check that it's still empty after we acquired the lock since another thread could have
// raced in and placed an allocation here.
- pm = page_map_[i];
- if (LIKELY(pm == kPageMapEmpty)) {
- // The start of a free page run. Release pages.
+ if (IsFreePage(i)) {
+ // Free page runs can start with a released page if we coalesced a released page free
+ // page run with an empty page run.
FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
- DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
- size_t fpr_size = fpr->ByteSize(this);
- DCHECK(IsAligned<kPageSize>(fpr_size));
- byte* start = reinterpret_cast<byte*>(fpr);
- reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
- i += fpr_size / kPageSize;
- DCHECK_LE(i, page_map_size_);
+ // There is a race condition where FreePage can coalesce fpr with the previous
+ // free page run before we acquire lock_. In that case free_page_runs_.find will not find
+ // a run starting at fpr. To handle this race, we skip reclaiming the page range and go
+ // to the next page.
+ if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
+ size_t fpr_size = fpr->ByteSize(this);
+ DCHECK(IsAligned<kPageSize>(fpr_size));
+ byte* start = reinterpret_cast<byte*>(fpr);
+ reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
+ size_t pages = fpr_size / kPageSize;
+ CHECK_GT(pages, 0U) << "Infinite loop probable";
+ i += pages;
+ DCHECK_LE(i, page_map_size_);
+ break;
+ }
}
- break;
+ // Fall through.
}
case kPageMapLargeObject: // Fall through.
case kPageMapLargeObjectPart: // Fall through.
case kPageMapRun: // Fall through.
case kPageMapRunPart: // Fall through.
- case kPageMapReleased: // Fall through since it is already released.
++i;
break; // Skip.
default:
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 07db169..86dab21 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -120,7 +120,7 @@
static constexpr size_t kDefaultStartingSize = kPageSize;
static constexpr size_t kDefaultInitialSize = 2 * MB;
- static constexpr size_t kDefaultMaximumSize = 32 * MB;
+ static constexpr size_t kDefaultMaximumSize = 256 * MB;
static constexpr size_t kDefaultMaxFree = 2 * MB;
static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
diff --git a/runtime/gc/reference_processor-inl.h b/runtime/gc/reference_processor-inl.h
new file mode 100644
index 0000000..f619a15
--- /dev/null
+++ b/runtime/gc/reference_processor-inl.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_REFERENCE_PROCESSOR_INL_H_
+#define ART_RUNTIME_GC_REFERENCE_PROCESSOR_INL_H_
+
+#include "reference_processor.h"
+
+namespace art {
+namespace gc {
+
+inline bool ReferenceProcessor::SlowPathEnabled() {
+ return mirror::Reference::GetJavaLangRefReference()->GetSlowPathEnabled();
+}
+
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_REFERENCE_PROCESSOR_INL_H_
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index e52bc1f..62d9e68 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -17,7 +17,9 @@
#include "reference_processor.h"
#include "mirror/object-inl.h"
+#include "mirror/reference.h"
#include "mirror/reference-inl.h"
+#include "reference_processor-inl.h"
#include "reflection.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
@@ -27,18 +29,17 @@
namespace gc {
ReferenceProcessor::ReferenceProcessor()
- : process_references_args_(nullptr, nullptr, nullptr), slow_path_enabled_(false),
+ : process_references_args_(nullptr, nullptr, nullptr),
preserving_references_(false), lock_("reference processor lock", kReferenceProcessorLock),
condition_("reference processor condition", lock_) {
}
void ReferenceProcessor::EnableSlowPath() {
- Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
- slow_path_enabled_ = true;
+ mirror::Reference::GetJavaLangRefReference()->SetSlowPathEnabled(true);
}
void ReferenceProcessor::DisableSlowPath(Thread* self) {
- slow_path_enabled_ = false;
+ mirror::Reference::GetJavaLangRefReference()->SetSlowPathEnabled(false);
condition_.Broadcast(self);
}
@@ -46,11 +47,11 @@
mirror::Object* const referent = reference->GetReferent();
// If the referent is null then it is already cleared, we can just return null since there is no
// scenario where it becomes non-null during the reference processing phase.
- if (LIKELY(!slow_path_enabled_) || referent == nullptr) {
+ if (LIKELY(!SlowPathEnabled()) || referent == nullptr) {
return referent;
}
MutexLock mu(self, lock_);
- while (slow_path_enabled_) {
+ while (SlowPathEnabled()) {
mirror::HeapReference<mirror::Object>* const referent_addr =
reference->GetReferentReferenceAddr();
// If the referent became cleared, return it. Don't need barrier since thread roots can't get
@@ -117,7 +118,7 @@
process_references_args_.is_marked_callback_ = is_marked_callback;
process_references_args_.mark_callback_ = mark_object_callback;
process_references_args_.arg_ = arg;
- CHECK_EQ(slow_path_enabled_, concurrent) << "Slow path must be enabled iff concurrent";
+ CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent";
}
// Unless required to clear soft references with white references, preserve some white referents.
if (!clear_soft_references) {
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index 2771ea8..91328a3 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -30,6 +30,7 @@
namespace mirror {
class Object;
class Reference;
+class ReferenceClass;
} // namespace mirror
namespace gc {
@@ -49,6 +50,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
LOCKS_EXCLUDED(lock_);
+ // The slow path bool is contained in the Reference class object, and can only be set once.
// Only allow setting this with mutators suspended so that we can avoid using a lock in the
// GetReferent fast path as an optimization.
void EnableSlowPath() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -60,7 +62,7 @@
IsHeapReferenceMarkedCallback* is_marked_callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void UpdateRoots(IsMarkedCallback* callback, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
private:
class ProcessReferencesArgs {
@@ -75,8 +77,10 @@
MarkObjectCallback* mark_callback_;
void* arg_;
};
+ bool SlowPathEnabled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Called by ProcessReferences.
- void DisableSlowPath(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void DisableSlowPath(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// If we are preserving references it means that some dead objects may become live, we use start
// and stop preserving to block mutators using GetReferent from getting access to these
// referents.
@@ -84,8 +88,6 @@
void StopPreservingReferences(Thread* self) LOCKS_EXCLUDED(lock_);
// Process args, used by the GetReferent to return referents which are already marked.
ProcessReferencesArgs process_references_args_ GUARDED_BY(lock_);
- // Boolean for whether or not we need to go slow path in GetReferent.
- volatile bool slow_path_enabled_;
// Boolean for whether or not we are preserving references (either soft references or finalizers).
// If this is true, then we cannot return a referent (see comment in GetReferent).
bool preserving_references_ GUARDED_BY(lock_);
diff --git a/runtime/mirror/reference.cc b/runtime/mirror/reference.cc
new file mode 100644
index 0000000..533aaca
--- /dev/null
+++ b/runtime/mirror/reference.cc
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "reference.h"
+
+namespace art {
+namespace mirror {
+
+ReferenceClass* Reference::java_lang_ref_Reference_ = nullptr;
+
+void Reference::SetClass(ReferenceClass* java_lang_ref_Reference) {
+ CHECK(java_lang_ref_Reference_ == nullptr);
+ CHECK(java_lang_ref_Reference != nullptr);
+ java_lang_ref_Reference_ = java_lang_ref_Reference;
+}
+
+void Reference::ResetClass() {
+ CHECK(java_lang_ref_Reference_ != nullptr);
+ java_lang_ref_Reference_ = nullptr;
+}
+
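+// Visits the cached Reference class pointer as a root so that a moving collector can update it.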
+void Reference::VisitRoots(RootCallback* callback, void* arg) {
+ if (java_lang_ref_Reference_ != nullptr) {
+ callback(reinterpret_cast<mirror::Object**>(&java_lang_ref_Reference_),
+ arg, 0, kRootStickyClass);
+ }
+}
+
+} // namespace mirror
+} // namespace art
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 9c9d87b..15e0145 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -17,7 +17,10 @@
#ifndef ART_RUNTIME_MIRROR_REFERENCE_H_
#define ART_RUNTIME_MIRROR_REFERENCE_H_
+#include "class.h"
#include "object.h"
+#include "object_callbacks.h"
+#include "thread.h"
namespace art {
@@ -29,9 +32,11 @@
} // namespace gc
struct ReferenceOffsets;
+struct ReferenceClassOffsets;
struct FinalizerReferenceOffsets;
namespace mirror {
+class ReferenceClass;
// C++ mirror of java.lang.ref.Reference
class MANAGED Reference : public Object {
@@ -80,6 +85,15 @@
bool IsEnqueuable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static ReferenceClass* GetJavaLangRefReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CHECK(java_lang_ref_Reference_ != nullptr);
+ return ReadBarrier::BarrierForRoot<mirror::ReferenceClass, kWithReadBarrier>(
+ &java_lang_ref_Reference_);
+ }
+ static void SetClass(ReferenceClass* klass);
+ static void ResetClass();
+ static void VisitRoots(RootCallback* callback, void* arg);
+
private:
// Note: This avoids a read barrier, it should only be used by the GC.
HeapReference<Object>* GetReferentReferenceAddr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -92,12 +106,46 @@
HeapReference<Reference> queue_next_; // Note this is Java volatile:
HeapReference<Object> referent_; // Note this is Java volatile:
+ static ReferenceClass* java_lang_ref_Reference_;
+
friend struct art::ReferenceOffsets; // for verifying offset information
friend class gc::ReferenceProcessor;
friend class gc::ReferenceQueue;
DISALLOW_IMPLICIT_CONSTRUCTORS(Reference);
};
+// Tightly coupled with the ReferenceProcessor to provide switch for slow/fast path. Consistency
+// is maintained by ReferenceProcessor.
+class MANAGED ReferenceClass : public Class {
+ public:
+ static MemberOffset DisableIntrinsicOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(ReferenceClass, disable_intrinsic_);
+ }
+ static MemberOffset SlowPathEnabledOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(ReferenceClass, slow_path_enabled_);
+ }
+
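+ // Clears both flags; called by the class linker once java.lang.ref.Reference has been set up.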
+ void Init() {
+ disable_intrinsic_ = false;
+ slow_path_enabled_ = false;
+ }
+
+ bool GetSlowPathEnabled() const {
+ return slow_path_enabled_;
+ }
+ void SetSlowPathEnabled(bool enabled) {
+ slow_path_enabled_ = enabled;
+ }
+
+ private:
+ // Allows the runtime to safely enable/disable the intrinsic fast path for benchmarking.
+ int32_t disable_intrinsic_;
+ // When set, Reference.get() and ReferenceProcessor::GetReferent take the slow path.
+ int32_t slow_path_enabled_;
+
+ friend struct art::ReferenceClassOffsets; // for verifying offset information
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ReferenceClass);
+};
+
// C++ mirror of java.lang.ref.FinalizerReference
class MANAGED FinalizerReference : public Reference {
public:
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index 5128b19..982553d 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -48,6 +48,7 @@
kIntrinsicMinMaxFloat,
kIntrinsicMinMaxDouble,
kIntrinsicSqrt,
+ kIntrinsicGet,
kIntrinsicCharAt,
kIntrinsicCompareTo,
kIntrinsicIsEmptyOrLength,
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index f165ffa..267a22d 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -925,6 +925,7 @@
mirror::ArtField::VisitRoots(callback, arg);
mirror::ArtMethod::VisitRoots(callback, arg);
mirror::Class::VisitRoots(callback, arg);
+ mirror::Reference::VisitRoots(callback, arg);
mirror::StackTraceElement::VisitRoots(callback, arg);
mirror::String::VisitRoots(callback, arg);
mirror::Throwable::VisitRoots(callback, arg);