Merge "Update the Begin() of LargeObjectMapSpace"
diff --git a/cmdline/cmdline.h b/cmdline/cmdline.h
index 4dcaf80..dec9c83 100644
--- a/cmdline/cmdline.h
+++ b/cmdline/cmdline.h
@@ -293,7 +293,7 @@
template <typename Args = CmdlineArgs>
struct CmdlineMain {
int Main(int argc, char** argv) {
- InitLogging(argv);
+ InitLogging(argv, Runtime::Aborter);
std::unique_ptr<Args> args = std::unique_ptr<Args>(CreateArguments());
args_ = args.get();
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 5809dcd..cad5104 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -122,7 +122,7 @@
using RuntimeParser = ParsedOptions::RuntimeParser;
static void SetUpTestCase() {
- art::InitLogging(nullptr); // argv = null
+ art::InitLogging(nullptr, art::Runtime::Aborter); // argv = null
}
virtual void SetUp() {
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 8d64c65..afaec52 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1134,6 +1134,7 @@
VLOG(compiler) << "Adding " << descriptor << " to image classes";
for (size_t i = 0; i < klass->NumDirectInterfaces(); ++i) {
StackHandleScope<1> hs2(self);
+ // May cause thread suspension.
MaybeAddToImageClasses(hs2.NewHandle(mirror::Class::GetDirectInterface(self, klass, i)),
image_classes);
}
@@ -1153,15 +1154,14 @@
// Note: we can use object pointers because we suspend all threads.
class ClinitImageUpdate {
public:
- static ClinitImageUpdate* Create(std::unordered_set<std::string>* image_class_descriptors,
- Thread* self, ClassLinker* linker, std::string* error_msg) {
- std::unique_ptr<ClinitImageUpdate> res(new ClinitImageUpdate(image_class_descriptors, self,
+ static ClinitImageUpdate* Create(VariableSizedHandleScope& hs,
+ std::unordered_set<std::string>* image_class_descriptors,
+ Thread* self,
+ ClassLinker* linker) {
+ std::unique_ptr<ClinitImageUpdate> res(new ClinitImageUpdate(hs,
+ image_class_descriptors,
+ self,
linker));
- if (res->dex_cache_class_ == nullptr) {
- *error_msg = "Could not find DexCache class.";
- return nullptr;
- }
-
return res.release();
}
@@ -1171,7 +1171,9 @@
}
// Visitor for VisitReferences.
- void operator()(mirror::Object* object, MemberOffset field_offset, bool /* is_static */) const
+ void operator()(ObjPtr<mirror::Object> object,
+ MemberOffset field_offset,
+ bool /* is_static */) const
REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* ref = object->GetFieldObject<mirror::Object>(field_offset);
if (ref != nullptr) {
@@ -1180,8 +1182,8 @@
}
// java.lang.Reference visitor for VisitReferences.
- void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref ATTRIBUTE_UNUSED)
- const {}
+ void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const {}
// Ignore class native roots.
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
@@ -1193,6 +1195,9 @@
for (mirror::Class* klass_root : image_classes_) {
VisitClinitClassesObject(klass_root);
}
+ for (Handle<mirror::Class> h_klass : to_insert_) {
+ MaybeAddToImageClasses(h_klass, image_class_descriptors_);
+ }
}
private:
@@ -1219,20 +1224,19 @@
ClinitImageUpdate* const data_;
};
- ClinitImageUpdate(std::unordered_set<std::string>* image_class_descriptors, Thread* self,
- ClassLinker* linker)
- REQUIRES_SHARED(Locks::mutator_lock_) :
- image_class_descriptors_(image_class_descriptors), self_(self) {
+ ClinitImageUpdate(VariableSizedHandleScope& hs,
+ std::unordered_set<std::string>* image_class_descriptors,
+ Thread* self,
+ ClassLinker* linker) REQUIRES_SHARED(Locks::mutator_lock_)
+ : hs_(hs),
+ image_class_descriptors_(image_class_descriptors),
+ self_(self) {
CHECK(linker != nullptr);
CHECK(image_class_descriptors != nullptr);
// Make sure nobody interferes with us.
old_cause_ = self->StartAssertNoThreadSuspension("Boot image closure");
- // Find the interesting classes.
- dex_cache_class_ = linker->LookupClass(self, "Ljava/lang/DexCache;",
- ComputeModifiedUtf8Hash("Ljava/lang/DexCache;"), nullptr);
-
// Find all the already-marked classes.
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
FindImageClassesVisitor visitor(this);
@@ -1251,25 +1255,25 @@
marked_objects_.insert(object);
if (object->IsClass()) {
- // If it is a class, add it.
- StackHandleScope<1> hs(self_);
- MaybeAddToImageClasses(hs.NewHandle(object->AsClass()), image_class_descriptors_);
+ // Add to the TODO list since MaybeAddToImageClasses may cause thread suspension. Thread
+ // suspension is not safe to do in VisitObjects or VisitReferences.
+ to_insert_.push_back(hs_.NewHandle(object->AsClass()));
} else {
// Else visit the object's class.
VisitClinitClassesObject(object->GetClass());
}
// If it is not a DexCache, visit all references.
- mirror::Class* klass = object->GetClass();
- if (klass != dex_cache_class_) {
+ if (!object->IsDexCache()) {
object->VisitReferences(*this, *this);
}
}
+ VariableSizedHandleScope& hs_;
+ mutable std::vector<Handle<mirror::Class>> to_insert_;
mutable std::unordered_set<mirror::Object*> marked_objects_;
std::unordered_set<std::string>* const image_class_descriptors_;
std::vector<mirror::Class*> image_classes_;
- const mirror::Class* dex_cache_class_;
Thread* const self_;
const char* old_cause_;
@@ -1285,12 +1289,12 @@
// Suspend all threads.
ScopedSuspendAll ssa(__FUNCTION__);
+ VariableSizedHandleScope hs(Thread::Current());
std::string error_msg;
- std::unique_ptr<ClinitImageUpdate> update(ClinitImageUpdate::Create(image_classes_.get(),
+ std::unique_ptr<ClinitImageUpdate> update(ClinitImageUpdate::Create(hs,
+ image_classes_.get(),
Thread::Current(),
- runtime->GetClassLinker(),
- &error_msg));
- CHECK(update.get() != nullptr) << error_msg; // TODO: Soft failure?
+ runtime->GetClassLinker()));
// Do the marking.
update->Walk();
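
Illustrative sketch, not taken from this change: the ClinitImageUpdate rework above follows a "record during the no-suspension walk, process afterwards" pattern. The stand-alone class below shows that shape, with hypothetical names standing in for the ART types.

#include <string>
#include <unordered_set>
#include <vector>

struct Klass { std::string descriptor; };

class DeferredImageClassRecorder {
 public:
  // Invoked while thread suspension is forbidden: only queue the work.
  void VisitClass(Klass* klass) { to_insert_.push_back(klass); }

  // Invoked after the walk, once suspension is allowed again.
  void Flush(std::unordered_set<std::string>* image_class_descriptors) {
    for (Klass* klass : to_insert_) {
      image_class_descriptors->insert(klass->descriptor);  // safe to suspend here
    }
    to_insert_.clear();
  }

 private:
  std::vector<Klass*> to_insert_;
};
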
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index b580049..6f48779 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -94,7 +94,7 @@
/*low_4gb*/false,
&error_msg));
CHECK(ef.get() != nullptr) << error_msg;
- CHECK(ef->Load(false, /*low_4gb*/false, &error_msg)) << error_msg;
+ CHECK(ef->Load(file.get(), false, /*low_4gb*/false, &error_msg)) << error_msg;
EXPECT_EQ(dl_oatdata, ef->FindDynamicSymbolAddress("oatdata"));
EXPECT_EQ(dl_oatexec, ef->FindDynamicSymbolAddress("oatexec"));
EXPECT_EQ(dl_oatlastword, ef->FindDynamicSymbolAddress("oatlastword"));
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 13c73dc..8f15ea4 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -461,6 +461,12 @@
dex_cache);
DCHECK_EQ(dex_file->NumStringIds() != 0u, dex_cache->GetStrings() != nullptr);
AddDexCacheArrayRelocation(dex_cache->GetStrings(), start + layout.StringsOffset(), dex_cache);
+
+ if (dex_cache->GetResolvedMethodTypes() != nullptr) {
+ AddDexCacheArrayRelocation(dex_cache->GetResolvedMethodTypes(),
+ start + layout.MethodTypesOffset(),
+ dex_cache);
+ }
}
}
@@ -1323,7 +1329,7 @@
root->Assign(VisitReference(root->AsMirrorPtr()));
}
- ALWAYS_INLINE void operator() (mirror::Object* obj,
+ ALWAYS_INLINE void operator() (ObjPtr<mirror::Object> obj,
MemberOffset offset,
bool is_static ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1332,8 +1338,8 @@
obj->SetFieldObject</*kTransactionActive*/false>(offset, VisitReference(ref));
}
- ALWAYS_INLINE void operator() (mirror::Class* klass ATTRIBUTE_UNUSED,
- mirror::Reference* ref) const
+ ALWAYS_INLINE void operator() (ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) {
ref->SetReferent</*kTransactionActive*/false>(
VisitReference(ref->GetReferent<kWithoutReadBarrier>()));
@@ -1492,10 +1498,15 @@
// Calculate how big the intern table will be after being serialized.
InternTable* const intern_table = image_info.intern_table_.get();
CHECK_EQ(intern_table->WeakSize(), 0u) << " should have strong interned all the strings";
- image_info.intern_table_bytes_ = intern_table->WriteToMemory(nullptr);
+ if (intern_table->StrongSize() != 0u) {
+ image_info.intern_table_bytes_ = intern_table->WriteToMemory(nullptr);
+ }
// Calculate the size of the class table.
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
- image_info.class_table_bytes_ += image_info.class_table_->WriteToMemory(nullptr);
+ DCHECK_EQ(image_info.class_table_->NumZygoteClasses(), 0u);
+ if (image_info.class_table_->NumNonZygoteClasses() != 0u) {
+ image_info.class_table_bytes_ += image_info.class_table_->WriteToMemory(nullptr);
+ }
}
// Calculate bin slot offsets.
@@ -1941,18 +1952,19 @@
void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
- void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<Object> obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
- Object* ref = obj->GetFieldObject<Object, kVerifyNone>(offset);
+ ObjPtr<Object> ref = obj->GetFieldObject<Object, kVerifyNone>(offset);
// Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
// image.
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
offset,
- image_writer_->GetImageAddress(ref));
+ image_writer_->GetImageAddress(ref.Ptr()));
}
// java.lang.ref.Reference visitor.
- void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
mirror::Reference::ReferentOffset(),
@@ -1969,14 +1981,14 @@
FixupClassVisitor(ImageWriter* image_writer, Object* copy) : FixupVisitor(image_writer, copy) {
}
- void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<Object> obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
DCHECK(obj->IsClass());
FixupVisitor::operator()(obj, offset, /*is_static*/false);
}
- void operator()(mirror::Class* klass ATTRIBUTE_UNUSED,
- mirror::Reference* ref ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
LOG(FATAL) << "Reference not expected here.";
}
@@ -2045,7 +2057,7 @@
void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy) {
orig->FixupNativePointers(copy, target_ptr_size_, NativeLocationVisitor(this));
FixupClassVisitor visitor(this, copy);
- static_cast<mirror::Object*>(orig)->VisitReferences(visitor, visitor);
+ ObjPtr<mirror::Object>(orig)->VisitReferences(visitor, visitor);
// Remove the clinitThreadId. This is required for image determinism.
copy->SetClinitThreadId(static_cast<pid_t>(0));
@@ -2164,6 +2176,14 @@
mirror::DexCache::SetElementPtrSize(copy_fields, i, copy, target_ptr_size_);
}
}
+ mirror::MethodTypeDexCacheType* orig_method_types = orig_dex_cache->GetResolvedMethodTypes();
+ if (orig_method_types != nullptr) {
+ copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedMethodTypesOffset(),
+ NativeLocationInImage(orig_method_types),
+ PointerSize::k64);
+ orig_dex_cache->FixupResolvedMethodTypes(NativeCopyLocation(orig_method_types, orig_dex_cache),
+ ImageAddressVisitor(this));
+ }
// Remove the DexFile pointers. They will be fixed up when the runtime loads the oat file. Leaving
// compiler pointers in here will make the output non-deterministic.
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index 140c7f0..663cbaf 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -162,17 +162,17 @@
/*out*/bool* needs_taken_test) {
bool is_last_value = false;
int64_t stride_value = 0;
- return GenerateCode(context,
- instruction,
- is_last_value,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr, // nothing generated yet
- &stride_value,
- needs_finite_test,
- needs_taken_test)
+ return GenerateRangeOrLastValue(context,
+ instruction,
+ is_last_value,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr, // nothing generated yet
+ &stride_value,
+ needs_finite_test,
+ needs_taken_test)
&& (stride_value == -1 ||
stride_value == 0 ||
stride_value == 1); // avoid wrap-around anomalies.
@@ -187,17 +187,17 @@
bool is_last_value = false;
int64_t stride_value = 0;
bool b1, b2; // unused
- if (!GenerateCode(context,
- instruction,
- is_last_value,
- graph,
- block,
- lower,
- upper,
- nullptr,
- &stride_value,
- &b1,
- &b2)) {
+ if (!GenerateRangeOrLastValue(context,
+ instruction,
+ is_last_value,
+ graph,
+ block,
+ lower,
+ upper,
+ nullptr,
+ &stride_value,
+ &b1,
+ &b2)) {
LOG(FATAL) << "Failed precondition: CanGenerateRange()";
}
}
@@ -209,17 +209,17 @@
bool is_last_value = false;
int64_t stride_value = 0;
bool b1, b2; // unused
- if (!GenerateCode(context,
- context,
- is_last_value,
- graph,
- block,
- nullptr,
- nullptr,
- &taken_test,
- &stride_value,
- &b1,
- &b2)) {
+ if (!GenerateRangeOrLastValue(context,
+ context,
+ is_last_value,
+ graph,
+ block,
+ nullptr,
+ nullptr,
+ &taken_test,
+ &stride_value,
+ &b1,
+ &b2)) {
LOG(FATAL) << "Failed precondition: CanGenerateRange()";
}
return taken_test;
@@ -230,17 +230,17 @@
int64_t stride_value = 0;
bool needs_finite_test = false;
bool needs_taken_test = false;
- return GenerateCode(instruction,
- instruction,
- is_last_value,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr, // nothing generated yet
- &stride_value,
- &needs_finite_test,
- &needs_taken_test)
+ return GenerateRangeOrLastValue(instruction,
+ instruction,
+ is_last_value,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr, // nothing generated yet
+ &stride_value,
+ &needs_finite_test,
+ &needs_taken_test)
&& !needs_finite_test && !needs_taken_test;
}
@@ -251,17 +251,17 @@
bool is_last_value = true;
int64_t stride_value = 0;
bool b1, b2; // unused
- if (!GenerateCode(instruction,
- instruction,
- is_last_value,
- graph,
- block,
- &last_value,
- &last_value,
- nullptr,
- &stride_value,
- &b1,
- &b2)) {
+ if (!GenerateRangeOrLastValue(instruction,
+ instruction,
+ is_last_value,
+ graph,
+ block,
+ &last_value,
+ &last_value,
+ nullptr,
+ &stride_value,
+ &b1,
+ &b2)) {
LOG(FATAL) << "Failed precondition: CanGenerateLastValue()";
}
return last_value;
@@ -280,6 +280,12 @@
}
}
+bool InductionVarRange::IsFinite(HLoopInformation* loop) const {
+ HInductionVarAnalysis::InductionInfo *trip =
+ induction_analysis_->LookupInfo(loop, GetLoopControl(loop));
+ return trip != nullptr && !IsUnsafeTripCount(trip);
+}
+
//
// Private class methods.
//
@@ -732,17 +738,17 @@
return Value();
}
-bool InductionVarRange::GenerateCode(HInstruction* context,
- HInstruction* instruction,
- bool is_last_value,
- HGraph* graph,
- HBasicBlock* block,
- /*out*/HInstruction** lower,
- /*out*/HInstruction** upper,
- /*out*/HInstruction** taken_test,
- /*out*/int64_t* stride_value,
- /*out*/bool* needs_finite_test,
- /*out*/bool* needs_taken_test) const {
+bool InductionVarRange::GenerateRangeOrLastValue(HInstruction* context,
+ HInstruction* instruction,
+ bool is_last_value,
+ HGraph* graph,
+ HBasicBlock* block,
+ /*out*/HInstruction** lower,
+ /*out*/HInstruction** upper,
+ /*out*/HInstruction** taken_test,
+ /*out*/int64_t* stride_value,
+ /*out*/bool* needs_finite_test,
+ /*out*/bool* needs_taken_test) const {
HLoopInformation* loop = nullptr;
HInductionVarAnalysis::InductionInfo* info = nullptr;
HInductionVarAnalysis::InductionInfo* trip = nullptr;
@@ -760,12 +766,17 @@
*needs_taken_test = IsBodyTripCount(trip);
// Handle last value request.
if (is_last_value) {
- if (info->induction_class != HInductionVarAnalysis::kLinear) {
- return false;
- } else if (*stride_value > 0) {
- lower = nullptr;
+ if (info->induction_class == HInductionVarAnalysis::kLinear) {
+ if (*stride_value > 0) {
+ lower = nullptr;
+ } else {
+ upper = nullptr;
+ }
+ } else if (info->induction_class == HInductionVarAnalysis::kPeriodic) {
+ DCHECK(!in_body);
+ return GenerateLastValuePeriodic(info, trip, graph, block, lower, needs_taken_test);
} else {
- upper = nullptr;
+ return false;
}
}
// Code generation for taken test: generate the code when requested or otherwise analyze
@@ -787,6 +798,56 @@
GenerateCode(info, trip, graph, block, upper, in_body, /* is_min */ false);
}
+bool InductionVarRange::GenerateLastValuePeriodic(HInductionVarAnalysis::InductionInfo* info,
+ HInductionVarAnalysis::InductionInfo* trip,
+ HGraph* graph,
+ HBasicBlock* block,
+ /*out*/HInstruction** result,
+ /*out*/bool* needs_taken_test) const {
+ DCHECK(info->induction_class == HInductionVarAnalysis::kPeriodic);
+ // Count period.
+ int32_t period = 1;
+ for (HInductionVarAnalysis::InductionInfo* p = info;
+ p->induction_class == HInductionVarAnalysis::kPeriodic;
+ p = p->op_b, ++period) {}
+ // Handle periodic(x, y) case for restricted types.
+ if (period != 2 ||
+ trip->op_a->type != Primitive::kPrimInt ||
+ (info->type != Primitive::kPrimInt && info->type != Primitive::kPrimBoolean)) {
+ return false; // TODO: easy to generalize
+ }
+ HInstruction* x_instr = nullptr;
+ HInstruction* y_instr = nullptr;
+ HInstruction* trip_expr = nullptr;
+ if (GenerateCode(info->op_a, nullptr, graph, block, graph ? &x_instr : nullptr, false, false) &&
+ GenerateCode(info->op_b, nullptr, graph, block, graph ? &y_instr : nullptr, false, false) &&
+ GenerateCode(trip->op_a, nullptr, graph, block, graph ? &trip_expr : nullptr, false, false)) {
+ // During actual code generation (graph != nullptr),
+ // generate is_even ? x : y select instruction.
+ if (graph != nullptr) {
+ HInstruction* is_even = Insert(block, new (graph->GetArena()) HEqual(
+ Insert(block, new (graph->GetArena()) HAnd(
+ Primitive::kPrimInt, trip_expr, graph->GetIntConstant(1))),
+ graph->GetIntConstant(0), kNoDexPc));
+ *result = Insert(block, new (graph->GetArena()) HSelect(is_even, x_instr, y_instr, kNoDexPc));
+ }
+ // Guard select with taken test if needed.
+ if (*needs_taken_test) {
+ HInstruction* taken_test = nullptr;
+ if (!GenerateCode(
+ trip->op_b, nullptr, graph, block, graph ? &taken_test : nullptr, false, false)) {
+ return false;
+ } else if (graph != nullptr) {
+ *result = Insert(block,
+ new (graph->GetArena()) HSelect(taken_test, *result, x_instr, kNoDexPc));
+ }
+ *needs_taken_test = false; // taken care of
+ }
+ return true;
+ }
+ return false;
+}
+
bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
HInductionVarAnalysis::InductionInfo* trip,
HGraph* graph, // when set, code is generated
@@ -812,6 +873,7 @@
// Invariants.
switch (info->operation) {
case HInductionVarAnalysis::kAdd:
+ case HInductionVarAnalysis::kXor:
case HInductionVarAnalysis::kLT:
case HInductionVarAnalysis::kLE:
case HInductionVarAnalysis::kGT:
@@ -823,6 +885,8 @@
switch (info->operation) {
case HInductionVarAnalysis::kAdd:
operation = new (graph->GetArena()) HAdd(type, opa, opb); break;
+ case HInductionVarAnalysis::kXor:
+ operation = new (graph->GetArena()) HXor(type, opa, opb); break;
case HInductionVarAnalysis::kLT:
operation = new (graph->GetArena()) HLessThan(opa, opb); break;
case HInductionVarAnalysis::kLE:
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
index 8951300..2f70046 100644
--- a/compiler/optimizing/induction_var_range.h
+++ b/compiler/optimizing/induction_var_range.h
@@ -139,6 +139,11 @@
induction_analysis_->VisitLoop(loop);
}
+ /**
+ * Checks if header logic of a loop terminates.
+ */
+ bool IsFinite(HLoopInformation* loop) const;
+
private:
/*
* Enum used in IsConstant() request.
@@ -218,17 +223,24 @@
* success. With values nullptr, the method can be used to determine if code generation
* would be successful without generating actual code yet.
*/
- bool GenerateCode(HInstruction* context,
- HInstruction* instruction,
- bool is_last_val,
- HGraph* graph,
- HBasicBlock* block,
- /*out*/ HInstruction** lower,
- /*out*/ HInstruction** upper,
- /*out*/ HInstruction** taken_test,
- /*out*/ int64_t* stride_value,
- /*out*/ bool* needs_finite_test,
- /*out*/ bool* needs_taken_test) const;
+ bool GenerateRangeOrLastValue(HInstruction* context,
+ HInstruction* instruction,
+ bool is_last_val,
+ HGraph* graph,
+ HBasicBlock* block,
+ /*out*/ HInstruction** lower,
+ /*out*/ HInstruction** upper,
+ /*out*/ HInstruction** taken_test,
+ /*out*/ int64_t* stride_value,
+ /*out*/ bool* needs_finite_test,
+ /*out*/ bool* needs_taken_test) const;
+
+ bool GenerateLastValuePeriodic(HInductionVarAnalysis::InductionInfo* info,
+ HInductionVarAnalysis::InductionInfo* trip,
+ HGraph* graph,
+ HBasicBlock* block,
+ /*out*/HInstruction** result,
+ /*out*/ bool* needs_taken_test) const;
bool GenerateCode(HInductionVarAnalysis::InductionInfo* info,
HInductionVarAnalysis::InductionInfo* trip,
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 7347686..820fa29 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -168,7 +168,9 @@
const int16_t declaring_class_def_index_; // declaring class's def's dex index.
bool value_killed_by_loop_side_effects_; // value of this location may be killed by loop
// side effects because this location is stored
- // into inside a loop.
+ // into inside a loop. This gives
+ // better info on whether a singleton's location
+ // value may be killed by loop side effects.
DISALLOW_COPY_AND_ASSIGN(HeapLocation);
};
@@ -420,8 +422,26 @@
void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
HeapLocation* location = VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
has_heap_stores_ = true;
- if (instruction->GetBlock()->GetLoopInformation() != nullptr) {
- location->SetValueKilledByLoopSideEffects(true);
+ if (location->GetReferenceInfo()->IsSingleton()) {
+ // A singleton's location value may be killed by loop side effects if it's
+ // defined before that loop, and it's stored into inside that loop.
+ HLoopInformation* loop_info = instruction->GetBlock()->GetLoopInformation();
+ if (loop_info != nullptr) {
+ HInstruction* ref = location->GetReferenceInfo()->GetReference();
+ DCHECK(ref->IsNewInstance());
+ if (loop_info->IsDefinedOutOfTheLoop(ref)) {
+ // ref's location value may be killed by this loop's side effects.
+ location->SetValueKilledByLoopSideEffects(true);
+ } else {
+ // ref is defined inside this loop so this loop's side effects cannot
+ // kill its location value at the loop header since ref/its location doesn't
+ // exist yet at the loop header.
+ }
+ }
+ } else {
+ // For non-singletons, value_killed_by_loop_side_effects_ is initialized to
+ // true.
+ DCHECK_EQ(location->IsValueKilledByLoopSideEffects(), true);
}
}
@@ -810,9 +830,6 @@
if (loop_info != nullptr) {
// instruction is a store in the loop so the loop must do writes.
DCHECK(side_effects_.GetLoopEffects(loop_info->GetHeader()).DoesAnyWrite());
- // If it's a singleton, IsValueKilledByLoopSideEffects() must be true.
- DCHECK(!ref_info->IsSingleton() ||
- heap_location_collector_.GetHeapLocation(idx)->IsValueKilledByLoopSideEffects());
if (loop_info->IsDefinedOutOfTheLoop(original_ref)) {
DCHECK(original_ref->GetBlock()->Dominates(loop_info->GetPreHeader()));
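
Illustrative sketch, not taken from this change: the decision added to VisitInstanceFieldSet above reduces to the predicate below, with plain booleans standing in for the real HIR queries.

// A heap location's value may be killed by loop side effects when either the
// reference is not a singleton (conservative default, initialized to true),
// or the singleton is created before the loop and stored into inside it.
bool ValueMayBeKilledByLoopSideEffects(bool is_singleton,
                                       bool store_is_inside_a_loop,
                                       bool ref_defined_outside_that_loop) {
  if (!is_singleton) {
    return true;
  }
  return store_is_inside_a_loop && ref_defined_outside_that_loop;
}
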
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 33fa87d..703a104 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -20,17 +20,37 @@
namespace art {
-// TODO: Generalize to cycles, as found by induction analysis?
+// Detects a potential induction cycle. Note that the actual induction
+// information is queried later if its last value is really needed.
static bool IsPhiInduction(HPhi* phi, ArenaSet<HInstruction*>* iset) {
DCHECK(iset->empty());
HInputsRef inputs = phi->GetInputs();
- if (inputs.size() == 2 && (inputs[1]->IsAdd() || inputs[1]->IsSub())) {
- HInstruction* addsub = inputs[1];
- if (addsub->InputAt(0) == phi || addsub->InputAt(1) == phi) {
- if (addsub->GetUses().HasExactlyOneElement()) {
- iset->insert(phi);
- iset->insert(addsub);
- return true;
+ if (inputs.size() == 2) {
+ HLoopInformation* loop_info = phi->GetBlock()->GetLoopInformation();
+ HInstruction* op = inputs[1];
+ if (op->GetBlock()->GetLoopInformation() == loop_info) {
+ // Chase a simple chain back to phi.
+ while (!op->IsPhi()) {
+ // Binary operation with single use in same loop.
+ if (!op->IsBinaryOperation() || !op->GetUses().HasExactlyOneElement()) {
+ return false;
+ }
+ // Chase back either through left or right operand.
+ iset->insert(op);
+ HInstruction* a = op->InputAt(0);
+ HInstruction* b = op->InputAt(1);
+ if (a->GetBlock()->GetLoopInformation() == loop_info && b != phi) {
+ op = a;
+ } else if (b->GetBlock()->GetLoopInformation() == loop_info) {
+ op = b;
+ } else {
+ return false;
+ }
+ }
+ // Closed the cycle?
+ if (op == phi) {
+ iset->insert(phi);
+ return true;
}
}
}
@@ -62,16 +82,23 @@
return false;
}
+// Does the loop-body consist of the induction cycle and direct control flow only?
static bool IsEmptyBody(HBasicBlock* block, ArenaSet<HInstruction*>* iset) {
- HInstruction* phi = block->GetFirstPhi();
- HInstruction* i = block->GetFirstInstruction();
- return phi == nullptr && iset->find(i) != iset->end() &&
- i->GetNext() != nullptr && i->GetNext()->IsGoto();
+ if (block->GetFirstPhi() == nullptr) {
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
+ if (!instruction->IsGoto() && iset->find(instruction) == iset->end()) {
+ return false;
+ }
+ }
+ return true;
+ }
+ return false;
}
+// Remove the instruction from the graph. A bit more elaborate than the usual
+// instruction removal, since there may be a cycle in the use structure.
static void RemoveFromCycle(HInstruction* instruction) {
- // A bit more elaborate than the usual instruction removal,
- // since there may be a cycle in the use structure.
instruction->RemoveAsUserOfAllInputs();
instruction->RemoveEnvironmentUsers();
instruction->GetBlock()->RemoveInstructionOrPhi(instruction, /*ensure_safety=*/ false);
@@ -196,7 +223,9 @@
}
SimplifyInduction(node);
SimplifyBlocks(node);
- RemoveIfEmptyLoop(node);
+ if (node->inner == nullptr) {
+ RemoveIfEmptyInnerLoop(node);
+ }
}
}
@@ -233,7 +262,7 @@
block->RemoveInstruction(instruction);
}
}
- // Remove trivial control flow blocks from the loop body, again usually resulting
+ // Remove trivial control flow blocks from the loop-body, again usually resulting
// from eliminating induction cycles.
if (block->GetPredecessors().size() == 1 &&
block->GetSuccessors().size() == 1 &&
@@ -252,9 +281,13 @@
}
}
-void HLoopOptimization::RemoveIfEmptyLoop(LoopNode* node) {
+void HLoopOptimization::RemoveIfEmptyInnerLoop(LoopNode* node) {
HBasicBlock* header = node->loop_info->GetHeader();
HBasicBlock* preheader = node->loop_info->GetPreHeader();
+ // Ensure loop header logic is finite.
+ if (!induction_range_.IsFinite(node->loop_info)) {
+ return;
+ }
// Ensure there is only a single loop-body (besides the header).
HBasicBlock* body = nullptr;
for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
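
Illustrative sketch, not taken from this change: the reworked IsEmptyBody above accepts a loop body whose instructions are all either part of the recognized induction cycle or the closing goto. A simplified stand-alone version with hypothetical types:

#include <set>
#include <vector>

struct Instr { bool is_goto; };

bool BodyIsEmpty(const std::vector<Instr*>& body_instructions,
                 const std::set<Instr*>& induction_cycle) {
  for (Instr* instruction : body_instructions) {
    if (!instruction->is_goto && induction_cycle.count(instruction) == 0) {
      return false;  // something besides the induction cycle does real work
    }
  }
  return true;
}
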
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 9c4b462..4113357 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -62,7 +62,7 @@
void SimplifyInduction(LoopNode* node);
void SimplifyBlocks(LoopNode* node);
- void RemoveIfEmptyLoop(LoopNode* node);
+ void RemoveIfEmptyInnerLoop(LoopNode* node);
bool IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
HInstruction* instruction,
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index e26fa7f..0ce1362 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1090,7 +1090,7 @@
original_argc = argc;
original_argv = argv;
- InitLogging(argv);
+ InitLogging(argv, Runtime::Aborter);
// Skip over argv[0].
argv++;
@@ -1920,15 +1920,14 @@
TimingLogger::ScopedTiming t("dex2oat OatFile copy", timings_);
std::unique_ptr<File> in(OS::OpenFileForReading(oat_filenames_[i]));
std::unique_ptr<File> out(OS::CreateEmptyFile(oat_unstripped_[i]));
- size_t buffer_size = 8192;
- std::unique_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
- while (true) {
- int bytes_read = TEMP_FAILURE_RETRY(read(in->Fd(), buffer.get(), buffer_size));
- if (bytes_read <= 0) {
- break;
- }
- bool write_ok = out->WriteFully(buffer.get(), bytes_read);
- CHECK(write_ok);
+ int64_t in_length = in->GetLength();
+ if (in_length < 0) {
+ PLOG(ERROR) << "Failed to get the length of oat file: " << in->GetPath();
+ return false;
+ }
+ if (!out->Copy(in.get(), 0, in_length)) {
+ PLOG(ERROR) << "Failed to copy oat file to file: " << out->GetPath();
+ return false;
}
if (out->FlushCloseOrErase() != 0) {
PLOG(ERROR) << "Failed to flush and close copied oat file: " << oat_unstripped_[i];
diff --git a/dexdump/dexdump_main.cc b/dexdump/dexdump_main.cc
index 5c032a0..74cae3c 100644
--- a/dexdump/dexdump_main.cc
+++ b/dexdump/dexdump_main.cc
@@ -29,6 +29,7 @@
#include <unistd.h>
#include "base/logging.h"
+#include "runtime.h"
#include "mem_map.h"
namespace art {
@@ -59,7 +60,7 @@
*/
int dexdumpDriver(int argc, char** argv) {
// Art specific set up.
- InitLogging(argv);
+ InitLogging(argv, Runtime::Aborter);
MemMap::Init();
// Reset options.
diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc
index 728e389..2203fba 100644
--- a/dexlayout/dexlayout_main.cc
+++ b/dexlayout/dexlayout_main.cc
@@ -31,6 +31,7 @@
#include "base/logging.h"
#include "jit/offline_profiling_info.h"
+#include "runtime.h"
#include "mem_map.h"
namespace art {
@@ -65,7 +66,7 @@
*/
int DexlayoutDriver(int argc, char** argv) {
// Art specific set up.
- InitLogging(argv);
+ InitLogging(argv, Runtime::Aborter);
MemMap::Init();
// Reset options.
diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc
index a1bde0e..68473c4 100644
--- a/dexlist/dexlist.cc
+++ b/dexlist/dexlist.cc
@@ -213,7 +213,7 @@
*/
int dexlistDriver(int argc, char** argv) {
// Art specific set up.
- InitLogging(argv);
+ InitLogging(argv, Runtime::Aborter);
MemMap::Init();
// Reset options.
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index f3eb663..986f265 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -442,7 +442,7 @@
return ERROR_OAT_FILE;
}
- const std::string& file_path = oat_in->GetFile().GetPath();
+ const std::string& file_path = oat_in->GetFilePath();
const OatHeader* oat_header = GetOatHeader(oat_in);
if (oat_header == nullptr) {
@@ -677,6 +677,16 @@
mirror::DexCache::SetElementPtrSize(copy_fields, j, copy, pointer_size);
}
}
+ mirror::MethodTypeDexCacheType* orig_method_types = orig_dex_cache->GetResolvedMethodTypes();
+ mirror::MethodTypeDexCacheType* relocated_method_types =
+ RelocatedAddressOfPointer(orig_method_types);
+ copy_dex_cache->SetField64<false>(
+ mirror::DexCache::ResolvedMethodTypesOffset(),
+ static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_method_types)));
+ if (orig_method_types != nullptr) {
+ orig_dex_cache->FixupResolvedMethodTypes(RelocatedCopyOf(orig_method_types),
+ RelocatedPointerVisitor(this));
+ }
}
}
@@ -715,15 +725,16 @@
}
-void PatchOat::PatchVisitor::operator() (mirror::Object* obj, MemberOffset off,
+void PatchOat::PatchVisitor::operator() (ObjPtr<mirror::Object> obj,
+ MemberOffset off,
bool is_static_unused ATTRIBUTE_UNUSED) const {
mirror::Object* referent = obj->GetFieldObject<mirror::Object, kVerifyNone>(off);
mirror::Object* moved_object = patcher_->RelocatedAddressOfPointer(referent);
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
}
-void PatchOat::PatchVisitor::operator() (mirror::Class* cls ATTRIBUTE_UNUSED,
- mirror::Reference* ref) const {
+void PatchOat::PatchVisitor::operator() (ObjPtr<mirror::Class> cls ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Reference> ref) const {
MemberOffset off = mirror::Reference::ReferentOffset();
mirror::Object* referent = ref->GetReferent();
DCHECK(referent == nullptr ||
@@ -846,7 +857,7 @@
}
OatHeader* oat_header = reinterpret_cast<OatHeader*>(oat_file->Begin() + rodata_sec->sh_offset);
if (!oat_header->IsValid()) {
- LOG(ERROR) << "Elf file " << oat_file->GetFile().GetPath() << " has an invalid oat header";
+ LOG(ERROR) << "Elf file " << oat_file->GetFilePath() << " has an invalid oat header";
return false;
}
oat_header->RelocateOat(delta_);
@@ -854,10 +865,11 @@
}
bool PatchOat::PatchElf() {
- if (oat_file_->Is64Bit())
+ if (oat_file_->Is64Bit()) {
return PatchElf<ElfFileImpl64>(oat_file_->GetImpl64());
- else
+ } else {
return PatchElf<ElfFileImpl32>(oat_file_->GetImpl32());
+ }
}
template <typename ElfFileImpl>
@@ -1358,15 +1370,13 @@
}
static int patchoat(int argc, char **argv) {
- InitLogging(argv);
+ InitLogging(argv, Runtime::Aborter);
MemMap::Init();
const bool debug = kIsDebugBuild;
orig_argc = argc;
orig_argv = argv;
TimingLogger timings("patcher", false, false);
- InitLogging(argv);
-
// Skip over the command name.
argv++;
argc--;
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index a97b051..e7a3e91 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -198,10 +198,10 @@
public:
PatchVisitor(PatchOat* patcher, mirror::Object* copy) : patcher_(patcher), copy_(copy) {}
~PatchVisitor() {}
- void operator() (mirror::Object* obj, MemberOffset off, bool b) const
+ void operator() (ObjPtr<mirror::Object> obj, MemberOffset off, bool b) const
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// For reference classes.
- void operator() (mirror::Class* cls, mirror::Reference* ref) const
+ void operator() (ObjPtr<mirror::Class> cls, ObjPtr<mirror::Reference> ref) const
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// TODO: Consider using these for updating native class roots?
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
diff --git a/profman/profman.cc b/profman/profman.cc
index 7722e80..b17816b 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -33,6 +33,7 @@
#include "base/unix_file/fd_file.h"
#include "dex_file.h"
#include "jit/offline_profiling_info.h"
+#include "runtime.h"
#include "utils.h"
#include "zip_archive.h"
#include "profile_assistant.h"
@@ -143,7 +144,7 @@
original_argc = argc;
original_argv = argv;
- InitLogging(argv);
+ InitLogging(argv, Runtime::Aborter);
// Skip over the command name.
argv++;
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 73c6cf1..1aa6a00 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -227,9 +227,10 @@
case kDirect:
return !IsDirect() || IsStatic();
case kVirtual: {
- // We have an error if we are direct or a non-default, non-miranda interface method.
+ // We have an error if we are direct or a non-copied (i.e. not part of a real class) interface
+ // method.
mirror::Class* methods_class = GetDeclaringClass();
- return IsDirect() || (methods_class->IsInterface() && !IsDefault() && !IsMiranda());
+ return IsDirect() || (methods_class->IsInterface() && !IsCopied());
}
case kSuper:
// Constructors and static methods are called with invoke-direct.
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index 12d3be7..f24a862 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -296,7 +296,7 @@
return const_iterator(this, NumBuckets());
}
- bool Empty() {
+ bool Empty() const {
return Size() == 0;
}
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 08c036e..6b21a56 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -21,14 +21,12 @@
#include <sstream>
#include "base/mutex.h"
-#include "runtime.h"
#include "thread-inl.h"
#include "utils.h"
// Headers for LogMessage::LogLine.
#ifdef ART_TARGET_ANDROID
#include <android/log.h>
-#include <android/set_abort_message.h>
#else
#include <sys/types.h>
#include <unistd.h>
@@ -57,17 +55,7 @@
: "art";
}
-NO_RETURN
-static void RuntimeAborter(const char* abort_message) {
-#ifdef __ANDROID__
- android_set_abort_message(abort_message);
-#else
- UNUSED(abort_message);
-#endif
- Runtime::Abort(abort_message);
-}
-
-void InitLogging(char* argv[]) {
+void InitLogging(char* argv[], AbortFunction& abort_function) {
if (gCmdLine.get() != nullptr) {
return;
}
@@ -97,7 +85,8 @@
#else
#define INIT_LOGGING_DEFAULT_LOGGER android::base::StderrLogger
#endif
- android::base::InitLogging(argv, INIT_LOGGING_DEFAULT_LOGGER, RuntimeAborter);
+ android::base::InitLogging(argv, INIT_LOGGING_DEFAULT_LOGGER,
+ std::move<AbortFunction>(abort_function));
#undef INIT_LOGGING_DEFAULT_LOGGER
}
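
Illustrative sketch, not taken from this change: with the aborter passed in as a parameter, a tool that does not link the runtime could register its own hook. PlainAborter and the commented-out main are hypothetical; they only mirror the dex2oat/dexdump call sites above.

#include <cstdio>
#include <cstdlib>

using AbortFunction = void(const char*);  // as declared in base/logging.h

[[noreturn]] void PlainAborter(const char* abort_message) {
  fprintf(stderr, "%s\n", abort_message != nullptr ? abort_message : "aborting");
  abort();
}

// int main(int argc, char** argv) {
//   art::InitLogging(argv, PlainAborter);
//   ...
// }
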
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 5f84204..a173ac2 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -29,6 +29,9 @@
using ::android::base::LogSeverity;
using ::android::base::ScopedLogSeverity;
+// Abort function.
+using AbortFunction = void(const char*);
+
// The members of this struct are the valid arguments to VLOG and VLOG_IS_ON in code,
// and the "-verbose:" command line argument.
struct LogVerbosity {
@@ -71,7 +74,7 @@
// The tag (or '*' for the global level) comes first, followed by a colon
// and a letter indicating the minimum priority level we're expected to log.
// This can be used to reveal or conceal logs with specific tags.
-extern void InitLogging(char* argv[]);
+extern void InitLogging(char* argv[], AbortFunction& default_aborter);
// Returns the command line used to invoke the current tool or null if InitLogging hasn't been
// performed.
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index dba9b8f..126187f 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -36,7 +36,8 @@
return FindClass(self, descriptor, ScopedNullHandle<mirror::ClassLoader>());
}
-inline mirror::Class* ClassLinker::FindArrayClass(Thread* self, mirror::Class** element_class) {
+inline mirror::Class* ClassLinker::FindArrayClass(Thread* self,
+ ObjPtr<mirror::Class>* element_class) {
for (size_t i = 0; i < kFindArrayCacheSize; ++i) {
// Read the cached array class once to avoid races with other threads setting it.
mirror::Class* array_class = find_array_class_cache_[i].Read();
@@ -49,7 +50,7 @@
descriptor += (*element_class)->GetDescriptor(&temp);
StackHandleScope<2> hs(Thread::Current());
Handle<mirror::ClassLoader> class_loader(hs.NewHandle((*element_class)->GetClassLoader()));
- HandleWrapper<mirror::Class> h_element_class(hs.NewHandleWrapper(element_class));
+ HandleWrapperObjPtr<mirror::Class> h_element_class(hs.NewHandleWrapper(element_class));
mirror::Class* array_class = FindClass(self, descriptor.c_str(), class_loader);
if (array_class != nullptr) {
// Benign races in storing array class and incrementing index.
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index eb2316b..63de76c 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -316,7 +316,6 @@
ClassLinker::ClassLinker(InternTable* intern_table)
// dex_lock_ is recursive as it may be used in stack dumping.
: dex_lock_("ClassLinker dex lock", kDexLock),
- dex_cache_boot_image_class_lookup_required_(false),
failed_dex_cache_class_lookups_(0),
class_roots_(nullptr),
array_iftable_(nullptr),
@@ -969,7 +968,6 @@
return false;
}
}
- dex_cache_boot_image_class_lookup_required_ = true;
std::vector<const OatFile*> oat_files =
runtime->GetOatFileManager().RegisterImageOatFiles(spaces);
DCHECK(!oat_files.empty());
@@ -1256,7 +1254,6 @@
// Add image classes into the class table for the class loader, and fixup the dex caches and
// class loader fields.
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
- ClassTable* table = InsertClassTableForClassLoader(class_loader.Get());
// Dex cache array fixup is all or nothing, we must reject app images that have mixed since we
// rely on clobbering the dex cache arrays in the image to forward to bss.
size_t num_dex_caches_with_bss_arrays = 0;
@@ -1377,6 +1374,7 @@
std::memory_order_relaxed);
}
+ mirror::MethodTypeDexCachePair::Initialize(method_types);
dex_cache->SetResolvedMethodTypes(method_types);
}
}
@@ -1392,103 +1390,42 @@
StackHandleScope<1> hs3(self);
RegisterDexFileLocked(*dex_file, hs3.NewHandle(dex_cache));
}
- GcRoot<mirror::Class>* const types = dex_cache->GetResolvedTypes();
- const size_t num_types = dex_cache->NumResolvedTypes();
- if (new_class_set == nullptr) {
- for (int32_t j = 0; j < static_cast<int32_t>(num_types); j++) {
- // The image space is not yet added to the heap, avoid read barriers.
- mirror::Class* klass = types[j].Read();
- // There may also be boot image classes,
- if (space->HasAddress(klass)) {
- DCHECK_NE(klass->GetStatus(), mirror::Class::kStatusError);
- // Update the class loader from the one in the image class loader to the one that loaded
- // the app image.
- klass->SetClassLoader(class_loader.Get());
- // The resolved type could be from another dex cache, go through the dex cache just in
- // case. May be null for array classes.
- if (klass->GetDexCacheStrings() != nullptr) {
- DCHECK(!klass->IsArrayClass());
- klass->SetDexCacheStrings(klass->GetDexCache()->GetStrings());
- }
- // If there are multiple dex caches, there may be the same class multiple times
- // in different dex caches. Check for this since inserting will add duplicates
- // otherwise.
- if (num_dex_caches > 1) {
- mirror::Class* existing = table->LookupByDescriptor(klass);
- if (existing != nullptr) {
- DCHECK_EQ(existing, klass) << PrettyClass(klass);
- } else {
- table->Insert(klass);
- }
- } else {
- table->Insert(klass);
- }
- // Double checked VLOG to avoid overhead.
- if (VLOG_IS_ON(image)) {
- VLOG(image) << PrettyClass(klass) << " " << klass->GetStatus();
- if (!klass->IsArrayClass()) {
- VLOG(image) << "From " << klass->GetDexCache()->GetDexFile()->GetBaseLocation();
- }
- VLOG(image) << "Direct methods";
- for (ArtMethod& m : klass->GetDirectMethods(kRuntimePointerSize)) {
- VLOG(image) << PrettyMethod(&m);
- }
- VLOG(image) << "Virtual methods";
- for (ArtMethod& m : klass->GetVirtualMethods(kRuntimePointerSize)) {
- VLOG(image) << PrettyMethod(&m);
- }
- }
- } else {
- DCHECK(klass == nullptr || heap->ObjectIsInBootImageSpace(klass))
- << klass << " " << PrettyClass(klass);
- }
- }
- }
if (kIsDebugBuild) {
+ CHECK(new_class_set != nullptr);
+ GcRoot<mirror::Class>* const types = dex_cache->GetResolvedTypes();
+ const size_t num_types = dex_cache->NumResolvedTypes();
for (int32_t j = 0; j < static_cast<int32_t>(num_types); j++) {
// The image space is not yet added to the heap, avoid read barriers.
mirror::Class* klass = types[j].Read();
if (space->HasAddress(klass)) {
DCHECK_NE(klass->GetStatus(), mirror::Class::kStatusError);
- if (kIsDebugBuild) {
- if (new_class_set != nullptr) {
- auto it = new_class_set->Find(GcRoot<mirror::Class>(klass));
- DCHECK(it != new_class_set->end());
- DCHECK_EQ(it->Read(), klass);
- mirror::Class* super_class = klass->GetSuperClass();
- if (super_class != nullptr && !heap->ObjectIsInBootImageSpace(super_class)) {
- auto it2 = new_class_set->Find(GcRoot<mirror::Class>(super_class));
- DCHECK(it2 != new_class_set->end());
- DCHECK_EQ(it2->Read(), super_class);
- }
- } else {
- DCHECK_EQ(table->LookupByDescriptor(klass), klass);
- mirror::Class* super_class = klass->GetSuperClass();
- if (super_class != nullptr && !heap->ObjectIsInBootImageSpace(super_class)) {
- CHECK_EQ(table->LookupByDescriptor(super_class), super_class);
- }
+ auto it = new_class_set->Find(GcRoot<mirror::Class>(klass));
+ DCHECK(it != new_class_set->end());
+ DCHECK_EQ(it->Read(), klass);
+ mirror::Class* super_class = klass->GetSuperClass();
+ if (super_class != nullptr && !heap->ObjectIsInBootImageSpace(super_class)) {
+ auto it2 = new_class_set->Find(GcRoot<mirror::Class>(super_class));
+ DCHECK(it2 != new_class_set->end());
+ DCHECK_EQ(it2->Read(), super_class);
+ }
+ for (ArtMethod& m : klass->GetDirectMethods(kRuntimePointerSize)) {
+ const void* code = m.GetEntryPointFromQuickCompiledCode();
+ const void* oat_code = m.IsInvokable() ? GetQuickOatCodeFor(&m) : code;
+ if (!IsQuickResolutionStub(code) &&
+ !IsQuickGenericJniStub(code) &&
+ !IsQuickToInterpreterBridge(code) &&
+ !m.IsNative()) {
+ DCHECK_EQ(code, oat_code) << PrettyMethod(&m);
}
}
- if (kIsDebugBuild) {
- for (ArtMethod& m : klass->GetDirectMethods(kRuntimePointerSize)) {
- const void* code = m.GetEntryPointFromQuickCompiledCode();
- const void* oat_code = m.IsInvokable() ? GetQuickOatCodeFor(&m) : code;
- if (!IsQuickResolutionStub(code) &&
- !IsQuickGenericJniStub(code) &&
- !IsQuickToInterpreterBridge(code) &&
- !m.IsNative()) {
- DCHECK_EQ(code, oat_code) << PrettyMethod(&m);
- }
- }
- for (ArtMethod& m : klass->GetVirtualMethods(kRuntimePointerSize)) {
- const void* code = m.GetEntryPointFromQuickCompiledCode();
- const void* oat_code = m.IsInvokable() ? GetQuickOatCodeFor(&m) : code;
- if (!IsQuickResolutionStub(code) &&
- !IsQuickGenericJniStub(code) &&
- !IsQuickToInterpreterBridge(code) &&
- !m.IsNative()) {
- DCHECK_EQ(code, oat_code) << PrettyMethod(&m);
- }
+ for (ArtMethod& m : klass->GetVirtualMethods(kRuntimePointerSize)) {
+ const void* code = m.GetEntryPointFromQuickCompiledCode();
+ const void* oat_code = m.IsInvokable() ? GetQuickOatCodeFor(&m) : code;
+ if (!IsQuickResolutionStub(code) &&
+ !IsQuickGenericJniStub(code) &&
+ !IsQuickToInterpreterBridge(code) &&
+ !m.IsNative()) {
+ DCHECK_EQ(code, oat_code) << PrettyMethod(&m);
}
}
}
@@ -1805,9 +1742,6 @@
temp_set = ClassTable::ClassSet(space->Begin() + class_table_section.Offset(),
/*make copy*/false,
&read_count);
- if (!app_image) {
- dex_cache_boot_image_class_lookup_required_ = false;
- }
VLOG(image) << "Adding class table classes took " << PrettyDuration(NanoTime() - start_time2);
}
if (app_image) {
@@ -1815,7 +1749,7 @@
if (!UpdateAppImageClassLoadersAndDexCaches(space,
class_loader,
dex_caches,
- added_class_table ? &temp_set : nullptr,
+ &temp_set,
/*out*/&forward_dex_cache_arrays,
/*out*/error_msg)) {
return false;
@@ -1825,10 +1759,8 @@
UpdateClassLoaderAndResolvedStringsVisitor visitor(space,
class_loader.Get(),
forward_dex_cache_arrays);
- if (added_class_table) {
- for (GcRoot<mirror::Class>& root : temp_set) {
- visitor(root.Read());
- }
+ for (GcRoot<mirror::Class>& root : temp_set) {
+ visitor(root.Read());
}
// forward_dex_cache_arrays is true iff we copied all of the dex cache arrays into the .bss.
// In this case, madvise away the dex cache arrays section of the image to reduce RAM usage and
@@ -1962,9 +1894,6 @@
}
void ClassLinker::VisitClasses(ClassVisitor* visitor) {
- if (dex_cache_boot_image_class_lookup_required_) {
- AddBootImageClassesToClassTable();
- }
Thread* const self = Thread::Current();
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
// Not safe to have thread suspension when we are holding a lock.
@@ -2033,7 +1962,7 @@
// Add 100 in case new classes get loaded when we are filling in the object array.
class_table_size = NumZygoteClasses() + NumNonZygoteClasses() + 100;
}
- mirror::Class* class_type = mirror::Class::GetJavaLangClass();
+ ObjPtr<mirror::Class> class_type = mirror::Class::GetJavaLangClass();
mirror::Class* array_of_class = FindArrayClass(self, &class_type);
classes.Assign(
mirror::ObjectArray<mirror::Class>::Alloc(self, array_of_class, class_table_size));
@@ -2365,7 +2294,7 @@
const char* descriptor,
size_t hash,
Handle<mirror::ClassLoader> class_loader,
- mirror::Class** result) {
+ ObjPtr<mirror::Class>* result) {
// Termination case: boot class-loader.
if (IsBootClassLoader(soa, class_loader.Get())) {
// The boot class loader, search the boot class path.
@@ -2523,12 +2452,12 @@
}
} else {
ScopedObjectAccessUnchecked soa(self);
- mirror::Class* cp_klass;
+ ObjPtr<mirror::Class> cp_klass;
if (FindClassInPathClassLoader(soa, self, descriptor, hash, class_loader, &cp_klass)) {
// The chain was understood. So the value in cp_klass is either the class we were looking
// for, or not found.
if (cp_klass != nullptr) {
- return cp_klass;
+ return cp_klass.Ptr();
}
// TODO: We handle the boot classpath loader in FindClassInPathClassLoader. Try to unify this
// and the branch above. TODO: throw the right exception here.
@@ -3608,17 +3537,6 @@
if (existing != nullptr) {
return existing;
}
- if (kIsDebugBuild &&
- !klass->IsTemp() &&
- class_loader == nullptr &&
- dex_cache_boot_image_class_lookup_required_) {
- // Check a class loaded with the system class loader matches one in the image if the class
- // is in the image.
- existing = LookupClassFromBootImage(descriptor);
- if (existing != nullptr) {
- CHECK_EQ(klass, existing);
- }
- }
VerifyObject(klass);
class_table->InsertWithHash(klass, hash);
if (class_loader != nullptr) {
@@ -3658,90 +3576,15 @@
const char* descriptor,
size_t hash,
mirror::ClassLoader* class_loader) {
- {
- ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
- ClassTable* const class_table = ClassTableForClassLoader(class_loader);
- if (class_table != nullptr) {
- mirror::Class* result = class_table->Lookup(descriptor, hash);
- if (result != nullptr) {
- return result;
- }
+ ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
+ ClassTable* const class_table = ClassTableForClassLoader(class_loader);
+ if (class_table != nullptr) {
+ mirror::Class* result = class_table->Lookup(descriptor, hash);
+ if (result != nullptr) {
+ return result;
}
}
- if (class_loader != nullptr || !dex_cache_boot_image_class_lookup_required_) {
- return nullptr;
- }
- // Lookup failed but need to search dex_caches_.
- mirror::Class* result = LookupClassFromBootImage(descriptor);
- if (result != nullptr) {
- result = InsertClass(descriptor, result, hash);
- } else {
- // Searching the image dex files/caches failed, we don't want to get into this situation
- // often as map searches are faster, so after kMaxFailedDexCacheLookups move all image
- // classes into the class table.
- constexpr uint32_t kMaxFailedDexCacheLookups = 1000;
- if (++failed_dex_cache_class_lookups_ > kMaxFailedDexCacheLookups) {
- AddBootImageClassesToClassTable();
- }
- }
- return result;
-}
-
-static std::vector<mirror::ObjectArray<mirror::DexCache>*> GetImageDexCaches(
- std::vector<gc::space::ImageSpace*> image_spaces) REQUIRES_SHARED(Locks::mutator_lock_) {
- CHECK(!image_spaces.empty());
- std::vector<mirror::ObjectArray<mirror::DexCache>*> dex_caches_vector;
- for (gc::space::ImageSpace* image_space : image_spaces) {
- mirror::Object* root = image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
- DCHECK(root != nullptr);
- dex_caches_vector.push_back(root->AsObjectArray<mirror::DexCache>());
- }
- return dex_caches_vector;
-}
-
-void ClassLinker::AddBootImageClassesToClassTable() {
- if (dex_cache_boot_image_class_lookup_required_) {
- AddImageClassesToClassTable(Runtime::Current()->GetHeap()->GetBootImageSpaces(),
- /*class_loader*/nullptr);
- dex_cache_boot_image_class_lookup_required_ = false;
- }
-}
-
-void ClassLinker::AddImageClassesToClassTable(std::vector<gc::space::ImageSpace*> image_spaces,
- mirror::ClassLoader* class_loader) {
- Thread* self = Thread::Current();
- WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
- ScopedAssertNoThreadSuspension ants("Moving image classes to class table");
-
- ClassTable* const class_table = InsertClassTableForClassLoader(class_loader);
-
- std::string temp;
- std::vector<mirror::ObjectArray<mirror::DexCache>*> dex_caches_vector =
- GetImageDexCaches(image_spaces);
- for (mirror::ObjectArray<mirror::DexCache>* dex_caches : dex_caches_vector) {
- for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
- mirror::DexCache* dex_cache = dex_caches->Get(i);
- GcRoot<mirror::Class>* types = dex_cache->GetResolvedTypes();
- for (int32_t j = 0, num_types = dex_cache->NumResolvedTypes(); j < num_types; j++) {
- mirror::Class* klass = types[j].Read();
- if (klass != nullptr) {
- DCHECK_EQ(klass->GetClassLoader(), class_loader);
- const char* descriptor = klass->GetDescriptor(&temp);
- size_t hash = ComputeModifiedUtf8Hash(descriptor);
- mirror::Class* existing = class_table->Lookup(descriptor, hash);
- if (existing != nullptr) {
- CHECK_EQ(existing, klass) << PrettyClassAndClassLoader(existing) << " != "
- << PrettyClassAndClassLoader(klass);
- } else {
- class_table->Insert(klass);
- if (log_new_class_table_roots_) {
- new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
- }
- }
- }
- }
- }
- }
+ return nullptr;
}
class MoveClassTableToPreZygoteVisitor : public ClassLoaderVisitor {
@@ -3765,28 +3608,6 @@
VisitClassLoaders(&visitor);
}
-mirror::Class* ClassLinker::LookupClassFromBootImage(const char* descriptor) {
- ScopedAssertNoThreadSuspension ants("Image class lookup");
- std::vector<mirror::ObjectArray<mirror::DexCache>*> dex_caches_vector =
- GetImageDexCaches(Runtime::Current()->GetHeap()->GetBootImageSpaces());
- for (mirror::ObjectArray<mirror::DexCache>* dex_caches : dex_caches_vector) {
- for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
- mirror::DexCache* dex_cache = dex_caches->Get(i);
- const DexFile* dex_file = dex_cache->GetDexFile();
- // Try binary searching the type index by descriptor.
- const DexFile::TypeId* type_id = dex_file->FindTypeId(descriptor);
- if (type_id != nullptr) {
- uint16_t type_idx = dex_file->GetIndexForTypeId(*type_id);
- mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
- if (klass != nullptr) {
- return klass;
- }
- }
- }
- }
- return nullptr;
-}
-
// Look up classes by hash and descriptor and put all matching ones in the result array.
class LookupClassesVisitor : public ClassLoaderVisitor {
public:
@@ -3812,9 +3633,6 @@
void ClassLinker::LookupClasses(const char* descriptor, std::vector<mirror::Class*>& result) {
result.clear();
- if (dex_cache_boot_image_class_lookup_required_) {
- AddBootImageClassesToClassTable();
- }
Thread* const self = Thread::Current();
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
const size_t hash = ComputeModifiedUtf8Hash(descriptor);
@@ -5217,14 +5035,6 @@
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
}
CHECK_EQ(existing, klass.Get());
- if (kIsDebugBuild && class_loader == nullptr && dex_cache_boot_image_class_lookup_required_) {
- // Check a class loaded with the system class loader matches one in the image if the class
- // is in the image.
- mirror::Class* const image_class = LookupClassFromBootImage(descriptor);
- if (image_class != nullptr) {
- CHECK_EQ(klass.Get(), existing) << descriptor;
- }
- }
if (log_new_class_table_roots_) {
new_class_roots_.push_back(GcRoot<mirror::Class>(h_new_class.Get()));
}
@@ -7988,7 +7798,7 @@
// other than by looking at the shorty ?
const size_t num_method_args = strlen(dex_file.StringDataByIdx(proto_id.shorty_idx_)) - 1;
- mirror::Class* class_type = mirror::Class::GetJavaLangClass();
+ ObjPtr<mirror::Class> class_type = mirror::Class::GetJavaLangClass();
mirror::Class* array_of_class = FindArrayClass(self, &class_type);
Handle<mirror::ObjectArray<mirror::Class>> method_params(hs.NewHandle(
mirror::ObjectArray<mirror::Class>::Alloc(self, array_of_class, num_method_args)));
@@ -8093,9 +7903,6 @@
void ClassLinker::DumpForSigQuit(std::ostream& os) {
ScopedObjectAccess soa(Thread::Current());
- if (dex_cache_boot_image_class_lookup_required_) {
- AddBootImageClassesToClassTable();
- }
ReaderMutexLock mu(soa.Self(), *Locks::classlinker_classes_lock_);
os << "Zygote loaded classes=" << NumZygoteClasses() << " post zygote classes="
<< NumNonZygoteClasses() << "\n";
@@ -8131,9 +7938,6 @@
}
size_t ClassLinker::NumLoadedClasses() {
- if (dex_cache_boot_image_class_lookup_required_) {
- AddBootImageClassesToClassTable();
- }
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
// Only return non zygote classes since these are the ones which apps care about.
return NumNonZygoteClasses();
@@ -8349,12 +8153,12 @@
}
}
-void ClassLinker::InsertDexFileInToClassLoader(mirror::Object* dex_file,
- mirror::ClassLoader* class_loader) {
+void ClassLinker::InsertDexFileInToClassLoader(ObjPtr<mirror::Object> dex_file,
+ ObjPtr<mirror::ClassLoader> class_loader) {
DCHECK(dex_file != nullptr);
Thread* const self = Thread::Current();
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
- ClassTable* const table = ClassTableForClassLoader(class_loader);
+ ClassTable* const table = ClassTableForClassLoader(class_loader.Ptr());
DCHECK(table != nullptr);
if (table->InsertStrongRoot(dex_file) && class_loader != nullptr) {
// It was not already inserted, perform the write barrier to let the GC know the class loader's
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 43ffc8e..6437010 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -181,7 +181,7 @@
const char* descriptor,
size_t hash,
Handle<mirror::ClassLoader> class_loader,
- mirror::Class** result)
+ ObjPtr<mirror::Class>* result)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
@@ -192,7 +192,7 @@
REQUIRES(!dex_lock_);
// Finds the array class given for the element class.
- mirror::Class* FindArrayClass(Thread* self, mirror::Class** element_class)
+ mirror::Class* FindArrayClass(Thread* self, ObjPtr<mirror::Class>* element_class)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
@@ -561,17 +561,6 @@
return class_roots;
}
- // Move all of the boot image classes into the class table for faster lookups.
- void AddBootImageClassesToClassTable()
- REQUIRES(!Locks::classlinker_classes_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Add image classes to the class table.
- void AddImageClassesToClassTable(std::vector<gc::space::ImageSpace*> image_spaces,
- mirror::ClassLoader* class_loader)
- REQUIRES(!Locks::classlinker_classes_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Move the class table to the pre-zygote table to reduce memory usage. This works by ensuring
// that no more classes are ever added to the pre zygote table, which ensures that the pages
// always remain shared dirty instead of private dirty.
@@ -617,7 +606,8 @@
REQUIRES_SHARED(Locks::mutator_lock_);
// May be called with null class_loader due to legacy code. b/27954959
- void InsertDexFileInToClassLoader(mirror::Object* dex_file, mirror::ClassLoader* class_loader)
+ void InsertDexFileInToClassLoader(ObjPtr<mirror::Object> dex_file,
+ ObjPtr<mirror::ClassLoader> class_loader)
REQUIRES(!Locks::classlinker_classes_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1050,9 +1040,6 @@
void EnsureSkipAccessChecksMethods(Handle<mirror::Class> c)
REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Class* LookupClassFromBootImage(const char* descriptor)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Register a class loader and create its class table and allocator. Should not be called if
// these are already created.
void RegisterClassLoader(mirror::ClassLoader* class_loader)
@@ -1157,8 +1144,6 @@
// New class roots, only used by CMS since the GC needs to mark these in the pause.
std::vector<GcRoot<mirror::Class>> new_class_roots_ GUARDED_BY(Locks::classlinker_classes_lock_);
- // Do we need to search dex caches to find boot image classes?
- bool dex_cache_boot_image_class_lookup_required_;
// Number of times we've searched dex caches for a class. After a certain number of misses we move
// the classes into the class_table_ to avoid dex cache based searches.
Atomic<uint32_t> failed_dex_cache_class_lookups_;
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 6279717..4fac830 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -216,7 +216,7 @@
EXPECT_STREQ(direct_interface0->GetDescriptor(&temp), "Ljava/lang/Cloneable;");
ObjPtr<mirror::Class> direct_interface1 = mirror::Class::GetDirectInterface(self, array, 1);
EXPECT_STREQ(direct_interface1->GetDescriptor(&temp), "Ljava/io/Serializable;");
- mirror::Class* array_ptr = array->GetComponentType();
+ ObjPtr<mirror::Class> array_ptr = array->GetComponentType();
EXPECT_OBJ_PTR_EQ(class_linker_->FindArrayClass(self, &array_ptr), array.Get());
PointerSize pointer_size = class_linker_->GetImagePointerSize();
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index 2ae7e8c..d435050 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -156,7 +156,7 @@
return ComputeModifiedUtf8Hash(descriptor);
}
-bool ClassTable::InsertStrongRoot(mirror::Object* obj) {
+bool ClassTable::InsertStrongRoot(ObjPtr<mirror::Object> obj) {
WriterMutexLock mu(Thread::Current(), lock_);
DCHECK(obj != nullptr);
for (GcRoot<mirror::Object>& root : strong_roots_) {
@@ -167,7 +167,7 @@
strong_roots_.push_back(GcRoot<mirror::Object>(obj));
// If `obj` is a dex cache associated with a new oat file with GC roots, add it to oat_files_.
if (obj->IsDexCache()) {
- const DexFile* dex_file = down_cast<mirror::DexCache*>(obj)->GetDexFile();
+ const DexFile* dex_file = ObjPtr<mirror::DexCache>::DownCast(obj)->GetDexFile();
if (dex_file != nullptr && dex_file->GetOatDexFile() != nullptr) {
const OatFile* oat_file = dex_file->GetOatDexFile()->GetOatFile();
if (!oat_file->GetBssGcRoots().empty() && !ContainsElement(oat_files_, oat_file)) {
diff --git a/runtime/class_table.h b/runtime/class_table.h
index acb15c7..20e434d 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -27,6 +27,7 @@
#include "base/mutex.h"
#include "dex_file.h"
#include "gc_root.h"
+#include "obj_ptr.h"
#include "object_callbacks.h"
#include "runtime.h"
@@ -136,7 +137,7 @@
REQUIRES_SHARED(Locks::mutator_lock_);
// Return true if we inserted the strong root, false if it already exists.
- bool InsertStrongRoot(mirror::Object* obj)
+ bool InsertStrongRoot(ObjPtr<mirror::Object> obj)
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 193f6ee..5409fcb 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -57,7 +57,7 @@
// everything else. In case you want to see all messages, comment out the line.
setenv("ANDROID_LOG_TAGS", "*:e", 1);
- art::InitLogging(argv);
+ art::InitLogging(argv, art::Runtime::Aborter);
LOG(INFO) << "Running main() from common_runtime_test.cc...";
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
diff --git a/runtime/dex_file_annotations.cc b/runtime/dex_file_annotations.cc
index 576c4aa..e538334 100644
--- a/runtime/dex_file_annotations.cc
+++ b/runtime/dex_file_annotations.cc
@@ -254,7 +254,7 @@
return nullptr;
}
- mirror::Class* annotation_member_class =
+ ObjPtr<mirror::Class> annotation_member_class =
soa.Decode<mirror::Class>(WellKnownClasses::libcore_reflect_AnnotationMember).Ptr();
mirror::Class* annotation_member_array_class =
class_linker->FindArrayClass(self, &annotation_member_class);
@@ -731,7 +731,7 @@
if (annotation_item == nullptr) {
return nullptr;
}
- mirror::Class* string_class = mirror::String::GetJavaLangString();
+ ObjPtr<mirror::Class> string_class = mirror::String::GetJavaLangString();
Handle<mirror::Class> string_array_class(hs.NewHandle(
Runtime::Current()->GetClassLinker()->FindArrayClass(Thread::Current(), &string_class)));
if (string_array_class.Get() == nullptr) {
@@ -757,7 +757,7 @@
if (annotation_item == nullptr) {
return nullptr;
}
- mirror::Class* class_class = mirror::Class::GetJavaLangClass();
+ ObjPtr<mirror::Class> class_class = mirror::Class::GetJavaLangClass();
Handle<mirror::Class> class_array_class(hs.NewHandle(
Runtime::Current()->GetClassLinker()->FindArrayClass(Thread::Current(), &class_class)));
if (class_array_class.Get() == nullptr) {
@@ -839,8 +839,8 @@
Thread* self = Thread::Current();
ScopedObjectAccessUnchecked soa(self);
StackHandleScope<1> hs(self);
- mirror::Class* annotation_array_class =
- soa.Decode<mirror::Class>(WellKnownClasses::java_lang_annotation_Annotation__array).Ptr();
+ ObjPtr<mirror::Class> annotation_array_class =
+ soa.Decode<mirror::Class>(WellKnownClasses::java_lang_annotation_Annotation__array);
mirror::Class* annotation_array_array_class =
Runtime::Current()->GetClassLinker()->FindArrayClass(self, &annotation_array_class);
if (annotation_array_array_class == nullptr) {
@@ -1049,7 +1049,7 @@
StackHandleScope<5> hs(Thread::Current());
// Extract the parameters' names String[].
- mirror::Class* string_class = mirror::String::GetJavaLangString();
+ ObjPtr<mirror::Class> string_class = mirror::String::GetJavaLangString();
Handle<mirror::Class> string_array_class(hs.NewHandle(
Runtime::Current()->GetClassLinker()->FindArrayClass(Thread::Current(), &string_class)));
if (UNLIKELY(string_array_class.Get() == nullptr)) {
@@ -1139,7 +1139,7 @@
return nullptr;
}
StackHandleScope<1> hs(Thread::Current());
- mirror::Class* class_class = mirror::Class::GetJavaLangClass();
+ ObjPtr<mirror::Class> class_class = mirror::Class::GetJavaLangClass();
Handle<mirror::Class> class_array_class(hs.NewHandle(
Runtime::Current()->GetClassLinker()->FindArrayClass(hs.Self(), &class_class)));
if (class_array_class.Get() == nullptr) {
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 096f003..2ea7bb6 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -36,8 +36,7 @@
ElfFileImpl<ElfTypes>::ElfFileImpl(File* file, bool writable,
bool program_header_only,
uint8_t* requested_base)
- : file_(file),
- writable_(writable),
+ : writable_(writable),
program_header_only_(program_header_only),
header_(nullptr),
base_address_(nullptr),
@@ -74,7 +73,7 @@
prot = PROT_READ;
flags = MAP_PRIVATE;
}
- if (!elf_file->Setup(prot, flags, low_4gb, error_msg)) {
+ if (!elf_file->Setup(file, prot, flags, low_4gb, error_msg)) {
return nullptr;
}
return elf_file.release();
@@ -89,39 +88,44 @@
std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file(new ElfFileImpl<ElfTypes>
(file, (prot & PROT_WRITE) == PROT_WRITE, /*program_header_only*/false,
/*requested_base*/nullptr));
- if (!elf_file->Setup(prot, flags, low_4gb, error_msg)) {
+ if (!elf_file->Setup(file, prot, flags, low_4gb, error_msg)) {
return nullptr;
}
return elf_file.release();
}
template <typename ElfTypes>
-bool ElfFileImpl<ElfTypes>::Setup(int prot, int flags, bool low_4gb, std::string* error_msg) {
- int64_t temp_file_length = file_->GetLength();
+bool ElfFileImpl<ElfTypes>::Setup(File* file,
+ int prot,
+ int flags,
+ bool low_4gb,
+ std::string* error_msg) {
+ int64_t temp_file_length = file->GetLength();
if (temp_file_length < 0) {
errno = -temp_file_length;
*error_msg = StringPrintf("Failed to get length of file: '%s' fd=%d: %s",
- file_->GetPath().c_str(), file_->Fd(), strerror(errno));
+ file->GetPath().c_str(), file->Fd(), strerror(errno));
return false;
}
size_t file_length = static_cast<size_t>(temp_file_length);
if (file_length < sizeof(Elf_Ehdr)) {
*error_msg = StringPrintf("File size of %zd bytes not large enough to contain ELF header of "
"%zd bytes: '%s'", file_length, sizeof(Elf_Ehdr),
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (program_header_only_) {
// first just map ELF header to get program header size information
size_t elf_header_size = sizeof(Elf_Ehdr);
- if (!SetMap(MemMap::MapFile(elf_header_size,
+ if (!SetMap(file,
+ MemMap::MapFile(elf_header_size,
prot,
flags,
- file_->Fd(),
+ file->Fd(),
0,
low_4gb,
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
error_msg),
error_msg)) {
return false;
@@ -131,16 +135,17 @@
if (file_length < program_header_size) {
*error_msg = StringPrintf("File size of %zd bytes not large enough to contain ELF program "
"header of %zd bytes: '%s'", file_length,
- sizeof(Elf_Ehdr), file_->GetPath().c_str());
+ sizeof(Elf_Ehdr), file->GetPath().c_str());
return false;
}
- if (!SetMap(MemMap::MapFile(program_header_size,
+ if (!SetMap(file,
+ MemMap::MapFile(program_header_size,
prot,
flags,
- file_->Fd(),
+ file->Fd(),
0,
low_4gb,
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
error_msg),
error_msg)) {
*error_msg = StringPrintf("Failed to map ELF program headers: %s", error_msg->c_str());
@@ -148,13 +153,14 @@
}
} else {
// otherwise map entire file
- if (!SetMap(MemMap::MapFile(file_->GetLength(),
+ if (!SetMap(file,
+ MemMap::MapFile(file->GetLength(),
prot,
flags,
- file_->Fd(),
+ file->Fd(),
0,
low_4gb,
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
error_msg),
error_msg)) {
*error_msg = StringPrintf("Failed to map ELF file: %s", error_msg->c_str());
@@ -178,7 +184,7 @@
Elf_Shdr* shstrtab_section_header = GetSectionNameStringSection();
if (shstrtab_section_header == nullptr) {
*error_msg = StringPrintf("Failed to find shstrtab section header in ELF file: '%s'",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
@@ -186,7 +192,7 @@
dynamic_program_header_ = FindProgamHeaderByType(PT_DYNAMIC);
if (dynamic_program_header_ == nullptr) {
*error_msg = StringPrintf("Failed to find PT_DYNAMIC program header in ELF file: '%s'",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
@@ -200,7 +206,7 @@
Elf_Shdr* section_header = GetSectionHeader(i);
if (section_header == nullptr) {
*error_msg = StringPrintf("Failed to find section header for section %d in ELF file: '%s'",
- i, file_->GetPath().c_str());
+ i, file->GetPath().c_str());
return false;
}
switch (section_header->sh_type) {
@@ -245,7 +251,7 @@
if (reinterpret_cast<uint8_t*>(dynamic_section_start_) !=
Begin() + section_header->sh_offset) {
LOG(WARNING) << "Failed to find matching SHT_DYNAMIC for PT_DYNAMIC in "
- << file_->GetPath() << ": " << std::hex
+ << file->GetPath() << ": " << std::hex
<< reinterpret_cast<void*>(dynamic_section_start_)
<< " != " << reinterpret_cast<void*>(Begin() + section_header->sh_offset);
return false;
@@ -263,7 +269,7 @@
}
// Check for the existence of some sections.
- if (!CheckSectionsExist(error_msg)) {
+ if (!CheckSectionsExist(file, error_msg)) {
return false;
}
}
@@ -283,7 +289,7 @@
uint8_t** target, std::string* error_msg) {
if (Begin() + offset >= End()) {
*error_msg = StringPrintf("Offset %d is out of range for %s in ELF file: '%s'", offset, label,
- file_->GetPath().c_str());
+ file_path_.c_str());
return false;
}
*target = Begin() + offset;
@@ -324,11 +330,11 @@
}
template <typename ElfTypes>
-bool ElfFileImpl<ElfTypes>::CheckSectionsExist(std::string* error_msg) const {
+bool ElfFileImpl<ElfTypes>::CheckSectionsExist(File* file, std::string* error_msg) const {
if (!program_header_only_) {
// If in full mode, need section headers.
if (section_headers_start_ == nullptr) {
- *error_msg = StringPrintf("No section headers in ELF file: '%s'", file_->GetPath().c_str());
+ *error_msg = StringPrintf("No section headers in ELF file: '%s'", file->GetPath().c_str());
return false;
}
}
@@ -336,14 +342,14 @@
// This is redundant, but defensive.
if (dynamic_program_header_ == nullptr) {
*error_msg = StringPrintf("Failed to find PT_DYNAMIC program header in ELF file: '%s'",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
// Need a dynamic section. This is redundant, but defensive.
if (dynamic_section_start_ == nullptr) {
*error_msg = StringPrintf("Failed to find dynamic section in ELF file: '%s'",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
@@ -352,7 +358,7 @@
if (symtab_section_start_ != nullptr) {
// When there's a symtab, there should be a strtab.
if (strtab_section_start_ == nullptr) {
- *error_msg = StringPrintf("No strtab for symtab in ELF file: '%s'", file_->GetPath().c_str());
+ *error_msg = StringPrintf("No strtab for symtab in ELF file: '%s'", file->GetPath().c_str());
return false;
}
@@ -360,25 +366,25 @@
if (!CheckSectionsLinked(reinterpret_cast<const uint8_t*>(symtab_section_start_),
reinterpret_cast<const uint8_t*>(strtab_section_start_))) {
*error_msg = StringPrintf("Symtab is not linked to the strtab in ELF file: '%s'",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
}
// We always need a dynstr & dynsym.
if (dynstr_section_start_ == nullptr) {
- *error_msg = StringPrintf("No dynstr in ELF file: '%s'", file_->GetPath().c_str());
+ *error_msg = StringPrintf("No dynstr in ELF file: '%s'", file->GetPath().c_str());
return false;
}
if (dynsym_section_start_ == nullptr) {
- *error_msg = StringPrintf("No dynsym in ELF file: '%s'", file_->GetPath().c_str());
+ *error_msg = StringPrintf("No dynsym in ELF file: '%s'", file->GetPath().c_str());
return false;
}
// Need a hash section for dynamic symbol lookup.
if (hash_section_start_ == nullptr) {
*error_msg = StringPrintf("Failed to find hash section in ELF file: '%s'",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
@@ -386,7 +392,7 @@
if (!CheckSectionsLinked(reinterpret_cast<const uint8_t*>(hash_section_start_),
reinterpret_cast<const uint8_t*>(dynsym_section_start_))) {
*error_msg = StringPrintf("Hash section is not linked to the dynstr in ELF file: '%s'",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
@@ -397,9 +403,9 @@
// It might not be mapped, but we can compare against the file size.
int64_t offset = static_cast<int64_t>(GetHeader().e_shoff +
(GetHeader().e_shstrndx * GetHeader().e_shentsize));
- if (offset >= file_->GetLength()) {
+ if (offset >= file->GetLength()) {
*error_msg = StringPrintf("Shstrtab is not in the mapped ELF file: '%s'",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
}
@@ -408,15 +414,15 @@
}
template <typename ElfTypes>
-bool ElfFileImpl<ElfTypes>::SetMap(MemMap* map, std::string* error_msg) {
+bool ElfFileImpl<ElfTypes>::SetMap(File* file, MemMap* map, std::string* error_msg) {
if (map == nullptr) {
// MemMap::Open should have already set an error.
DCHECK(!error_msg->empty());
return false;
}
map_.reset(map);
- CHECK(map_.get() != nullptr) << file_->GetPath();
- CHECK(map_->Begin() != nullptr) << file_->GetPath();
+ CHECK(map_.get() != nullptr) << file->GetPath();
+ CHECK(map_->Begin() != nullptr) << file->GetPath();
header_ = reinterpret_cast<Elf_Ehdr*>(map_->Begin());
if ((ELFMAG0 != header_->e_ident[EI_MAG0])
@@ -425,7 +431,7 @@
|| (ELFMAG3 != header_->e_ident[EI_MAG3])) {
*error_msg = StringPrintf("Failed to find ELF magic value %d %d %d %d in %s, found %d %d %d %d",
ELFMAG0, ELFMAG1, ELFMAG2, ELFMAG3,
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
header_->e_ident[EI_MAG0],
header_->e_ident[EI_MAG1],
header_->e_ident[EI_MAG2],
@@ -436,90 +442,90 @@
if (elf_class != header_->e_ident[EI_CLASS]) {
*error_msg = StringPrintf("Failed to find expected EI_CLASS value %d in %s, found %d",
elf_class,
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
header_->e_ident[EI_CLASS]);
return false;
}
if (ELFDATA2LSB != header_->e_ident[EI_DATA]) {
*error_msg = StringPrintf("Failed to find expected EI_DATA value %d in %s, found %d",
ELFDATA2LSB,
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
header_->e_ident[EI_DATA]);
return false;
}
if (EV_CURRENT != header_->e_ident[EI_VERSION]) {
*error_msg = StringPrintf("Failed to find expected EI_VERSION value %d in %s, found %d",
EV_CURRENT,
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
header_->e_ident[EI_VERSION]);
return false;
}
if (ET_DYN != header_->e_type) {
*error_msg = StringPrintf("Failed to find expected e_type value %d in %s, found %d",
ET_DYN,
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
header_->e_type);
return false;
}
if (EV_CURRENT != header_->e_version) {
*error_msg = StringPrintf("Failed to find expected e_version value %d in %s, found %d",
EV_CURRENT,
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
header_->e_version);
return false;
}
if (0 != header_->e_entry) {
*error_msg = StringPrintf("Failed to find expected e_entry value %d in %s, found %d",
0,
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
static_cast<int32_t>(header_->e_entry));
return false;
}
if (0 == header_->e_phoff) {
*error_msg = StringPrintf("Failed to find non-zero e_phoff value in %s",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (0 == header_->e_shoff) {
*error_msg = StringPrintf("Failed to find non-zero e_shoff value in %s",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (0 == header_->e_ehsize) {
*error_msg = StringPrintf("Failed to find non-zero e_ehsize value in %s",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (0 == header_->e_phentsize) {
*error_msg = StringPrintf("Failed to find non-zero e_phentsize value in %s",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (0 == header_->e_phnum) {
*error_msg = StringPrintf("Failed to find non-zero e_phnum value in %s",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (0 == header_->e_shentsize) {
*error_msg = StringPrintf("Failed to find non-zero e_shentsize value in %s",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (0 == header_->e_shnum) {
*error_msg = StringPrintf("Failed to find non-zero e_shnum value in %s",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (0 == header_->e_shstrndx) {
*error_msg = StringPrintf("Failed to find non-zero e_shstrndx value in %s",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (header_->e_shstrndx >= header_->e_shnum) {
*error_msg = StringPrintf("Failed to find e_shnum value %d less than %d in %s",
header_->e_shstrndx,
header_->e_shnum,
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
@@ -528,14 +534,14 @@
*error_msg = StringPrintf("Failed to find e_phoff value %" PRIu64 " less than %zd in %s",
static_cast<uint64_t>(header_->e_phoff),
Size(),
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (header_->e_shoff >= Size()) {
*error_msg = StringPrintf("Failed to find e_shoff value %" PRIu64 " less than %zd in %s",
static_cast<uint64_t>(header_->e_shoff),
Size(),
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
}
@@ -577,7 +583,7 @@
template <typename ElfTypes>
typename ElfTypes::Sym* ElfFileImpl<ElfTypes>::GetSymbolSectionStart(
Elf_Word section_type) const {
- CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
+ CHECK(IsSymbolSectionType(section_type)) << file_path_ << " " << section_type;
switch (section_type) {
case SHT_SYMTAB: {
return symtab_section_start_;
@@ -597,7 +603,7 @@
template <typename ElfTypes>
const char* ElfFileImpl<ElfTypes>::GetStringSectionStart(
Elf_Word section_type) const {
- CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
+ CHECK(IsSymbolSectionType(section_type)) << file_path_ << " " << section_type;
switch (section_type) {
case SHT_SYMTAB: {
return strtab_section_start_;
@@ -615,7 +621,7 @@
template <typename ElfTypes>
const char* ElfFileImpl<ElfTypes>::GetString(Elf_Word section_type,
Elf_Word i) const {
- CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
+ CHECK(IsSymbolSectionType(section_type)) << file_path_ << " " << section_type;
if (i == 0) {
return nullptr;
}
@@ -673,7 +679,7 @@
template <typename ElfTypes>
typename ElfTypes::Phdr* ElfFileImpl<ElfTypes>::GetProgramHeader(Elf_Word i) const {
- CHECK_LT(i, GetProgramHeaderNum()) << file_->GetPath(); // Sanity check for caller.
+ CHECK_LT(i, GetProgramHeaderNum()) << file_path_; // Sanity check for caller.
uint8_t* program_header = GetProgramHeadersStart() + (i * GetHeader().e_phentsize);
if (program_header >= End()) {
return nullptr; // Failure condition.
@@ -701,7 +707,7 @@
typename ElfTypes::Shdr* ElfFileImpl<ElfTypes>::GetSectionHeader(Elf_Word i) const {
// Can only access arbitrary sections when we have the whole file, not just program header.
// Even if we Load(), it doesn't bring in all the sections.
- CHECK(!program_header_only_) << file_->GetPath();
+ CHECK(!program_header_only_) << file_path_;
if (i >= GetSectionHeaderNum()) {
return nullptr; // Failure condition.
}
@@ -716,7 +722,7 @@
typename ElfTypes::Shdr* ElfFileImpl<ElfTypes>::FindSectionByType(Elf_Word type) const {
// Can only access arbitrary sections when we have the whole file, not just program header.
// We could change this to switch on known types if they were detected during loading.
- CHECK(!program_header_only_) << file_->GetPath();
+ CHECK(!program_header_only_) << file_path_;
for (Elf_Word i = 0; i < GetSectionHeaderNum(); i++) {
Elf_Shdr* section_header = GetSectionHeader(i);
if (section_header->sh_type == type) {
@@ -802,8 +808,8 @@
template <typename ElfTypes>
typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetSymbolNum(Elf_Shdr& section_header) const {
CHECK(IsSymbolSectionType(section_header.sh_type))
- << file_->GetPath() << " " << section_header.sh_type;
- CHECK_NE(0U, section_header.sh_entsize) << file_->GetPath();
+ << file_path_ << " " << section_header.sh_type;
+ CHECK_NE(0U, section_header.sh_entsize) << file_path_;
return section_header.sh_size / section_header.sh_entsize;
}
@@ -819,7 +825,7 @@
template <typename ElfTypes>
typename ElfFileImpl<ElfTypes>::SymbolTable**
ElfFileImpl<ElfTypes>::GetSymbolTable(Elf_Word section_type) {
- CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
+ CHECK(IsSymbolSectionType(section_type)) << file_path_ << " " << section_type;
switch (section_type) {
case SHT_SYMTAB: {
return &symtab_symbol_table_;
@@ -837,8 +843,8 @@
template <typename ElfTypes>
typename ElfTypes::Sym* ElfFileImpl<ElfTypes>::FindSymbolByName(
Elf_Word section_type, const std::string& symbol_name, bool build_map) {
- CHECK(!program_header_only_) << file_->GetPath();
- CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
+ CHECK(!program_header_only_) << file_path_;
+ CHECK(IsSymbolSectionType(section_type)) << file_path_ << " " << section_type;
SymbolTable** symbol_table = GetSymbolTable(section_type);
if (*symbol_table != nullptr || build_map) {
@@ -928,7 +934,7 @@
template <typename ElfTypes>
const char* ElfFileImpl<ElfTypes>::GetString(Elf_Shdr& string_section,
Elf_Word i) const {
- CHECK(!program_header_only_) << file_->GetPath();
+ CHECK(!program_header_only_) << file_path_;
// TODO: remove this static_cast from enum when using -std=gnu++0x
if (static_cast<Elf_Word>(SHT_STRTAB) != string_section.sh_type) {
return nullptr; // Failure condition.
@@ -954,7 +960,7 @@
template <typename ElfTypes>
typename ElfTypes::Dyn& ElfFileImpl<ElfTypes>::GetDynamic(Elf_Word i) const {
- CHECK_LT(i, GetDynamicNum()) << file_->GetPath();
+ CHECK_LT(i, GetDynamicNum()) << file_path_;
return *(GetDynamicSectionStart() + i);
}
@@ -981,40 +987,40 @@
template <typename ElfTypes>
typename ElfTypes::Rel* ElfFileImpl<ElfTypes>::GetRelSectionStart(Elf_Shdr& section_header) const {
- CHECK(SHT_REL == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
+ CHECK(SHT_REL == section_header.sh_type) << file_path_ << " " << section_header.sh_type;
return reinterpret_cast<Elf_Rel*>(Begin() + section_header.sh_offset);
}
template <typename ElfTypes>
typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetRelNum(Elf_Shdr& section_header) const {
- CHECK(SHT_REL == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
- CHECK_NE(0U, section_header.sh_entsize) << file_->GetPath();
+ CHECK(SHT_REL == section_header.sh_type) << file_path_ << " " << section_header.sh_type;
+ CHECK_NE(0U, section_header.sh_entsize) << file_path_;
return section_header.sh_size / section_header.sh_entsize;
}
template <typename ElfTypes>
typename ElfTypes::Rel& ElfFileImpl<ElfTypes>::GetRel(Elf_Shdr& section_header, Elf_Word i) const {
- CHECK(SHT_REL == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
- CHECK_LT(i, GetRelNum(section_header)) << file_->GetPath();
+ CHECK(SHT_REL == section_header.sh_type) << file_path_ << " " << section_header.sh_type;
+ CHECK_LT(i, GetRelNum(section_header)) << file_path_;
return *(GetRelSectionStart(section_header) + i);
}
template <typename ElfTypes>
typename ElfTypes::Rela* ElfFileImpl<ElfTypes>::GetRelaSectionStart(Elf_Shdr& section_header) const {
- CHECK(SHT_RELA == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
+ CHECK(SHT_RELA == section_header.sh_type) << file_path_ << " " << section_header.sh_type;
return reinterpret_cast<Elf_Rela*>(Begin() + section_header.sh_offset);
}
template <typename ElfTypes>
typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetRelaNum(Elf_Shdr& section_header) const {
- CHECK(SHT_RELA == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
+ CHECK(SHT_RELA == section_header.sh_type) << file_path_ << " " << section_header.sh_type;
return section_header.sh_size / section_header.sh_entsize;
}
template <typename ElfTypes>
typename ElfTypes::Rela& ElfFileImpl<ElfTypes>::GetRela(Elf_Shdr& section_header, Elf_Word i) const {
- CHECK(SHT_RELA == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
- CHECK_LT(i, GetRelaNum(section_header)) << file_->GetPath();
+ CHECK(SHT_RELA == section_header.sh_type) << file_path_ << " " << section_header.sh_type;
+ CHECK_LT(i, GetRelaNum(section_header)) << file_path_;
return *(GetRelaSectionStart(section_header) + i);
}
@@ -1037,7 +1043,7 @@
std::ostringstream oss;
oss << "Program header #" << i << " has overflow in p_vaddr+p_memsz: 0x" << std::hex
<< program_header->p_vaddr << "+0x" << program_header->p_memsz << "=0x" << end_vaddr
- << " in ELF file \"" << file_->GetPath() << "\"";
+ << " in ELF file \"" << file_path_ << "\"";
*error_msg = oss.str();
*size = static_cast<size_t>(-1);
return false;
@@ -1048,13 +1054,13 @@
}
min_vaddr = RoundDown(min_vaddr, kPageSize);
max_vaddr = RoundUp(max_vaddr, kPageSize);
- CHECK_LT(min_vaddr, max_vaddr) << file_->GetPath();
+ CHECK_LT(min_vaddr, max_vaddr) << file_path_;
Elf_Addr loaded_size = max_vaddr - min_vaddr;
// Check that the loaded_size fits in size_t.
if (UNLIKELY(loaded_size > std::numeric_limits<size_t>::max())) {
std::ostringstream oss;
oss << "Loaded size is 0x" << std::hex << loaded_size << " but maximum size_t is 0x"
- << std::numeric_limits<size_t>::max() << " for ELF file \"" << file_->GetPath() << "\"";
+ << std::numeric_limits<size_t>::max() << " for ELF file \"" << file_path_ << "\"";
*error_msg = oss.str();
*size = static_cast<size_t>(-1);
return false;
@@ -1064,8 +1070,11 @@
}
template <typename ElfTypes>
-bool ElfFileImpl<ElfTypes>::Load(bool executable, bool low_4gb, std::string* error_msg) {
- CHECK(program_header_only_) << file_->GetPath();
+bool ElfFileImpl<ElfTypes>::Load(File* file,
+ bool executable,
+ bool low_4gb,
+ std::string* error_msg) {
+ CHECK(program_header_only_) << file->GetPath();
if (executable) {
InstructionSet elf_ISA = GetInstructionSetFromELF(GetHeader().e_machine, GetHeader().e_flags);
@@ -1082,7 +1091,7 @@
Elf_Phdr* program_header = GetProgramHeader(i);
if (program_header == nullptr) {
*error_msg = StringPrintf("No program header for entry %d in ELF file %s.",
- i, file_->GetPath().c_str());
+ i, file->GetPath().c_str());
return false;
}
@@ -1106,11 +1115,11 @@
// non-zero, the segments require the specific address specified,
// which either was specified in the file or follows from base_address_,
// which we already set after the first zero segment.
- int64_t temp_file_length = file_->GetLength();
+ int64_t temp_file_length = file->GetLength();
if (temp_file_length < 0) {
errno = -temp_file_length;
*error_msg = StringPrintf("Failed to get length of file: '%s' fd=%d: %s",
- file_->GetPath().c_str(), file_->Fd(), strerror(errno));
+ file->GetPath().c_str(), file->Fd(), strerror(errno));
return false;
}
size_t file_length = static_cast<size_t>(temp_file_length);
@@ -1122,7 +1131,7 @@
reserve_base_override = requested_base_;
}
std::string reservation_name("ElfFile reservation for ");
- reservation_name += file_->GetPath();
+ reservation_name += file->GetPath();
size_t loaded_size;
if (!GetLoadedSize(&loaded_size, error_msg)) {
DCHECK(!error_msg->empty());
@@ -1178,7 +1187,7 @@
*error_msg = StringPrintf("Invalid p_filesz > p_memsz (%" PRIu64 " > %" PRIu64 "): %s",
static_cast<uint64_t>(program_header->p_filesz),
static_cast<uint64_t>(program_header->p_memsz),
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (program_header->p_filesz < program_header->p_memsz &&
@@ -1187,14 +1196,14 @@
" < %" PRIu64 "): %s",
static_cast<uint64_t>(program_header->p_filesz),
static_cast<uint64_t>(program_header->p_memsz),
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (file_length < (program_header->p_offset + program_header->p_filesz)) {
*error_msg = StringPrintf("File size of %zd bytes not large enough to contain ELF segment "
"%d of %" PRIu64 " bytes: '%s'", file_length, i,
static_cast<uint64_t>(program_header->p_offset + program_header->p_filesz),
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
if (program_header->p_filesz != 0u) {
@@ -1203,28 +1212,28 @@
program_header->p_filesz,
prot,
flags,
- file_->Fd(),
+ file->Fd(),
program_header->p_offset,
/*low_4gb*/false,
/*reuse*/true, // implies MAP_FIXED
- file_->GetPath().c_str(),
+ file->GetPath().c_str(),
error_msg));
if (segment.get() == nullptr) {
*error_msg = StringPrintf("Failed to map ELF file segment %d from %s: %s",
- i, file_->GetPath().c_str(), error_msg->c_str());
+ i, file->GetPath().c_str(), error_msg->c_str());
return false;
}
if (segment->Begin() != p_vaddr) {
*error_msg = StringPrintf("Failed to map ELF file segment %d from %s at expected address %p, "
"instead mapped to %p",
- i, file_->GetPath().c_str(), p_vaddr, segment->Begin());
+ i, file->GetPath().c_str(), p_vaddr, segment->Begin());
return false;
}
segments_.push_back(segment.release());
}
if (program_header->p_filesz < program_header->p_memsz) {
std::string name = StringPrintf("Zero-initialized segment %" PRIu64 " of ELF file %s",
- static_cast<uint64_t>(i), file_->GetPath().c_str());
+ static_cast<uint64_t>(i), file->GetPath().c_str());
std::unique_ptr<MemMap> segment(
MemMap::MapAnonymous(name.c_str(),
p_vaddr + program_header->p_filesz,
@@ -1232,13 +1241,13 @@
prot, false, true /* reuse */, error_msg));
if (segment == nullptr) {
*error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s: %s",
- i, file_->GetPath().c_str(), error_msg->c_str());
+ i, file->GetPath().c_str(), error_msg->c_str());
return false;
}
if (segment->Begin() != p_vaddr) {
*error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s "
"at expected address %p, instead mapped to %p",
- i, file_->GetPath().c_str(), p_vaddr, segment->Begin());
+ i, file->GetPath().c_str(), p_vaddr, segment->Begin());
return false;
}
segments_.push_back(segment.release());
@@ -1249,7 +1258,7 @@
uint8_t* dsptr = base_address_ + GetDynamicProgramHeader().p_vaddr;
if ((dsptr < Begin() || dsptr >= End()) && !ValidPointer(dsptr)) {
*error_msg = StringPrintf("dynamic section address invalid in ELF file %s",
- file_->GetPath().c_str());
+ file->GetPath().c_str());
return false;
}
dynamic_section_start_ = reinterpret_cast<Elf_Dyn*>(dsptr);
@@ -1261,7 +1270,7 @@
case DT_HASH: {
if (!ValidPointer(d_ptr)) {
*error_msg = StringPrintf("DT_HASH value %p does not refer to a loaded ELF segment of %s",
- d_ptr, file_->GetPath().c_str());
+ d_ptr, file->GetPath().c_str());
return false;
}
hash_section_start_ = reinterpret_cast<Elf_Word*>(d_ptr);
@@ -1270,7 +1279,7 @@
case DT_STRTAB: {
if (!ValidPointer(d_ptr)) {
*error_msg = StringPrintf("DT_HASH value %p does not refer to a loaded ELF segment of %s",
- d_ptr, file_->GetPath().c_str());
+ d_ptr, file->GetPath().c_str());
return false;
}
dynstr_section_start_ = reinterpret_cast<char*>(d_ptr);
@@ -1279,7 +1288,7 @@
case DT_SYMTAB: {
if (!ValidPointer(d_ptr)) {
*error_msg = StringPrintf("DT_HASH value %p does not refer to a loaded ELF segment of %s",
- d_ptr, file_->GetPath().c_str());
+ d_ptr, file->GetPath().c_str());
return false;
}
dynsym_section_start_ = reinterpret_cast<Elf_Sym*>(d_ptr);
@@ -1289,7 +1298,7 @@
if (GetDynamicNum() != i+1) {
*error_msg = StringPrintf("DT_NULL found after %d .dynamic entries, "
"expected %d as implied by size of PT_DYNAMIC segment in %s",
- i + 1, GetDynamicNum(), file_->GetPath().c_str());
+ i + 1, GetDynamicNum(), file->GetPath().c_str());
return false;
}
break;
@@ -1298,7 +1307,7 @@
}
// Check for the existence of some sections.
- if (!CheckSectionsExist(error_msg)) {
+ if (!CheckSectionsExist(file, error_msg)) {
return false;
}
@@ -1392,7 +1401,7 @@
}
template <typename ElfTypes>
-bool ElfFileImpl<ElfTypes>::Strip(std::string* error_msg) {
+bool ElfFileImpl<ElfTypes>::Strip(File* file, std::string* error_msg) {
// ELF files produced by MCLinker look roughly like this
//
// +------------+
@@ -1484,10 +1493,10 @@
GetHeader().e_shnum = section_headers.size();
GetHeader().e_shoff = shoff;
- int result = ftruncate(file_->Fd(), offset);
+ int result = ftruncate(file->Fd(), offset);
if (result != 0) {
*error_msg = StringPrintf("Failed to truncate while stripping ELF file: '%s': %s",
- file_->GetPath().c_str(), strerror(errno));
+ file->GetPath().c_str(), strerror(errno));
return false;
}
return true;
@@ -1498,32 +1507,32 @@
template <typename ElfTypes>
bool ElfFileImpl<ElfTypes>::Fixup(Elf_Addr base_address) {
if (!FixupDynamic(base_address)) {
- LOG(WARNING) << "Failed to fixup .dynamic in " << file_->GetPath();
+ LOG(WARNING) << "Failed to fixup .dynamic in " << file_path_;
return false;
}
if (!FixupSectionHeaders(base_address)) {
- LOG(WARNING) << "Failed to fixup section headers in " << file_->GetPath();
+ LOG(WARNING) << "Failed to fixup section headers in " << file_path_;
return false;
}
if (!FixupProgramHeaders(base_address)) {
- LOG(WARNING) << "Failed to fixup program headers in " << file_->GetPath();
+ LOG(WARNING) << "Failed to fixup program headers in " << file_path_;
return false;
}
if (!FixupSymbols(base_address, true)) {
- LOG(WARNING) << "Failed to fixup .dynsym in " << file_->GetPath();
+ LOG(WARNING) << "Failed to fixup .dynsym in " << file_path_;
return false;
}
if (!FixupSymbols(base_address, false)) {
- LOG(WARNING) << "Failed to fixup .symtab in " << file_->GetPath();
+ LOG(WARNING) << "Failed to fixup .symtab in " << file_path_;
return false;
}
if (!FixupRelocations(base_address)) {
- LOG(WARNING) << "Failed to fixup .rel.dyn in " << file_->GetPath();
+ LOG(WARNING) << "Failed to fixup .rel.dyn in " << file_path_;
return false;
}
static_assert(sizeof(Elf_Off) >= sizeof(base_address), "Potentially losing precision.");
if (!FixupDebugSections(static_cast<Elf_Off>(base_address))) {
- LOG(WARNING) << "Failed to fixup debug sections in " << file_->GetPath();
+ LOG(WARNING) << "Failed to fixup debug sections in " << file_path_;
return false;
}
return true;
@@ -1538,7 +1547,7 @@
Elf_Addr d_ptr = elf_dyn.d_un.d_ptr;
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf_Dyn[%d] from 0x%" PRIx64 " to 0x%" PRIx64,
- GetFile().GetPath().c_str(), i,
+ file_path_.c_str(), i,
static_cast<uint64_t>(d_ptr),
static_cast<uint64_t>(d_ptr + base_address));
}
@@ -1560,7 +1569,7 @@
}
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf_Shdr[%d] from 0x%" PRIx64 " to 0x%" PRIx64,
- GetFile().GetPath().c_str(), i,
+ file_path_.c_str(), i,
static_cast<uint64_t>(sh->sh_addr),
static_cast<uint64_t>(sh->sh_addr + base_address));
}
@@ -1575,19 +1584,19 @@
for (Elf_Word i = 0; i < GetProgramHeaderNum(); i++) {
Elf_Phdr* ph = GetProgramHeader(i);
CHECK(ph != nullptr);
- CHECK_EQ(ph->p_vaddr, ph->p_paddr) << GetFile().GetPath() << " i=" << i;
+ CHECK_EQ(ph->p_vaddr, ph->p_paddr) << file_path_ << " i=" << i;
CHECK((ph->p_align == 0) || (0 == ((ph->p_vaddr - ph->p_offset) & (ph->p_align - 1))))
- << GetFile().GetPath() << " i=" << i;
+ << file_path_ << " i=" << i;
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf_Phdr[%d] from 0x%" PRIx64 " to 0x%" PRIx64,
- GetFile().GetPath().c_str(), i,
+ file_path_.c_str(), i,
static_cast<uint64_t>(ph->p_vaddr),
static_cast<uint64_t>(ph->p_vaddr + base_address));
}
ph->p_vaddr += base_address;
ph->p_paddr += base_address;
CHECK((ph->p_align == 0) || (0 == ((ph->p_vaddr - ph->p_offset) & (ph->p_align - 1))))
- << GetFile().GetPath() << " i=" << i;
+ << file_path_ << " i=" << i;
}
return true;
}
@@ -1599,7 +1608,7 @@
Elf_Shdr* symbol_section = FindSectionByType(section_type);
if (symbol_section == nullptr) {
// file is missing optional .symtab
- CHECK(!dynamic) << GetFile().GetPath();
+ CHECK(!dynamic) << file_path_;
return true;
}
for (uint32_t i = 0; i < GetSymbolNum(*symbol_section); i++) {
@@ -1608,7 +1617,7 @@
if (symbol->st_value != 0) {
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf_Sym[%d] from 0x%" PRIx64 " to 0x%" PRIx64,
- GetFile().GetPath().c_str(), i,
+ file_path_.c_str(), i,
static_cast<uint64_t>(symbol->st_value),
static_cast<uint64_t>(symbol->st_value + base_address));
}
@@ -1628,7 +1637,7 @@
Elf_Rel& rel = GetRel(*sh, j);
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf_Rel[%d] from 0x%" PRIx64 " to 0x%" PRIx64,
- GetFile().GetPath().c_str(), j,
+ file_path_.c_str(), j,
static_cast<uint64_t>(rel.r_offset),
static_cast<uint64_t>(rel.r_offset + base_address));
}
@@ -1639,7 +1648,7 @@
Elf_Rela& rela = GetRela(*sh, j);
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf_Rela[%d] from 0x%" PRIx64 " to 0x%" PRIx64,
- GetFile().GetPath().c_str(), j,
+ file_path_.c_str(), j,
static_cast<uint64_t>(rela.r_offset),
static_cast<uint64_t>(rela.r_offset + base_address));
}
@@ -1695,8 +1704,9 @@
low_4gb,
error_msg,
requested_base);
- if (elf_file_impl == nullptr)
+ if (elf_file_impl == nullptr) {
return nullptr;
+ }
return new ElfFile(elf_file_impl);
} else if (header[EI_CLASS] == ELFCLASS32) {
ElfFileImpl32* elf_file_impl = ElfFileImpl32::Open(file,
@@ -1775,8 +1785,8 @@
return elf32_->func(__VA_ARGS__); \
}
-bool ElfFile::Load(bool executable, bool low_4gb, std::string* error_msg) {
- DELEGATE_TO_IMPL(Load, executable, low_4gb, error_msg);
+bool ElfFile::Load(File* file, bool executable, bool low_4gb, std::string* error_msg) {
+ DELEGATE_TO_IMPL(Load, file, executable, low_4gb, error_msg);
}
const uint8_t* ElfFile::FindDynamicSymbolAddress(const std::string& symbol_name) const {
@@ -1795,8 +1805,8 @@
DELEGATE_TO_IMPL(End);
}
-const File& ElfFile::GetFile() const {
- DELEGATE_TO_IMPL(GetFile);
+const std::string& ElfFile::GetFilePath() const {
+ DELEGATE_TO_IMPL(GetFilePath);
}
bool ElfFile::GetSectionOffsetAndSize(const char* section_name, uint64_t* offset,
@@ -1854,10 +1864,11 @@
return false;
}
- if (elf_file->elf64_.get() != nullptr)
- return elf_file->elf64_->Strip(error_msg);
- else
- return elf_file->elf32_->Strip(error_msg);
+ if (elf_file->elf64_.get() != nullptr) {
+ return elf_file->elf64_->Strip(file, error_msg);
+ } else {
+ return elf_file->elf32_->Strip(file, error_msg);
+ }
}
bool ElfFile::Fixup(uint64_t base_address) {
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index c3616f7..b1c9395 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -53,7 +53,7 @@
~ElfFile();
// Load segments into memory based on PT_LOAD program headers
- bool Load(bool executable, bool low_4gb, std::string* error_msg);
+ bool Load(File* file, bool executable, bool low_4gb, std::string* error_msg);
const uint8_t* FindDynamicSymbolAddress(const std::string& symbol_name) const;
@@ -65,7 +65,7 @@
// The end of the memory map address range for this ELF file.
uint8_t* End() const;
- const File& GetFile() const;
+ const std::string& GetFilePath() const;
bool GetSectionOffsetAndSize(const char* section_name, uint64_t* offset, uint64_t* size) const;
diff --git a/runtime/elf_file_impl.h b/runtime/elf_file_impl.h
index 1cdbedc..04c2243 100644
--- a/runtime/elf_file_impl.h
+++ b/runtime/elf_file_impl.h
@@ -61,8 +61,8 @@
std::string* error_msg);
~ElfFileImpl();
- const File& GetFile() const {
- return *file_;
+ const std::string& GetFilePath() const {
+ return file_path_;
}
uint8_t* Begin() const {
@@ -119,7 +119,7 @@
// Load segments into memory based on PT_LOAD program headers.
// executable is true at run time, false at compile time.
- bool Load(bool executable, bool low_4gb, std::string* error_msg);
+ bool Load(File* file, bool executable, bool low_4gb, std::string* error_msg);
bool Fixup(Elf_Addr base_address);
bool FixupDynamic(Elf_Addr base_address);
@@ -132,14 +132,14 @@
static void ApplyOatPatches(const uint8_t* patches, const uint8_t* patches_end, Elf_Addr delta,
uint8_t* to_patch, const uint8_t* to_patch_end);
- bool Strip(std::string* error_msg);
+ bool Strip(File* file, std::string* error_msg);
private:
ElfFileImpl(File* file, bool writable, bool program_header_only, uint8_t* requested_base);
- bool Setup(int prot, int flags, bool low_4gb, std::string* error_msg);
+ bool Setup(File* file, int prot, int flags, bool low_4gb, std::string* error_msg);
- bool SetMap(MemMap* map, std::string* error_msg);
+ bool SetMap(File* file, MemMap* map, std::string* error_msg);
uint8_t* GetProgramHeadersStart() const;
uint8_t* GetSectionHeadersStart() const;
@@ -163,7 +163,7 @@
const Elf_Sym* FindDynamicSymbol(const std::string& symbol_name) const;
// Check that certain sections and their dependencies exist.
- bool CheckSectionsExist(std::string* error_msg) const;
+ bool CheckSectionsExist(File* file, std::string* error_msg) const;
// Check that the link of the first section links to the second section.
bool CheckSectionsLinked(const uint8_t* source, const uint8_t* target) const;
@@ -191,7 +191,7 @@
// Lookup a string by section type. Returns null for special 0 offset.
const char* GetString(Elf_Word section_type, Elf_Word) const;
- const File* const file_;
+ const std::string file_path_;
const bool writable_;
const bool program_header_only_;
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 4311d19..2a3a6bf 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -63,12 +63,14 @@
auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kSaveRefsOnly);
mirror::String* result = ResolveStringFromCode(caller, string_idx);
if (LIKELY(result != nullptr)) {
- // For AOT code, we need a write barrier for the dex cache that holds the GC roots in the .bss.
+ // For AOT code, we need a write barrier for the class loader that holds
+ // the GC roots in the .bss.
const DexFile* dex_file = caller->GetDexFile();
if (dex_file != nullptr &&
dex_file->GetOatDexFile() != nullptr &&
!dex_file->GetOatDexFile()->GetOatFile()->GetBssGcRoots().empty()) {
mirror::ClassLoader* class_loader = caller->GetDeclaringClass()->GetClassLoader();
+ DCHECK(class_loader != nullptr); // We do not use .bss GC roots for boot image.
// Note that we emit the barrier before the compiled code stores the string as GC root.
// This is OK as there is no suspend point in between.
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 7229f76..29bab01 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -66,7 +66,9 @@
: collector_(collector), target_space_(target_space),
contains_reference_to_target_space_(contains_reference_to_target_space) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<mirror::Object> obj,
+ MemberOffset offset,
+ bool is_static ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
@@ -77,7 +79,7 @@
}
}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
if (target_space_->HasAddress(ref->GetReferent())) {
*contains_reference_to_target_space_ = true;
@@ -115,7 +117,7 @@
: collector_(collector), target_space_(target_space),
contains_reference_to_target_space_(contains_reference_to_target_space) {}
- void operator()(mirror::Object* obj) const REQUIRES(Locks::heap_bitmap_lock_)
+ void operator()(ObjPtr<mirror::Object> obj) const REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
RememberedSetReferenceVisitor visitor(target_space_, contains_reference_to_target_space_,
collector_);
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index dabb6da..3dee974 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -354,14 +354,14 @@
explicit VerifyGrayImmuneObjectsVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
+ void operator()(ObjPtr<mirror::Object> obj, MemberOffset offset, bool /* is_static */)
const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset),
obj, offset);
}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
CheckReference(ref->GetReferent<kWithoutReadBarrier>(),
@@ -386,13 +386,15 @@
private:
ConcurrentCopying* const collector_;
- void CheckReference(mirror::Object* ref, mirror::Object* holder, MemberOffset offset) const
+ void CheckReference(ObjPtr<mirror::Object> ref,
+ ObjPtr<mirror::Object> holder,
+ MemberOffset offset) const
REQUIRES_SHARED(Locks::mutator_lock_) {
if (ref != nullptr) {
- if (!collector_->immune_spaces_.ContainsObject(ref)) {
+ if (!collector_->immune_spaces_.ContainsObject(ref.Ptr())) {
// Not immune, must be a zygote large object.
CHECK(Runtime::Current()->GetHeap()->GetLargeObjectsSpace()->IsZygoteLargeObject(
- Thread::Current(), ref))
+ Thread::Current(), ref.Ptr()))
<< "Non gray object references non immune, non zygote large object "<< ref << " "
<< PrettyTypeOf(ref) << " in holder " << holder << " " << PrettyTypeOf(holder)
<< " offset=" << offset.Uint32Value();
@@ -969,14 +971,17 @@
explicit VerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<mirror::Object> obj,
+ MemberOffset offset,
+ bool is_static ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref =
obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
VerifyNoFromSpaceRefsVisitor visitor(collector_);
visitor(ref);
}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
this->operator()(ref, mirror::Reference::ReferentOffset(), false);
@@ -1091,14 +1096,16 @@
explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<mirror::Object> obj,
+ MemberOffset offset,
+ bool is_static ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref =
obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
AssertToSpaceInvariantRefsVisitor visitor(collector_);
visitor(ref);
}
- void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
}
@@ -1780,13 +1787,13 @@
explicit RefFieldsVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
+ void operator()(ObjPtr<mirror::Object> obj, MemberOffset offset, bool /* is_static */)
const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
- collector_->Process(obj, offset);
+ collector_->Process(obj.Ptr(), offset);
}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
collector_->DelayReferenceReferent(klass, ref);
@@ -2377,7 +2384,8 @@
return Mark(from_ref);
}
-void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
+void ConcurrentCopying::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> reference) {
heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 81ffbc5..5b8a557 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -169,7 +169,8 @@
void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) OVERRIDE
+ virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> reference) OVERRIDE
REQUIRES_SHARED(Locks::mutator_lock_);
void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 4ffa254..5b51399 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -196,7 +196,8 @@
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
- virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+ virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> reference)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
protected:
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 6d2f009..e0bf744 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -418,7 +418,7 @@
collector_->UpdateHeapReference(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
}
- void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> /*klass*/, mirror::Reference* ref) const
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
collector_->UpdateHeapReference(
ref->GetFieldObjectReferenceAddr<kVerifyNone>(mirror::Reference::ReferentOffset()));
@@ -543,7 +543,8 @@
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
-void MarkCompact::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
+void MarkCompact::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> reference) {
heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}
@@ -551,13 +552,16 @@
public:
explicit MarkObjectVisitor(MarkCompact* collector) : collector_(collector) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const ALWAYS_INLINE
+ void operator()(ObjPtr<mirror::Object> obj,
+ MemberOffset offset,
+ bool /*is_static*/) const ALWAYS_INLINE
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// Object was already verified when we scanned it.
collector_->MarkObject(obj->GetFieldObject<mirror::Object, kVerifyNone>(offset));
}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) {
collector_->DelayReferenceReferent(klass, ref);
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index a61646c..564f85b 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -122,7 +122,7 @@
OVERRIDE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Schedules an unmarked object for reference processing.
- void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+ void DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> reference)
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index b89d99c..c05719d 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -272,7 +272,7 @@
explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
: mark_sweep_(mark_sweep) {}
- void operator()(mirror::Object* obj) const
+ void operator()(ObjPtr<mirror::Object> obj) const
ALWAYS_INLINE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -280,7 +280,7 @@
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
}
- mark_sweep_->ScanObject(obj);
+ mark_sweep_->ScanObject(obj.Ptr());
}
private:
@@ -616,7 +616,7 @@
public:
explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
collector_->DelayReferenceReferent(klass, ref);
@@ -1297,9 +1297,9 @@
}
}
-// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
+// Process the "referent" field lin a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
-void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
+void MarkSweep::DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) {
heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this);
}
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index bbac9da..19c2e9a 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -225,7 +225,7 @@
}
// Schedules an unmarked object for reference processing.
- void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+ void DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> reference)
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 76a478e..2cb1767 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -679,7 +679,8 @@
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
-void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
+void SemiSpace::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> reference) {
heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}
@@ -687,13 +688,13 @@
public:
explicit MarkObjectVisitor(SemiSpace* collector) : collector_(collector) {}
- void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
+ void operator()(ObjPtr<Object> obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// Object was already verified when we scanned it.
collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
collector_->DelayReferenceReferent(klass, ref);
}
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 4b63d9b..4cebcc3 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -156,7 +156,7 @@
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Schedules an unmarked object for reference processing.
- void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+ void DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> reference)
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index bf5af8e..f4a3aea 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1246,9 +1246,6 @@
<< " total=" << seen_backtrace_count_.LoadRelaxed() +
unique_backtrace_count_.LoadRelaxed();
}
- // Delete any still registered allocation listener.
- AllocationListener* l = GetAndOverwriteAllocationListener(&alloc_listener_, nullptr);
- delete l;
VLOG(heap) << "Finished ~Heap()";
}
@@ -2894,19 +2891,21 @@
return fail_count_->LoadSequentiallyConsistent();
}
- void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED, ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) {
if (verify_referent_) {
- VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
+ VerifyReference(ref.Ptr(), ref->GetReferent(), mirror::Reference::ReferentOffset());
}
}
- void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<mirror::Object> obj,
+ MemberOffset offset,
+ bool is_static ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
- VerifyReference(obj, obj->GetFieldObject<mirror::Object>(offset), offset);
+ VerifyReference(obj.Ptr(), obj->GetFieldObject<mirror::Object>(offset), offset);
}
- bool IsLive(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ bool IsLive(ObjPtr<mirror::Object> obj) const NO_THREAD_SAFETY_ANALYSIS {
return heap_->IsLiveObjectLocked(obj, true, false, true);
}
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index a40e408..e9c8b95 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -912,7 +912,7 @@
ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
const {}
- ALWAYS_INLINE void operator()(mirror::Object* obj,
+ ALWAYS_INLINE void operator()(ObjPtr<mirror::Object> obj,
MemberOffset offset,
bool is_static ATTRIBUTE_UNUSED) const
NO_THREAD_SAFETY_ANALYSIS {
@@ -949,7 +949,8 @@
}
// java.lang.ref.Reference visitor.
- void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
mirror::Object* obj = ref->GetReferent<kWithoutReadBarrier>();
ref->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
@@ -1254,6 +1255,16 @@
}
}
}
+
+ mirror::MethodTypeDexCacheType* method_types = dex_cache->GetResolvedMethodTypes();
+ if (method_types != nullptr) {
+ mirror::MethodTypeDexCacheType* new_method_types =
+ fixup_adapter.ForwardObject(method_types);
+ if (method_types != new_method_types) {
+ dex_cache->SetResolvedMethodTypes(new_method_types);
+ }
+ dex_cache->FixupResolvedMethodTypes<kWithoutReadBarrier>(new_method_types, fixup_adapter);
+ }
}
}
{
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index a73970b..36e8608 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -647,7 +647,15 @@
} else {
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
SetQuickAllocEntryPointsInstrumented(instrumented);
- ResetQuickAllocEntryPoints();
+
+ // Note: ResetQuickAllocEntryPoints only works when the runtime is started. Manually run the
+ // update for just this thread.
+ // Note: self may be null. One of those paths is setting instrumentation in the Heap
+ // constructor for gcstress mode.
+ if (self != nullptr) {
+ ResetQuickAllocEntryPointsForThread(self, nullptr);
+ }
+
alloc_entrypoints_instrumented_ = instrumented;
}
}
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index be061be..a61a187 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -33,8 +33,7 @@
namespace art {
InternTable::InternTable()
- : images_added_to_intern_table_(false),
- log_new_roots_(false),
+ : log_new_roots_(false),
weak_intern_condition_("New intern condition", *Locks::intern_table_lock_),
weak_root_state_(gc::kWeakRootStateNormal) {
}
@@ -181,57 +180,8 @@
const ImageSection& section = header->GetImageSection(ImageHeader::kSectionInternedStrings);
if (section.Size() > 0) {
AddTableFromMemoryLocked(image_space->Begin() + section.Offset());
- } else {
- // TODO: Delete this logic?
- mirror::Object* root = header->GetImageRoot(ImageHeader::kDexCaches);
- mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
- for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
- mirror::DexCache* dex_cache = dex_caches->Get(i);
- const size_t num_strings = dex_cache->NumStrings();
- for (size_t j = 0; j < num_strings; ++j) {
- mirror::String* image_string = dex_cache->GetResolvedString(j);
- if (image_string != nullptr) {
- mirror::String* found = LookupStrongLocked(image_string);
- if (found == nullptr) {
- InsertStrong(image_string);
- } else {
- DCHECK_EQ(found, image_string);
- }
- }
- }
- }
}
}
- images_added_to_intern_table_ = true;
-}
-
-mirror::String* InternTable::LookupStringFromImage(mirror::String* s) {
- DCHECK(!images_added_to_intern_table_);
- const std::vector<gc::space::ImageSpace*>& image_spaces =
- Runtime::Current()->GetHeap()->GetBootImageSpaces();
- if (image_spaces.empty()) {
- return nullptr; // No image present.
- }
- const std::string utf8 = s->ToModifiedUtf8();
- for (gc::space::ImageSpace* image_space : image_spaces) {
- mirror::Object* root = image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
- mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
- for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
- mirror::DexCache* dex_cache = dex_caches->Get(i);
- const DexFile* dex_file = dex_cache->GetDexFile();
- // Binary search the dex file for the string index.
- const DexFile::StringId* string_id = dex_file->FindStringId(utf8.c_str());
- if (string_id != nullptr) {
- uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
- // GetResolvedString() contains a RB.
- mirror::String* image_string = dex_cache->GetResolvedString(string_idx);
- if (image_string != nullptr) {
- return image_string;
- }
- }
- }
- }
- return nullptr;
}
void InternTable::BroadcastForNewInterns() {
@@ -303,13 +253,6 @@
}
return weak;
}
- // Check the image for a match.
- if (!images_added_to_intern_table_) {
- mirror::String* const image_string = LookupStringFromImage(s);
- if (image_string != nullptr) {
- return is_strong ? InsertStrong(image_string) : InsertWeak(image_string);
- }
- }
// No match in the strong table or the weak table. Insert into the strong / weak table.
return is_strong ? InsertStrong(s) : InsertWeak(s);
}
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 184fbdc..30ff55d 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -238,8 +238,6 @@
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
// Transaction rollback access.
- mirror::String* LookupStringFromImage(mirror::String* s)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
mirror::String* InsertStrongFromTransaction(mirror::String* s)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
mirror::String* InsertWeakFromTransaction(mirror::String* s)
@@ -260,7 +258,6 @@
void WaitUntilAccessible(Thread* self)
REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
- bool images_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_);
ConditionVariable weak_intern_condition_ GUARDED_BY(Locks::intern_table_lock_);
// Since this contains (strong) roots, they need a read barrier to
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 4a3654b..4347c37 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -1472,13 +1472,17 @@
uint32_t* args, JValue* result) {
int32_t length = args[1];
DCHECK_GE(length, 0);
- mirror::Class* element_class = reinterpret_cast<mirror::Object*>(args[0])->AsClass();
+ ObjPtr<mirror::Class> element_class = reinterpret_cast<mirror::Object*>(args[0])->AsClass();
Runtime* runtime = Runtime::Current();
- mirror::Class* array_class = runtime->GetClassLinker()->FindArrayClass(self, &element_class);
+ ObjPtr<mirror::Class> array_class =
+ runtime->GetClassLinker()->FindArrayClass(self, &element_class);
DCHECK(array_class != nullptr);
gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator();
- result->SetL(mirror::Array::Alloc<true, true>(self, array_class, length,
- array_class->GetComponentSizeShift(), allocator));
+ result->SetL(mirror::Array::Alloc<true, true>(self,
+ array_class,
+ length,
+ array_class->GetComponentSizeShift(),
+ allocator));
}
void UnstartedRuntime::UnstartedJNIVMStackGetCallingClassLoader(
@@ -1601,10 +1605,10 @@
ThrowNegativeArraySizeException(length);
return;
}
- mirror::Class* element_class = reinterpret_cast<mirror::Class*>(args[0])->AsClass();
+ ObjPtr<mirror::Class> element_class = reinterpret_cast<mirror::Class*>(args[0])->AsClass();
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
- mirror::Class* array_class = class_linker->FindArrayClass(self, &element_class);
+ ObjPtr<mirror::Class> array_class = class_linker->FindArrayClass(self, &element_class);
if (UNLIKELY(array_class == nullptr)) {
CHECK(self->IsExceptionPending());
return;
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index 6a4add3..b190c81 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -81,20 +81,21 @@
static mirror::ObjectArray<mirror::Object>* CreateObjectArray(
Thread* self,
- mirror::Class* component_type,
+ ObjPtr<mirror::Class> component_type,
const StackHandleScope<3>& data)
REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
- mirror::Class* array_type = runtime->GetClassLinker()->FindArrayClass(self, &component_type);
+ ObjPtr<mirror::Class> array_type =
+ runtime->GetClassLinker()->FindArrayClass(self, &component_type);
CHECK(array_type != nullptr);
- mirror::ObjectArray<mirror::Object>* result =
+ ObjPtr<mirror::ObjectArray<mirror::Object>> result =
mirror::ObjectArray<mirror::Object>::Alloc(self, array_type, 3);
CHECK(result != nullptr);
for (size_t i = 0; i < 3; ++i) {
result->Set(static_cast<int32_t>(i), data.GetReference(i));
CHECK(!self->IsExceptionPending());
}
- return result;
+ return result.Ptr();
}
static void CheckObjectArray(mirror::ObjectArray<mirror::Object>* array,
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 8eebe56..305a7e8 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -1904,11 +1904,12 @@
// Compute the array class corresponding to the given element class.
ScopedObjectAccess soa(env);
- mirror::Class* array_class;
+ ObjPtr<mirror::Class> array_class;
{
- mirror::Class* element_class = soa.Decode<mirror::Class>(element_jclass).Ptr();
+ ObjPtr<mirror::Class> element_class = soa.Decode<mirror::Class>(element_jclass).Ptr();
if (UNLIKELY(element_class->IsPrimitive())) {
- soa.Vm()->JniAbortF("NewObjectArray", "not an object type: %s",
+ soa.Vm()->JniAbortF("NewObjectArray",
+ "not an object type: %s",
PrettyDescriptor(element_class).c_str());
return nullptr;
}
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index d18781a..3789081 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -150,8 +150,11 @@
};
template <bool kIsInstrumented, bool kFillUsable>
-inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count,
- size_t component_size_shift, gc::AllocatorType allocator_type) {
+inline Array* Array::Alloc(Thread* self,
+ ObjPtr<Class> array_class,
+ int32_t component_count,
+ size_t component_size_shift,
+ gc::AllocatorType allocator_type) {
DCHECK(allocator_type != gc::kAllocatorTypeLOS);
DCHECK(array_class != nullptr);
DCHECK(array_class->IsArrayClass());
@@ -204,7 +207,9 @@
template<typename T>
inline PrimitiveArray<T>* PrimitiveArray<T>::Alloc(Thread* self, size_t length) {
- Array* raw_array = Array::Alloc<true>(self, GetArrayClass(), length,
+ Array* raw_array = Array::Alloc<true>(self,
+ GetArrayClass(),
+ length,
ComponentSizeShiftWidth(sizeof(T)),
Runtime::Current()->GetHeap()->GetCurrentAllocator());
return down_cast<PrimitiveArray<T>*>(raw_array);
@@ -275,7 +280,9 @@
}
template<class T>
-inline void PrimitiveArray<T>::Memmove(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos,
+inline void PrimitiveArray<T>::Memmove(int32_t dst_pos,
+ ObjPtr<PrimitiveArray<T>> src,
+ int32_t src_pos,
int32_t count) {
if (UNLIKELY(count == 0)) {
return;
@@ -335,7 +342,9 @@
}
template<class T>
-inline void PrimitiveArray<T>::Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos,
+inline void PrimitiveArray<T>::Memcpy(int32_t dst_pos,
+ ObjPtr<PrimitiveArray<T>> src,
+ int32_t src_pos,
int32_t count) {
if (UNLIKELY(count == 0)) {
return;
@@ -415,6 +424,13 @@
}
}
+template<typename T>
+inline void PrimitiveArray<T>::SetArrayClass(ObjPtr<Class> array_class) {
+ CHECK(array_class_.IsNull());
+ CHECK(array_class != nullptr);
+ array_class_ = GcRoot<Class>(array_class);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index 1aa38dd..8afa4aa 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -60,7 +60,7 @@
for (int32_t i = 0; i < array_length; i++) {
StackHandleScope<1> hs2(self);
Handle<mirror::Class> h_component_type(hs2.NewHandle(array_class->GetComponentType()));
- Array* sub_array = RecursiveCreateMultiArray(self, h_component_type,
+ ObjPtr<Array> sub_array = RecursiveCreateMultiArray(self, h_component_type,
current_dimension + 1, dimensions);
if (UNLIKELY(sub_array == nullptr)) {
CHECK(self->IsExceptionPending());
@@ -93,7 +93,7 @@
// Find/generate the array class.
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- mirror::Class* element_class_ptr = element_class.Get();
+ ObjPtr<mirror::Class> element_class_ptr = element_class.Get();
StackHandleScope<1> hs(self);
MutableHandle<mirror::Class> array_class(
hs.NewHandle(class_linker->FindArrayClass(self, &element_class_ptr)));
@@ -102,7 +102,7 @@
return nullptr;
}
for (int32_t i = 1; i < dimensions->GetLength(); ++i) {
- mirror::Class* array_class_ptr = array_class.Get();
+ ObjPtr<mirror::Class> array_class_ptr = array_class.Get();
array_class.Assign(class_linker->FindArrayClass(self, &array_class_ptr));
if (UNLIKELY(array_class.Get() == nullptr)) {
CHECK(self->IsExceptionPending());
@@ -110,11 +110,11 @@
}
}
// Create the array.
- Array* new_array = RecursiveCreateMultiArray(self, array_class, 0, dimensions);
+ ObjPtr<Array> new_array = RecursiveCreateMultiArray(self, array_class, 0, dimensions);
if (UNLIKELY(new_array == nullptr)) {
CHECK(self->IsExceptionPending());
}
- return new_array;
+ return new_array.Ptr();
}
void Array::ThrowArrayIndexOutOfBoundsException(int32_t index) {
@@ -136,12 +136,13 @@
heap->GetCurrentNonMovingAllocator();
const auto component_size = GetClass()->GetComponentSize();
const auto component_shift = GetClass()->GetComponentSizeShift();
- Array* new_array = Alloc<true>(self, GetClass(), new_length, component_shift, allocator_type);
+ ObjPtr<Array> new_array = Alloc<true>(self, GetClass(), new_length, component_shift, allocator_type);
if (LIKELY(new_array != nullptr)) {
- memcpy(new_array->GetRawData(component_size, 0), h_this->GetRawData(component_size, 0),
+ memcpy(new_array->GetRawData(component_size, 0),
+ h_this->GetRawData(component_size, 0),
std::min(h_this->GetLength(), new_length) << component_shift);
}
- return new_array;
+ return new_array.Ptr();
}
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 04d02f7..994e9b2 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -39,13 +39,19 @@
// least component_count size, however, if there's usable space at the end of the allocation the
// array will fill it.
template <bool kIsInstrumented, bool kFillUsable = false>
- ALWAYS_INLINE static Array* Alloc(Thread* self, Class* array_class, int32_t component_count,
- size_t component_size_shift, gc::AllocatorType allocator_type)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ ALWAYS_INLINE static Array* Alloc(Thread* self,
+ ObjPtr<Class> array_class,
+ int32_t component_count,
+ size_t component_size_shift,
+ gc::AllocatorType allocator_type)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
- static Array* CreateMultiArray(Thread* self, Handle<Class> element_class,
+ static Array* CreateMultiArray(Thread* self,
+ Handle<Class> element_class,
Handle<IntArray> dimensions)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
@@ -147,7 +153,7 @@
* smaller than element size copies). Arguments are assumed to be within the bounds of the array
* and the arrays non-null.
*/
- void Memmove(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count)
+ void Memmove(int32_t dst_pos, ObjPtr<PrimitiveArray<T>> src, int32_t src_pos, int32_t count)
REQUIRES_SHARED(Locks::mutator_lock_);
/*
@@ -155,14 +161,10 @@
* smaller than element size copies). Arguments are assumed to be within the bounds of the array
* and the arrays non-null.
*/
- void Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count)
+ void Memcpy(int32_t dst_pos, ObjPtr<PrimitiveArray<T>> src, int32_t src_pos, int32_t count)
REQUIRES_SHARED(Locks::mutator_lock_);
- static void SetArrayClass(Class* array_class) {
- CHECK(array_class_.IsNull());
- CHECK(array_class != nullptr);
- array_class_ = GcRoot<Class>(array_class);
- }
+ static void SetArrayClass(ObjPtr<Class> array_class);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
static Class* GetArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/mirror/class_loader-inl.h b/runtime/mirror/class_loader-inl.h
index cc910b0..f5ecdae 100644
--- a/runtime/mirror/class_loader-inl.h
+++ b/runtime/mirror/class_loader-inl.h
@@ -21,6 +21,7 @@
#include "base/mutex-inl.h"
#include "class_table-inl.h"
+#include "obj_ptr-inl.h"
namespace art {
namespace mirror {
@@ -29,7 +30,7 @@
VerifyObjectFlags kVerifyFlags,
ReadBarrierOption kReadBarrierOption,
typename Visitor>
-inline void ClassLoader::VisitReferences(mirror::Class* klass, const Visitor& visitor) {
+inline void ClassLoader::VisitReferences(ObjPtr<mirror::Class> klass, const Visitor& visitor) {
// Visit instance fields first.
VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
if (kVisitClasses) {
diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h
index 407678a..a62a460 100644
--- a/runtime/mirror/class_loader.h
+++ b/runtime/mirror/class_loader.h
@@ -67,7 +67,7 @@
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void VisitReferences(mirror::Class* klass, const Visitor& visitor)
+ void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::classlinker_classes_lock_);
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index b388f65..be849a3 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -27,6 +27,7 @@
#include "mirror/class.h"
#include "mirror/method_type.h"
#include "runtime.h"
+#include "obj_ptr.h"
#include <atomic>
@@ -43,8 +44,8 @@
return StringDexCachePair::Lookup(GetStrings(), string_idx, NumStrings()).Read();
}
-inline void DexCache::SetResolvedString(uint32_t string_idx, mirror::String* resolved) {
- StringDexCachePair::Assign(GetStrings(), string_idx, resolved, NumStrings());
+inline void DexCache::SetResolvedString(uint32_t string_idx, ObjPtr<mirror::String> resolved) {
+ StringDexCachePair::Assign(GetStrings(), string_idx, resolved.Ptr(), NumStrings());
Runtime* const runtime = Runtime::Current();
if (UNLIKELY(runtime->IsActiveTransaction())) {
DCHECK(runtime->IsAotCompiler());
@@ -72,7 +73,7 @@
return GetResolvedTypes()[type_idx].Read();
}
-inline void DexCache::SetResolvedType(uint32_t type_idx, Class* resolved) {
+inline void DexCache::SetResolvedType(uint32_t type_idx, ObjPtr<Class> resolved) {
DCHECK_LT(type_idx, NumResolvedTypes()); // NOTE: Unchecked, i.e. not throwing AIOOB.
// TODO default transaction support.
GetResolvedTypes()[type_idx] = GcRoot<Class>(resolved);
@@ -162,7 +163,7 @@
VerifyObjectFlags kVerifyFlags,
ReadBarrierOption kReadBarrierOption,
typename Visitor>
-inline void DexCache::VisitReferences(mirror::Class* klass, const Visitor& visitor) {
+inline void DexCache::VisitReferences(ObjPtr<Class> klass, const Visitor& visitor) {
// Visit instance fields first.
VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
// Visit arrays after.
@@ -207,6 +208,19 @@
}
}
+template <ReadBarrierOption kReadBarrierOption, typename Visitor>
+inline void DexCache::FixupResolvedMethodTypes(mirror::MethodTypeDexCacheType* dest,
+ const Visitor& visitor) {
+ mirror::MethodTypeDexCacheType* src = GetResolvedMethodTypes();
+ for (size_t i = 0, count = NumResolvedMethodTypes(); i < count; ++i) {
+ MethodTypeDexCachePair source = src[i].load(std::memory_order_relaxed);
+ mirror::MethodType* ptr = source.object.Read<kReadBarrierOption>();
+ mirror::MethodType* new_source = visitor(ptr);
+ source.object = GcRoot<MethodType>(new_source);
+ dest[i].store(source, std::memory_order_relaxed);
+ }
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 2fcabb5..d728f90 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -160,6 +160,10 @@
void FixupResolvedTypes(GcRoot<mirror::Class>* dest, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
+ template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
+ void FixupResolvedMethodTypes(MethodTypeDexCacheType* dest, const Visitor& visitor)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
String* GetLocation() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_));
}
@@ -211,7 +215,7 @@
mirror::String* GetResolvedString(uint32_t string_idx) ALWAYS_INLINE
REQUIRES_SHARED(Locks::mutator_lock_);
- void SetResolvedString(uint32_t string_idx, mirror::String* resolved) ALWAYS_INLINE
+ void SetResolvedString(uint32_t string_idx, ObjPtr<mirror::String> resolved) ALWAYS_INLINE
REQUIRES_SHARED(Locks::mutator_lock_);
// Clear a string for a string_idx, used to undo string intern transactions to make sure
@@ -220,7 +224,8 @@
Class* GetResolvedType(uint32_t type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetResolvedType(uint32_t type_idx, Class* resolved) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetResolvedType(uint32_t type_idx, ObjPtr<Class> resolved)
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -282,7 +287,7 @@
MethodTypeDexCacheType* GetResolvedMethodTypes()
ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetFieldPtr<MethodTypeDexCacheType*>(ResolvedMethodTypesOffset());
+ return GetFieldPtr64<MethodTypeDexCacheType*>(ResolvedMethodTypesOffset());
}
void SetResolvedMethodTypes(MethodTypeDexCacheType* resolved_method_types)
@@ -337,7 +342,7 @@
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void VisitReferences(mirror::Class* klass, const Visitor& visitor)
+ void VisitReferences(ObjPtr<mirror::Class> klass, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
HeapReference<Object> dex_;
diff --git a/runtime/mirror/executable.cc b/runtime/mirror/executable.cc
index 33ebd81..17c16a2 100644
--- a/runtime/mirror/executable.cc
+++ b/runtime/mirror/executable.cc
@@ -32,14 +32,10 @@
return true;
}
-template bool Executable::CreateFromArtMethod<PointerSize::k32, false>(
- ArtMethod* method);
-template bool Executable::CreateFromArtMethod<PointerSize::k32, true>(
- ArtMethod* method);
-template bool Executable::CreateFromArtMethod<PointerSize::k64, false>(
- ArtMethod* method);
-template bool Executable::CreateFromArtMethod<PointerSize::k64, true>(
- ArtMethod* method);
+template bool Executable::CreateFromArtMethod<PointerSize::k32, false>(ArtMethod* method);
+template bool Executable::CreateFromArtMethod<PointerSize::k32, true>(ArtMethod* method);
+template bool Executable::CreateFromArtMethod<PointerSize::k64, false>(ArtMethod* method);
+template bool Executable::CreateFromArtMethod<PointerSize::k64, true>(ArtMethod* method);
ArtMethod* Executable::GetArtMethod() {
return reinterpret_cast<ArtMethod*>(GetField64(ArtMethodOffset()));
diff --git a/runtime/mirror/field-inl.h b/runtime/mirror/field-inl.h
index adc5107..c03f20a 100644
--- a/runtime/mirror/field-inl.h
+++ b/runtime/mirror/field-inl.h
@@ -79,10 +79,15 @@
}
template<bool kTransactionActive>
-void Field::SetDeclaringClass(ObjPtr<mirror::Class> c) {
+inline void Field::SetDeclaringClass(ObjPtr<mirror::Class> c) {
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), c);
}
+template<bool kTransactionActive>
+inline void Field::SetType(ObjPtr<mirror::Class> type) {
+ SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, type_), type);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/field.cc b/runtime/mirror/field.cc
index 65f6b16..f6b6489 100644
--- a/runtime/mirror/field.cc
+++ b/runtime/mirror/field.cc
@@ -27,7 +27,7 @@
GcRoot<Class> Field::static_class_;
GcRoot<Class> Field::array_class_;
-void Field::SetClass(Class* klass) {
+void Field::SetClass(ObjPtr<Class> klass) {
CHECK(static_class_.IsNull()) << static_class_.Read() << " " << klass;
CHECK(klass != nullptr);
static_class_ = GcRoot<Class>(klass);
@@ -38,7 +38,7 @@
static_class_ = GcRoot<Class>(nullptr);
}
-void Field::SetArrayClass(Class* klass) {
+void Field::SetArrayClass(ObjPtr<Class> klass) {
CHECK(array_class_.IsNull()) << array_class_.Read() << " " << klass;
CHECK(klass != nullptr);
array_class_ = GcRoot<Class>(klass);
diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h
index c5357c9..222d709 100644
--- a/runtime/mirror/field.h
+++ b/runtime/mirror/field.h
@@ -83,10 +83,10 @@
return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, offset_));
}
- static void SetClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void SetClass(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
- static void SetArrayClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void SetArrayClass(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
static void ResetArrayClass() REQUIRES_SHARED(Locks::mutator_lock_);
static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -113,9 +113,7 @@
void SetDeclaringClass(ObjPtr<mirror::Class> c) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetType(mirror::Class* type) REQUIRES_SHARED(Locks::mutator_lock_) {
- SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, type_), type);
- }
+ void SetType(ObjPtr<mirror::Class> type) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
void SetAccessFlags(uint32_t flags) REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/mirror/iftable-inl.h b/runtime/mirror/iftable-inl.h
index b465d07..d6191c2 100644
--- a/runtime/mirror/iftable-inl.h
+++ b/runtime/mirror/iftable-inl.h
@@ -18,11 +18,12 @@
#define ART_RUNTIME_MIRROR_IFTABLE_INL_H_
#include "iftable.h"
+#include "obj_ptr-inl.h"
namespace art {
namespace mirror {
-inline void IfTable::SetInterface(int32_t i, Class* interface) {
+inline void IfTable::SetInterface(int32_t i, ObjPtr<Class> interface) {
DCHECK(interface != nullptr);
DCHECK(interface->IsInterface());
const size_t idx = i * kMax + kInterface;
@@ -30,6 +31,13 @@
SetWithoutChecks<false>(idx, interface);
}
+inline void IfTable::SetMethodArray(int32_t i, ObjPtr<PointerArray> arr) {
+ DCHECK(arr != nullptr);
+ auto idx = i * kMax + kMethodArray;
+ DCHECK(Get(idx) == nullptr);
+ Set<false>(idx, arr);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index a1a2f98..296c163 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -31,7 +31,7 @@
return interface;
}
- ALWAYS_INLINE void SetInterface(int32_t i, Class* interface)
+ ALWAYS_INLINE void SetInterface(int32_t i, ObjPtr<Class> interface)
REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -51,12 +51,7 @@
return method_array == nullptr ? 0u : method_array->GetLength();
}
- void SetMethodArray(int32_t i, PointerArray* arr) REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(arr != nullptr);
- auto idx = i * kMax + kMethodArray;
- DCHECK(Get(idx) == nullptr);
- Set<false>(idx, arr);
- }
+ void SetMethodArray(int32_t i, ObjPtr<PointerArray> arr) REQUIRES_SHARED(Locks::mutator_lock_);
size_t Count() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetLength() / kMax;
diff --git a/runtime/mirror/method_type_test.cc b/runtime/mirror/method_type_test.cc
index a968bff..03ab930 100644
--- a/runtime/mirror/method_type_test.cc
+++ b/runtime/mirror/method_type_test.cc
@@ -52,7 +52,7 @@
soa.Self(), FullyQualifiedType(return_type).c_str(), boot_class_loader));
CHECK(return_clazz.Get() != nullptr);
- mirror::Class* class_type = mirror::Class::GetJavaLangClass();
+ ObjPtr<mirror::Class> class_type = mirror::Class::GetJavaLangClass();
mirror::Class* class_array_type = class_linker->FindArrayClass(self, &class_type);
Handle<mirror::ObjectArray<mirror::Class>> param_classes = hs.NewHandle(
mirror::ObjectArray<mirror::Class>::Alloc(self, class_array_type, param_types.size()));
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 3e7bca7..f555c80 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -392,8 +392,8 @@
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline bool Object::IsIntArray() {
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
- mirror::Class* klass = GetClass<kVerifyFlags, kReadBarrierOption>();
- mirror::Class* component_type = klass->GetComponentType<kVerifyFlags, kReadBarrierOption>();
+ ObjPtr<Class> klass = GetClass<kVerifyFlags, kReadBarrierOption>();
+ ObjPtr<Class> component_type = klass->GetComponentType<kVerifyFlags, kReadBarrierOption>();
return component_type != nullptr && component_type->template IsPrimitiveInt<kNewFlags>();
}
@@ -406,8 +406,8 @@
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline bool Object::IsLongArray() {
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
- mirror::Class* klass = GetClass<kVerifyFlags, kReadBarrierOption>();
- mirror::Class* component_type = klass->GetComponentType<kVerifyFlags, kReadBarrierOption>();
+ ObjPtr<Class> klass = GetClass<kVerifyFlags, kReadBarrierOption>();
+ ObjPtr<Class> component_type = klass->GetComponentType<kVerifyFlags, kReadBarrierOption>();
return component_type != nullptr && component_type->template IsPrimitiveLong<kNewFlags>();
}
@@ -1117,7 +1117,7 @@
// There is no reference offset bitmap. In the non-static case, walk up the class
// inheritance hierarchy and find reference offsets the hard way. In the static case, just
// consider this class.
- for (mirror::Class* klass = kIsStatic
+ for (ObjPtr<Class> klass = kIsStatic
? AsClass<kVerifyFlags, kReadBarrierOption>()
: GetClass<kVerifyFlags, kReadBarrierOption>();
klass != nullptr;
@@ -1146,13 +1146,13 @@
}
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, typename Visitor>
-inline void Object::VisitInstanceFieldsReferences(mirror::Class* klass, const Visitor& visitor) {
+inline void Object::VisitInstanceFieldsReferences(ObjPtr<Class> klass, const Visitor& visitor) {
VisitFieldsReferences<false, kVerifyFlags, kReadBarrierOption>(
klass->GetReferenceInstanceOffsets<kVerifyFlags>(), visitor);
}
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, typename Visitor>
-inline void Object::VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor) {
+inline void Object::VisitStaticFieldsReferences(ObjPtr<Class> klass, const Visitor& visitor) {
DCHECK(!klass->IsTemp());
klass->VisitFieldsReferences<true, kVerifyFlags, kReadBarrierOption>(0, visitor);
}
@@ -1186,7 +1186,7 @@
typename JavaLangRefVisitor>
inline void Object::VisitReferences(const Visitor& visitor,
const JavaLangRefVisitor& ref_visitor) {
- mirror::Class* klass = GetClass<kVerifyFlags, kReadBarrierOption>();
+ ObjPtr<Class> klass = GetClass<kVerifyFlags, kReadBarrierOption>();
visitor(this, ClassOffset(), false);
const uint32_t class_flags = klass->GetClassFlags<kVerifyNone>();
if (LIKELY(class_flags == kClassFlagNormal)) {
@@ -1201,7 +1201,7 @@
DCHECK(!klass->IsStringClass());
if (class_flags == kClassFlagClass) {
DCHECK((klass->IsClassClass<kVerifyFlags, kReadBarrierOption>()));
- mirror::Class* as_klass = AsClass<kVerifyNone, kReadBarrierOption>();
+ ObjPtr<Class> as_klass = AsClass<kVerifyNone, kReadBarrierOption>();
as_klass->VisitReferences<kVisitNativeRoots, kVerifyFlags, kReadBarrierOption>(klass,
visitor);
} else if (class_flags == kClassFlagObjectArray) {
@@ -1228,7 +1228,7 @@
// actual string instances.
if (!klass->IsStringClass()) {
size_t total_reference_instance_fields = 0;
- mirror::Class* super_class = klass;
+ ObjPtr<Class> super_class = klass;
do {
total_reference_instance_fields += super_class->NumReferenceInstanceFields();
super_class = super_class->GetSuperClass<kVerifyFlags, kReadBarrierOption>();
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index daee727..7e92c53 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -49,7 +49,7 @@
void operator()(ObjPtr<Object> obj, MemberOffset offset, bool /* is_static */) const
ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
// GetFieldObject() contains a RB.
- Object* ref = obj->GetFieldObject<Object>(offset);
+ ObjPtr<Object> ref = obj->GetFieldObject<Object>(offset);
// No WB here as a large object space does not have a card table
// coverage. Instead, cards will be marked separately.
dest_obj_->SetFieldObjectWithoutWriteBarrier<false, false>(offset, ref);
@@ -118,7 +118,7 @@
}
gc::Heap* heap = Runtime::Current()->GetHeap();
// Perform write barriers on copied object references.
- Class* c = src->GetClass();
+ ObjPtr<Class> c = src->GetClass();
if (c->IsArrayClass()) {
if (!c->GetComponentType()->IsPrimitive()) {
ObjectArray<Object>* array = dest->AsObjectArray<Object>();
@@ -182,8 +182,8 @@
hash_code_seed.StoreRelaxed(new_seed);
}
-int32_t Object::IdentityHashCode() const {
- mirror::Object* current_this = const_cast<mirror::Object*>(this);
+int32_t Object::IdentityHashCode() {
+ ObjPtr<Object> current_this = this; // The this pointer may get invalidated by thread suspension.
while (true) {
LockWord lw = current_this->GetLockWord(false);
switch (lw.GetState()) {
@@ -192,7 +192,7 @@
// loop iteration.
LockWord hash_word = LockWord::FromHashCode(GenerateIdentityHashCode(), lw.GCState());
DCHECK_EQ(hash_word.GetState(), LockWord::kHashCode);
- if (const_cast<Object*>(this)->CasLockWordWeakRelaxed(lw, hash_word)) {
+ if (current_this->CasLockWordWeakRelaxed(lw, hash_word)) {
return hash_word.GetHashCode();
}
break;
@@ -227,13 +227,13 @@
}
void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, ObjPtr<Object> new_value) {
- Class* c = GetClass();
+ ObjPtr<Class> c = GetClass();
Runtime* runtime = Runtime::Current();
if (runtime->GetClassLinker() == nullptr || !runtime->IsStarted() ||
!runtime->GetHeap()->IsObjectValidationEnabled() || !c->IsResolved()) {
return;
}
- for (Class* cur = c; cur != nullptr; cur = cur->GetSuperClass()) {
+ for (ObjPtr<Class> cur = c; cur != nullptr; cur = cur->GetSuperClass()) {
for (ArtField& field : cur->GetIFields()) {
StackHandleScope<1> hs(Thread::Current());
Handle<Object> h_object(hs.NewHandle(new_value));
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 84aa96c..13f4028 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -130,9 +130,10 @@
Object* Clone(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
- int32_t IdentityHashCode() const
+ int32_t IdentityHashCode()
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_,
+ !Locks::thread_suspend_count_lock_);
static MemberOffset MonitorOffset() {
return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
@@ -578,12 +579,12 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void VisitInstanceFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR
+ void VisitInstanceFieldsReferences(ObjPtr<mirror::Class> klass, const Visitor& visitor) HOT_ATTR
REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR
+ void VisitStaticFieldsReferences(ObjPtr<mirror::Class> klass, const Visitor& visitor) HOT_ATTR
REQUIRES_SHARED(Locks::mutator_lock_);
private:
diff --git a/runtime/mirror/stack_trace_element.cc b/runtime/mirror/stack_trace_element.cc
index e2050cc..c00cf91 100644
--- a/runtime/mirror/stack_trace_element.cc
+++ b/runtime/mirror/stack_trace_element.cc
@@ -28,7 +28,7 @@
GcRoot<Class> StackTraceElement::java_lang_StackTraceElement_;
-void StackTraceElement::SetClass(Class* java_lang_StackTraceElement) {
+void StackTraceElement::SetClass(ObjPtr<Class> java_lang_StackTraceElement) {
CHECK(java_lang_StackTraceElement_.IsNull());
CHECK(java_lang_StackTraceElement != nullptr);
java_lang_StackTraceElement_ = GcRoot<Class>(java_lang_StackTraceElement);
@@ -39,30 +39,34 @@
java_lang_StackTraceElement_ = GcRoot<Class>(nullptr);
}
-StackTraceElement* StackTraceElement::Alloc(Thread* self, Handle<String> declaring_class,
- Handle<String> method_name, Handle<String> file_name,
+StackTraceElement* StackTraceElement::Alloc(Thread* self,
+ Handle<String> declaring_class,
+ Handle<String> method_name,
+ Handle<String> file_name,
int32_t line_number) {
ObjPtr<StackTraceElement> trace =
ObjPtr<StackTraceElement>::DownCast(GetStackTraceElement()->AllocObject(self));
if (LIKELY(trace != nullptr)) {
if (Runtime::Current()->IsActiveTransaction()) {
- trace->Init<true>(declaring_class, method_name, file_name, line_number);
+ trace->Init<true>(declaring_class.Get(), method_name.Get(), file_name.Get(), line_number);
} else {
- trace->Init<false>(declaring_class, method_name, file_name, line_number);
+ trace->Init<false>(declaring_class.Get(), method_name.Get(), file_name.Get(), line_number);
}
}
return trace.Ptr();
}
template<bool kTransactionActive>
-void StackTraceElement::Init(Handle<String> declaring_class, Handle<String> method_name,
- Handle<String> file_name, int32_t line_number) {
+void StackTraceElement::Init(ObjPtr<String> declaring_class,
+ ObjPtr<String> method_name,
+ ObjPtr<String> file_name,
+ int32_t line_number) {
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_),
- declaring_class.Get());
+ declaring_class);
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_),
- method_name.Get());
+ method_name);
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_),
- file_name.Get());
+ file_name);
SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_),
line_number);
}
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index 4b3d9d0..d32d8dc 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -47,12 +47,14 @@
return GetField32(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_));
}
- static StackTraceElement* Alloc(Thread* self, Handle<String> declaring_class,
- Handle<String> method_name, Handle<String> file_name,
+ static StackTraceElement* Alloc(Thread* self,
+ Handle<String> declaring_class,
+ Handle<String> method_name,
+ Handle<String> file_name,
int32_t line_number)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- static void SetClass(Class* java_lang_StackTraceElement);
+ static void SetClass(ObjPtr<Class> java_lang_StackTraceElement);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -69,7 +71,9 @@
int32_t line_number_;
template<bool kTransactionActive>
- void Init(Handle<String> declaring_class, Handle<String> method_name, Handle<String> file_name,
+ void Init(ObjPtr<String> declaring_class,
+ ObjPtr<String> method_name,
+ ObjPtr<String> file_name,
int32_t line_number)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index 46caa4d..ed1103f 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -48,7 +48,7 @@
}
}
-void String::SetClass(Class* java_lang_String) {
+void String::SetClass(ObjPtr<Class> java_lang_String) {
CHECK(java_lang_String_.IsNull());
CHECK(java_lang_String != nullptr);
CHECK(java_lang_String->IsStringClass());
@@ -93,12 +93,13 @@
int32_t length = string->GetLength();
int32_t length2 = string2->GetLength();
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
- const bool compressible = kUseStringCompression && (string->IsCompressed() && string2->IsCompressed());
- const int32_t length_with_flag = (compressible) ? String::GetFlaggedCount(length + length2)
- : (length + length2);
+ const bool compressible = kUseStringCompression &&
+ (string->IsCompressed() && string2->IsCompressed());
+ const int32_t length_with_flag = compressible ? String::GetFlaggedCount(length + length2)
+ : (length + length2);
SetStringCountVisitor visitor(length_with_flag);
- String* new_string = Alloc<true>(self, length_with_flag, allocator_type, visitor);
+ ObjPtr<String> new_string = Alloc<true>(self, length_with_flag, allocator_type, visitor);
if (UNLIKELY(new_string == nullptr)) {
return nullptr;
}
@@ -123,7 +124,7 @@
memcpy(new_value + length, string2->GetValue(), length2 * sizeof(uint16_t));
}
}
- return new_string;
+ return new_string.Ptr();
}
String* String::AllocFromUtf16(Thread* self, int32_t utf16_length, const uint16_t* utf16_data_in) {
@@ -134,7 +135,7 @@
int32_t length_with_flag = (compressible) ? String::GetFlaggedCount(utf16_length)
: utf16_length;
SetStringCountVisitor visitor(length_with_flag);
- String* string = Alloc<true>(self, length_with_flag, allocator_type, visitor);
+ ObjPtr<String> string = Alloc<true>(self, length_with_flag, allocator_type, visitor);
if (UNLIKELY(string == nullptr)) {
return nullptr;
}
@@ -146,7 +147,7 @@
uint16_t* array = string->GetValue();
memcpy(array, utf16_data_in, utf16_length * sizeof(uint16_t));
}
- return string;
+ return string.Ptr();
}
String* String::AllocFromModifiedUtf8(Thread* self, const char* utf) {
@@ -156,18 +157,22 @@
return AllocFromModifiedUtf8(self, char_count, utf, byte_count);
}
-String* String::AllocFromModifiedUtf8(Thread* self, int32_t utf16_length, const char* utf8_data_in) {
+String* String::AllocFromModifiedUtf8(Thread* self,
+ int32_t utf16_length,
+ const char* utf8_data_in) {
return AllocFromModifiedUtf8(self, utf16_length, utf8_data_in, strlen(utf8_data_in));
}
-String* String::AllocFromModifiedUtf8(Thread* self, int32_t utf16_length,
- const char* utf8_data_in, int32_t utf8_length) {
+String* String::AllocFromModifiedUtf8(Thread* self,
+ int32_t utf16_length,
+ const char* utf8_data_in,
+ int32_t utf8_length) {
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
const bool compressible = kUseStringCompression && (utf16_length == utf8_length);
const int32_t utf16_length_with_flag = (compressible) ? String::GetFlaggedCount(utf16_length)
: utf16_length;
SetStringCountVisitor visitor(utf16_length_with_flag);
- String* string = Alloc<true>(self, utf16_length_with_flag, allocator_type, visitor);
+ ObjPtr<String> string = Alloc<true>(self, utf16_length_with_flag, allocator_type, visitor);
if (UNLIKELY(string == nullptr)) {
return nullptr;
}
@@ -177,10 +182,10 @@
uint16_t* utf16_data_out = string->GetValue();
ConvertModifiedUtf8ToUtf16(utf16_data_out, utf16_length, utf8_data_in, utf8_length);
}
- return string;
+ return string.Ptr();
}
-bool String::Equals(String* that) {
+bool String::Equals(ObjPtr<String> that) {
if (this == that) {
// Quick reference equality test
return true;
@@ -281,9 +286,9 @@
return result;
}
-int32_t String::CompareTo(String* rhs) {
+int32_t String::CompareTo(ObjPtr<String> rhs) {
// Quick test for comparison of a string with itself.
- String* lhs = this;
+ ObjPtr<String> lhs = this;
if (lhs == rhs) {
return 0;
}
@@ -298,7 +303,9 @@
int32_t countDiff = lhsCount - rhsCount;
int32_t minCount = (countDiff < 0) ? lhsCount : rhsCount;
if (lhs->IsCompressed() && rhs->IsCompressed()) {
- int32_t comparison = memcmp(lhs->GetValueCompressed(), rhs->GetValueCompressed(), minCount * sizeof(uint8_t));
+ int32_t comparison = memcmp(lhs->GetValueCompressed(),
+ rhs->GetValueCompressed(),
+ minCount * sizeof(uint8_t));
if (comparison != 0) {
return comparison;
}
@@ -326,7 +333,7 @@
CharArray* String::ToCharArray(Thread* self) {
StackHandleScope<1> hs(self);
Handle<String> string(hs.NewHandle(this));
- CharArray* result = CharArray::Alloc(self, GetLength());
+ ObjPtr<CharArray> result = CharArray::Alloc(self, GetLength());
if (result != nullptr) {
if (string->IsCompressed()) {
int32_t length = string->GetLength();
@@ -339,7 +346,7 @@
} else {
self->AssertPendingOOMException();
}
- return result;
+ return result.Ptr();
}
void String::GetChars(int32_t start, int32_t end, Handle<CharArray> array, int32_t index) {
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index a18692f..cfb1153 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -146,7 +146,7 @@
bool Equals(const StringPiece& modified_utf8)
REQUIRES_SHARED(Locks::mutator_lock_);
- bool Equals(String* that) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool Equals(ObjPtr<String> that) REQUIRES_SHARED(Locks::mutator_lock_);
// Compare UTF-16 code point values not in a locale-sensitive manner
int Compare(int32_t utf16_length, const char* utf8_data_in);
@@ -165,7 +165,7 @@
int32_t FastIndexOf(MemoryType* chars, int32_t ch, int32_t start)
REQUIRES_SHARED(Locks::mutator_lock_);
- int32_t CompareTo(String* other) REQUIRES_SHARED(Locks::mutator_lock_);
+ int32_t CompareTo(ObjPtr<String> other) REQUIRES_SHARED(Locks::mutator_lock_);
CharArray* ToCharArray(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
@@ -200,7 +200,7 @@
return java_lang_String_.Read();
}
- static void SetClass(Class* java_lang_String) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void SetClass(ObjPtr<Class> java_lang_String) REQUIRES_SHARED(Locks::mutator_lock_);
static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index 8f3ed84..7aff3de 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -33,7 +33,7 @@
GcRoot<Class> Throwable::java_lang_Throwable_;
-void Throwable::SetDetailMessage(String* new_detail_message) {
+void Throwable::SetDetailMessage(ObjPtr<String> new_detail_message) {
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_), new_detail_message);
} else {
@@ -42,7 +42,7 @@
}
}
-void Throwable::SetCause(Throwable* cause) {
+void Throwable::SetCause(ObjPtr<Throwable> cause) {
CHECK(cause != nullptr);
CHECK(cause != this);
Throwable* current_cause = GetFieldObject<Throwable>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_));
@@ -54,7 +54,7 @@
}
}
-void Throwable::SetStackState(Object* state) REQUIRES_SHARED(Locks::mutator_lock_) {
+void Throwable::SetStackState(ObjPtr<Object> state) REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(state != nullptr);
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObjectVolatile<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, backtrace_), state);
@@ -71,11 +71,11 @@
}
int32_t Throwable::GetStackDepth() {
- Object* stack_state = GetStackState();
+ ObjPtr<Object> stack_state = GetStackState();
if (stack_state == nullptr || !stack_state->IsObjectArray()) {
return -1;
}
- mirror::ObjectArray<mirror::Object>* const trace = stack_state->AsObjectArray<mirror::Object>();
+ ObjPtr<mirror::ObjectArray<Object>> const trace = stack_state->AsObjectArray<Object>();
const int32_t array_len = trace->GetLength();
DCHECK_GT(array_len, 0);
// See method BuildInternalStackTraceVisitor::Init for the format.
@@ -85,22 +85,21 @@
std::string Throwable::Dump() {
std::string result(PrettyTypeOf(this));
result += ": ";
- String* msg = GetDetailMessage();
+ ObjPtr<String> msg = GetDetailMessage();
if (msg != nullptr) {
result += msg->ToModifiedUtf8();
}
result += "\n";
- Object* stack_state = GetStackState();
+ ObjPtr<Object> stack_state = GetStackState();
// check stack state isn't missing or corrupt
if (stack_state != nullptr && stack_state->IsObjectArray()) {
- mirror::ObjectArray<mirror::Object>* object_array =
- stack_state->AsObjectArray<mirror::Object>();
+ ObjPtr<ObjectArray<Object>> object_array = stack_state->AsObjectArray<Object>();
// Decode the internal stack trace into the depth and method trace
// See method BuildInternalStackTraceVisitor::Init for the format.
DCHECK_GT(object_array->GetLength(), 0);
- mirror::Object* methods_and_dex_pcs = object_array->Get(0);
+ ObjPtr<Object> methods_and_dex_pcs = object_array->Get(0);
DCHECK(methods_and_dex_pcs->IsIntArray() || methods_and_dex_pcs->IsLongArray());
- mirror::PointerArray* method_trace = down_cast<mirror::PointerArray*>(methods_and_dex_pcs);
+ ObjPtr<PointerArray> method_trace = ObjPtr<PointerArray>::DownCast(methods_and_dex_pcs);
const int32_t array_len = method_trace->GetLength();
CHECK_EQ(array_len % 2, 0);
const auto depth = array_len / 2;
@@ -118,11 +117,12 @@
}
}
} else {
- Object* stack_trace = GetStackTrace();
+ ObjPtr<Object> stack_trace = GetStackTrace();
if (stack_trace != nullptr && stack_trace->IsObjectArray()) {
CHECK_EQ(stack_trace->GetClass()->GetComponentType(),
StackTraceElement::GetStackTraceElement());
- auto* ste_array = down_cast<ObjectArray<StackTraceElement>*>(stack_trace);
+ ObjPtr<ObjectArray<StackTraceElement>> ste_array =
+ ObjPtr<ObjectArray<StackTraceElement>>::DownCast(stack_trace);
if (ste_array->GetLength() == 0) {
result += "(Throwable with empty stack trace)";
} else {
@@ -142,7 +142,7 @@
result += "(Throwable with no stack trace)";
}
}
- Throwable* cause = GetFieldObject<Throwable>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_));
+ ObjPtr<Throwable> cause = GetFieldObject<Throwable>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_));
if (cause != nullptr && cause != this) { // Constructor makes cause == this by default.
result += "Caused by: ";
result += cause->Dump();
@@ -150,7 +150,7 @@
return result;
}
-void Throwable::SetClass(Class* java_lang_Throwable) {
+void Throwable::SetClass(ObjPtr<Class> java_lang_Throwable) {
CHECK(java_lang_Throwable_.IsNull());
CHECK(java_lang_Throwable != nullptr);
java_lang_Throwable_ = GcRoot<Class>(java_lang_Throwable);
diff --git a/runtime/mirror/throwable.h b/runtime/mirror/throwable.h
index 76824cb..0a4ab6f 100644
--- a/runtime/mirror/throwable.h
+++ b/runtime/mirror/throwable.h
@@ -31,7 +31,7 @@
// C++ mirror of java.lang.Throwable
class MANAGED Throwable : public Object {
public:
- void SetDetailMessage(String* new_detail_message) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetDetailMessage(ObjPtr<String> new_detail_message) REQUIRES_SHARED(Locks::mutator_lock_);
String* GetDetailMessage() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_));
@@ -42,8 +42,8 @@
// This is a runtime version of initCause, you shouldn't use it if initCause may have been
// overridden. Also it asserts rather than throwing exceptions. Currently this is only used
// in cases like the verifier where the checks cannot fail and initCause isn't overridden.
- void SetCause(Throwable* cause) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetStackState(Object* state) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetCause(ObjPtr<Throwable> cause) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetStackState(ObjPtr<Object> state) REQUIRES_SHARED(Locks::mutator_lock_);
bool IsCheckedException() REQUIRES_SHARED(Locks::mutator_lock_);
static Class* GetJavaLangThrowable() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -53,7 +53,7 @@
int32_t GetStackDepth() REQUIRES_SHARED(Locks::mutator_lock_);
- static void SetClass(Class* java_lang_Throwable);
+ static void SetClass(ObjPtr<Class> java_lang_Throwable);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
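
Note on the pattern above: bare mirror::* pointers in locals and parameters become ObjPtr<>
so stale references can be caught. A minimal stand-alone sketch of the wrapper shape these
call sites rely on (illustrative only; the real art::ObjPtr additionally poisons the pointer
bits in debug builds and verifies them on each access):

    // Simplified stand-in for art::ObjPtr, for illustration only.
    #include <cstddef>

    template <typename MirrorType>
    class SketchObjPtr {
     public:
      SketchObjPtr(MirrorType* ptr = nullptr) : ptr_(ptr) {}  // NOLINT(runtime/explicit)

      MirrorType* operator->() const { return ptr_; }
      MirrorType* Ptr() const { return ptr_; }  // Escape hatch for raw-pointer APIs.
      bool operator==(std::nullptr_t) const { return ptr_ == nullptr; }
      bool operator!=(std::nullptr_t) const { return ptr_ != nullptr; }

      // Unchecked downcast, mirroring the ObjPtr<T>::DownCast calls above.
      template <typename SourceType>
      static SketchObjPtr<MirrorType> DownCast(SketchObjPtr<SourceType> other) {
        return SketchObjPtr<MirrorType>(static_cast<MirrorType*>(other.Ptr()));
      }

     private:
      MirrorType* ptr_;
    };
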
diff --git a/runtime/monitor_android.cc b/runtime/monitor_android.cc
index 0d1839b..1dd60f8 100644
--- a/runtime/monitor_android.cc
+++ b/runtime/monitor_android.cc
@@ -21,7 +21,7 @@
#include <sys/stat.h>
#include <sys/types.h>
-#include <android/log.h>
+#include <log/log.h>
#define EVENT_LOG_TAG_dvm_lock_sample 20003
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 086da60..df0849a 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -218,7 +218,7 @@
{
ScopedObjectAccess soa(env);
ObjPtr<mirror::Object> dex_files_object = soa.Decode<mirror::Object>(cookie);
- mirror::LongArray* long_dex_files = dex_files_object->AsLongArray();
+ ObjPtr<mirror::LongArray> long_dex_files = dex_files_object->AsLongArray();
// Delete dex files associated with this dalvik.system.DexFile since there should not be running
// code using it. dex_files is a vector due to multidex.
ClassLinker* const class_linker = runtime->GetClassLinker();
@@ -279,15 +279,15 @@
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader>(javaLoader)));
class_linker->RegisterDexFile(*dex_file, class_loader.Get());
- mirror::Class* result = class_linker->DefineClass(soa.Self(),
- descriptor.c_str(),
- hash,
- class_loader,
- *dex_file,
- *dex_class_def);
+ ObjPtr<mirror::Class> result = class_linker->DefineClass(soa.Self(),
+ descriptor.c_str(),
+ hash,
+ class_loader,
+ *dex_file,
+ *dex_class_def);
// Add the used dex file. This is only required for the DexFile.loadClass API since normal
// class loaders already keep their dex files live.
- class_linker->InsertDexFileInToClassLoader(soa.Decode<mirror::Object>(dexFile).Ptr(),
+ class_linker->InsertDexFileInToClassLoader(soa.Decode<mirror::Object>(dexFile),
class_loader.Get());
if (result != nullptr) {
VLOG(class_linker) << "DexFile_defineClassNative returning " << result
diff --git a/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc b/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc
index e32545b..db245aa 100644
--- a/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc
+++ b/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc
@@ -150,14 +150,18 @@
Handle<mirror::ClassLoader> class_loader(
handle_scope.NewHandle(soa.Decode<mirror::ClassLoader>(loader)));
class_linker->RegisterDexFile(*dex_file, class_loader.Get());
- mirror::Class* result = class_linker->DefineClass(
- soa.Self(), class_descriptor, hash, class_loader, *dex_file, *dex_class_def);
+ ObjPtr<mirror::Class> result = class_linker->DefineClass(
+ soa.Self(),
+ class_descriptor,
+ hash, class_loader,
+ *dex_file,
+ *dex_class_def);
if (result != nullptr) {
// Ensure the class table has a strong reference to the
// InMemoryClassLoader/DexData instance now that a class has
// been loaded.
- class_linker->InsertDexFileInToClassLoader(
- soa.Decode<mirror::Object>(dexData).Ptr(), class_loader.Get());
+ class_linker->InsertDexFileInToClassLoader(soa.Decode<mirror::Object>(dexData),
+ class_loader.Get());
return soa.AddLocalReference<jclass>(result);
}
}
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index e458e2d..888fddb 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -74,21 +74,23 @@
ThrowNegativeArraySizeException(length);
return nullptr;
}
- mirror::Class* element_class = soa.Decode<mirror::Class>(javaElementClass).Ptr();
+ ObjPtr<mirror::Class> element_class = soa.Decode<mirror::Class>(javaElementClass);
if (UNLIKELY(element_class == nullptr)) {
ThrowNullPointerException("element class == null");
return nullptr;
}
Runtime* runtime = Runtime::Current();
- mirror::Class* array_class =
+ ObjPtr<mirror::Class> array_class =
runtime->GetClassLinker()->FindArrayClass(soa.Self(), &element_class);
if (UNLIKELY(array_class == nullptr)) {
return nullptr;
}
gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentNonMovingAllocator();
- mirror::Array* result = mirror::Array::Alloc<true>(soa.Self(), array_class, length,
- array_class->GetComponentSizeShift(),
- allocator);
+ ObjPtr<mirror::Array> result = mirror::Array::Alloc<true>(soa.Self(),
+ array_class,
+ length,
+ array_class->GetComponentSizeShift(),
+ allocator);
return soa.AddLocalReference<jobject>(result);
}
@@ -99,21 +101,24 @@
ThrowNegativeArraySizeException(length);
return nullptr;
}
- mirror::Class* element_class = soa.Decode<mirror::Class>(javaElementClass).Ptr();
+ ObjPtr<mirror::Class> element_class = soa.Decode<mirror::Class>(javaElementClass);
if (UNLIKELY(element_class == nullptr)) {
ThrowNullPointerException("element class == null");
return nullptr;
}
Runtime* runtime = Runtime::Current();
- mirror::Class* array_class = runtime->GetClassLinker()->FindArrayClass(soa.Self(),
- &element_class);
+ ObjPtr<mirror::Class> array_class = runtime->GetClassLinker()->FindArrayClass(soa.Self(),
+ &element_class);
if (UNLIKELY(array_class == nullptr)) {
return nullptr;
}
gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator();
- mirror::Array* result = mirror::Array::Alloc<true, true>(soa.Self(), array_class, length,
- array_class->GetComponentSizeShift(),
- allocator);
+ ObjPtr<mirror::Array> result = mirror::Array::Alloc<true, true>(
+ soa.Self(),
+ array_class,
+ length,
+ array_class->GetComponentSizeShift(),
+ allocator);
return soa.AddLocalReference<jobject>(result);
}
@@ -127,7 +132,7 @@
ThrowIllegalArgumentException("not an array");
return 0;
}
- if (Runtime::Current()->GetHeap()->IsMovableObject(array.Ptr())) {
+ if (Runtime::Current()->GetHeap()->IsMovableObject(array)) {
ThrowRuntimeException("Trying to get address of movable array object");
return 0;
}
@@ -263,7 +268,7 @@
Runtime::Current()->GetHeap()->GetTaskProcessor()->RunAllTasks(ThreadForEnv(env));
}
-typedef std::map<std::string, mirror::String*> StringTable;
+typedef std::map<std::string, ObjPtr<mirror::String>> StringTable;
class PreloadDexCachesStringsVisitor : public SingleRootVisitor {
public:
@@ -271,7 +276,7 @@
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
- mirror::String* string = root->AsString();
+ ObjPtr<mirror::String> string = root->AsString();
table_->operator[](string->ToModifiedUtf8()) = string;
}
@@ -283,7 +288,7 @@
static void PreloadDexCachesResolveString(
Handle<mirror::DexCache> dex_cache, uint32_t string_idx, StringTable& strings)
REQUIRES_SHARED(Locks::mutator_lock_) {
- mirror::String* string = dex_cache->GetResolvedString(string_idx);
+ ObjPtr<mirror::String> string = dex_cache->GetResolvedString(string_idx);
if (string != nullptr) {
return;
}
@@ -298,10 +303,11 @@
}
// Based on ClassLinker::ResolveType.
-static void PreloadDexCachesResolveType(
- Thread* self, mirror::DexCache* dex_cache, uint32_t type_idx)
+static void PreloadDexCachesResolveType(Thread* self,
+ ObjPtr<mirror::DexCache> dex_cache,
+ uint32_t type_idx)
REQUIRES_SHARED(Locks::mutator_lock_) {
- mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
+ ObjPtr<mirror::Class> klass = dex_cache->GetResolvedType(type_idx);
if (klass != nullptr) {
return;
}
@@ -364,7 +370,7 @@
}
const DexFile* dex_file = dex_cache->GetDexFile();
const DexFile::MethodId& method_id = dex_file->GetMethodId(method_idx);
- mirror::Class* klass = dex_cache->GetResolvedType(method_id.class_idx_);
+ ObjPtr<mirror::Class> klass = dex_cache->GetResolvedType(method_id.class_idx_);
if (klass == nullptr) {
return;
}
@@ -439,25 +445,25 @@
Thread* const self = Thread::Current();
for (const DexFile* dex_file : class_linker->GetBootClassPath()) {
CHECK(dex_file != nullptr);
- mirror::DexCache* const dex_cache = class_linker->FindDexCache(self, *dex_file, true);
+ ObjPtr<mirror::DexCache> const dex_cache = class_linker->FindDexCache(self, *dex_file, true);
// If dex cache was deallocated, just continue.
if (dex_cache == nullptr) {
continue;
}
for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
- mirror::String* string = dex_cache->GetResolvedString(j);
+ ObjPtr<mirror::String> string = dex_cache->GetResolvedString(j);
if (string != nullptr) {
filled->num_strings++;
}
}
for (size_t j = 0; j < dex_cache->NumResolvedTypes(); j++) {
- mirror::Class* klass = dex_cache->GetResolvedType(j);
+ ObjPtr<mirror::Class> klass = dex_cache->GetResolvedType(j);
if (klass != nullptr) {
filled->num_types++;
}
}
for (size_t j = 0; j < dex_cache->NumResolvedFields(); j++) {
- ArtField* field = class_linker->GetResolvedField(j, dex_cache);
+ ArtField* field = class_linker->GetResolvedField(j, dex_cache.Ptr());
if (field != nullptr) {
filled->num_fields++;
}
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 0dd8cdd..36825cb 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -87,10 +87,10 @@
bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(class_loader == nullptr);
- mirror::Class* c = GetMethod()->GetDeclaringClass();
+ ObjPtr<mirror::Class> c = GetMethod()->GetDeclaringClass();
// c is null for runtime methods.
if (c != nullptr) {
- mirror::Object* cl = c->GetClassLoader();
+ ObjPtr<mirror::Object> cl = c->GetClassLoader();
if (cl != nullptr) {
class_loader = cl;
return false;
@@ -99,7 +99,7 @@
return true;
}
- mirror::Object* class_loader;
+ ObjPtr<mirror::Object> class_loader;
};
ScopedFastNativeObjectAccess soa(env);
ClosestUserClassLoaderVisitor visitor(soa.Self());
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index ceb37c4..ac5dbda 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -497,13 +497,13 @@
// Pending exception from GetDeclaredClasses.
return nullptr;
}
- mirror::Class* class_class = mirror::Class::GetJavaLangClass();
- mirror::Class* class_array_class =
+ ObjPtr<mirror::Class> class_class = mirror::Class::GetJavaLangClass();
+ ObjPtr<mirror::Class> class_array_class =
Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(), &class_class);
if (class_array_class == nullptr) {
return nullptr;
}
- mirror::ObjectArray<mirror::Class>* empty_array =
+ ObjPtr<mirror::ObjectArray<mirror::Class>> empty_array =
mirror::ObjectArray<mirror::Class>::Alloc(soa.Self(), class_array_class, 0);
return soa.AddLocalReference<jobjectArray>(empty_array);
}
@@ -527,7 +527,7 @@
if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
return nullptr;
}
- mirror::Object* method = annotations::GetEnclosingMethod(klass);
+ ObjPtr<mirror::Object> method = annotations::GetEnclosingMethod(klass);
if (method != nullptr) {
if (soa.Decode<mirror::Class>(WellKnownClasses::java_lang_reflect_Constructor) ==
method->GetClass()) {
@@ -544,7 +544,7 @@
if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
return nullptr;
}
- mirror::Object* method = annotations::GetEnclosingMethod(klass);
+ ObjPtr<mirror::Object> method = annotations::GetEnclosingMethod(klass);
if (method != nullptr) {
if (soa.Decode<mirror::Class>(WellKnownClasses::java_lang_reflect_Method) ==
method->GetClass()) {
@@ -660,7 +660,7 @@
// Invoke the string allocator to return an empty string for the string class.
if (klass->IsStringClass()) {
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
- mirror::Object* obj = mirror::String::AllocEmptyString<true>(soa.Self(), allocator_type);
+ ObjPtr<mirror::Object> obj = mirror::String::AllocEmptyString<true>(soa.Self(), allocator_type);
if (UNLIKELY(soa.Self()->IsExceptionPending())) {
return nullptr;
} else {
diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc
index 1fd7ed1..71379a5 100644
--- a/runtime/native/java_lang_DexCache.cc
+++ b/runtime/native/java_lang_DexCache.cc
@@ -68,7 +68,7 @@
ScopedFastNativeObjectAccess soa(env);
ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache);
CHECK_LT(static_cast<size_t>(type_index), dex_cache->NumResolvedTypes());
- dex_cache->SetResolvedType(type_index, soa.Decode<mirror::Class>(type).Ptr());
+ dex_cache->SetResolvedType(type_index, soa.Decode<mirror::Class>(type));
}
static void DexCache_setResolvedString(JNIEnv* env, jobject javaDexCache, jint string_index,
@@ -76,7 +76,7 @@
ScopedFastNativeObjectAccess soa(env);
ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache);
CHECK_LT(static_cast<size_t>(string_index), dex_cache->GetDexFile()->NumStringIds());
- dex_cache->SetResolvedString(string_index, soa.Decode<mirror::String>(string).Ptr());
+ dex_cache->SetResolvedString(string_index, soa.Decode<mirror::String>(string));
}
static JNINativeMethod gMethods[] = {
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index 5a49c20..ea266d1 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -57,7 +57,8 @@
int32_t length_this = string_this->GetLength();
int32_t length_arg = string_arg->GetLength();
if (length_arg > 0 && length_this > 0) {
- mirror::String* result = mirror::String::AllocFromStrings(soa.Self(), string_this, string_arg);
+ ObjPtr<mirror::String> result =
+ mirror::String::AllocFromStrings(soa.Self(), string_this, string_arg);
return soa.AddLocalReference<jstring>(result);
}
jobject string_original = (length_this == 0) ? java_string_arg : java_this;
@@ -76,8 +77,11 @@
StackHandleScope<1> hs(soa.Self());
Handle<mirror::String> string_this(hs.NewHandle(soa.Decode<mirror::String>(java_this)));
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
- mirror::String* result = mirror::String::AllocFromString<true>(soa.Self(), length, string_this,
- start, allocator_type);
+ ObjPtr<mirror::String> result = mirror::String::AllocFromString<true>(soa.Self(),
+ length,
+ string_this,
+ start,
+ allocator_type);
return soa.AddLocalReference<jstring>(result);
}
diff --git a/runtime/native/java_lang_StringFactory.cc b/runtime/native/java_lang_StringFactory.cc
index 119f2b8..e0738a4 100644
--- a/runtime/native/java_lang_StringFactory.cc
+++ b/runtime/native/java_lang_StringFactory.cc
@@ -44,9 +44,12 @@
return nullptr;
}
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
- mirror::String* result = mirror::String::AllocFromByteArray<true>(soa.Self(), byte_count,
- byte_array, offset, high,
- allocator_type);
+ ObjPtr<mirror::String> result = mirror::String::AllocFromByteArray<true>(soa.Self(),
+ byte_count,
+ byte_array,
+ offset,
+ high,
+ allocator_type);
return soa.AddLocalReference<jstring>(result);
}
@@ -58,9 +61,11 @@
StackHandleScope<1> hs(soa.Self());
Handle<mirror::CharArray> char_array(hs.NewHandle(soa.Decode<mirror::CharArray>(java_data)));
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
- mirror::String* result = mirror::String::AllocFromCharArray<true>(soa.Self(), char_count,
- char_array, offset,
- allocator_type);
+ ObjPtr<mirror::String> result = mirror::String::AllocFromCharArray<true>(soa.Self(),
+ char_count,
+ char_array,
+ offset,
+ allocator_type);
return soa.AddLocalReference<jstring>(result);
}
@@ -73,8 +78,11 @@
StackHandleScope<1> hs(soa.Self());
Handle<mirror::String> string(hs.NewHandle(soa.Decode<mirror::String>(to_copy)));
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
- mirror::String* result = mirror::String::AllocFromString<true>(soa.Self(), string->GetLength(),
- string, 0, allocator_type);
+ ObjPtr<mirror::String> result = mirror::String::AllocFromString<true>(soa.Self(),
+ string->GetLength(),
+ string,
+ 0,
+ allocator_type);
return soa.AddLocalReference<jstring>(result);
}
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index 3f5fa73..eaf2d65 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -71,8 +71,8 @@
ThrowArrayStoreException_NotAnArray("destination", dstObject);
return;
}
- mirror::Array* srcArray = srcObject->AsArray();
- mirror::Array* dstArray = dstObject->AsArray();
+ ObjPtr<mirror::Array> srcArray = srcObject->AsArray();
+ ObjPtr<mirror::Array> dstArray = dstObject->AsArray();
// Bounds checking.
if (UNLIKELY(srcPos < 0) || UNLIKELY(dstPos < 0) || UNLIKELY(count < 0) ||
@@ -85,8 +85,8 @@
return;
}
- mirror::Class* dstComponentType = dstArray->GetClass()->GetComponentType();
- mirror::Class* srcComponentType = srcArray->GetClass()->GetComponentType();
+ ObjPtr<mirror::Class> dstComponentType = dstArray->GetClass()->GetComponentType();
+ ObjPtr<mirror::Class> srcComponentType = srcArray->GetClass()->GetComponentType();
Primitive::Type dstComponentPrimitiveType = dstComponentType->GetPrimitiveType();
if (LIKELY(srcComponentType == dstComponentType)) {
@@ -143,8 +143,10 @@
return;
}
// Arrays hold distinct types and so therefore can't alias - use memcpy instead of memmove.
- mirror::ObjectArray<mirror::Object>* dstObjArray = dstArray->AsObjectArray<mirror::Object>();
- mirror::ObjectArray<mirror::Object>* srcObjArray = srcArray->AsObjectArray<mirror::Object>();
+ ObjPtr<mirror::ObjectArray<mirror::Object>> dstObjArray =
+ dstArray->AsObjectArray<mirror::Object>();
+ ObjPtr<mirror::ObjectArray<mirror::Object>> srcObjArray =
+ srcArray->AsObjectArray<mirror::Object>();
// If we're assigning into say Object[] then we don't need per element checks.
if (dstComponentType->IsAssignableFrom(srcComponentType)) {
dstObjArray->AssignableMemcpy(dstPos, srcObjArray, srcPos, count);
@@ -157,8 +159,9 @@
// Template to convert general array to that of its specific primitive type.
template <typename T>
-inline T* AsPrimitiveArray(mirror::Array* array) {
- return down_cast<T*>(array);
+inline ObjPtr<T> AsPrimitiveArray(ObjPtr<mirror::Array> array)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return ObjPtr<T>::DownCast(array);
}
template <typename T, Primitive::Type kPrimType>
@@ -168,8 +171,8 @@
ObjPtr<mirror::Object> srcObject = soa.Decode<mirror::Object>(javaSrc);
ObjPtr<mirror::Object> dstObject = soa.Decode<mirror::Object>(javaDst);
DCHECK(dstObject != nullptr);
- mirror::Array* srcArray = srcObject->AsArray();
- mirror::Array* dstArray = dstObject->AsArray();
+ ObjPtr<mirror::Array> srcArray = srcObject->AsArray();
+ ObjPtr<mirror::Array> dstArray = dstObject->AsArray();
DCHECK_GE(count, 0);
DCHECK_EQ(srcArray->GetClass(), dstArray->GetClass());
DCHECK_EQ(srcArray->GetClass()->GetComponentType()->GetPrimitiveType(), kPrimType);
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index 73d12f1..1fe89bf 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -37,24 +37,24 @@
ClassLinker* cl = Runtime::Current()->GetClassLinker();
std::string descriptor(DotToDescriptor(name.c_str()));
const size_t descriptor_hash = ComputeModifiedUtf8Hash(descriptor.c_str());
- mirror::Class* c = cl->LookupClass(soa.Self(),
- descriptor.c_str(),
- descriptor_hash,
- loader.Ptr());
+ ObjPtr<mirror::Class> c = cl->LookupClass(soa.Self(),
+ descriptor.c_str(),
+ descriptor_hash,
+ loader.Ptr());
if (c != nullptr && c->IsResolved()) {
return soa.AddLocalReference<jclass>(c);
}
// If class is erroneous, throw the earlier failure, wrapped in certain cases. See b/28787733.
if (c != nullptr && c->IsErroneous()) {
- cl->ThrowEarlierClassFailure(c);
+ cl->ThrowEarlierClassFailure(c.Ptr());
Thread* self = soa.Self();
- mirror::Class* eiie_class =
+ ObjPtr<mirror::Class> eiie_class =
self->DecodeJObject(WellKnownClasses::java_lang_ExceptionInInitializerError)->AsClass();
- mirror::Class* iae_class =
+ ObjPtr<mirror::Class> iae_class =
self->DecodeJObject(WellKnownClasses::java_lang_IllegalAccessError)->AsClass();
- mirror::Class* ncdfe_class =
+ ObjPtr<mirror::Class> ncdfe_class =
self->DecodeJObject(WellKnownClasses::java_lang_NoClassDefFoundError)->AsClass();
- mirror::Class* exception = self->GetException()->GetClass();
+ ObjPtr<mirror::Class> exception = self->GetException()->GetClass();
if (exception == eiie_class || exception == iae_class || exception == ncdfe_class) {
self->ThrowNewWrappedException("Ljava/lang/ClassNotFoundException;",
PrettyDescriptor(c).c_str());
diff --git a/runtime/native/java_lang_ref_FinalizerReference.cc b/runtime/native/java_lang_ref_FinalizerReference.cc
index 1f03c7c..c7d06f4 100644
--- a/runtime/native/java_lang_ref_FinalizerReference.cc
+++ b/runtime/native/java_lang_ref_FinalizerReference.cc
@@ -28,8 +28,7 @@
static jboolean FinalizerReference_makeCircularListIfUnenqueued(JNIEnv* env, jobject javaThis) {
ScopedFastNativeObjectAccess soa(env);
ObjPtr<mirror::FinalizerReference> ref = soa.Decode<mirror::FinalizerReference>(javaThis);
- return Runtime::Current()->GetHeap()->GetReferenceProcessor()->MakeCircularListIfUnenqueued(
- ref.Ptr());
+ return Runtime::Current()->GetHeap()->GetReferenceProcessor()->MakeCircularListIfUnenqueued(ref);
}
static JNINativeMethod gMethods[] = {
diff --git a/runtime/native/java_lang_reflect_Array.cc b/runtime/native/java_lang_reflect_Array.cc
index 6f2da33..d827f81 100644
--- a/runtime/native/java_lang_reflect_Array.cc
+++ b/runtime/native/java_lang_reflect_Array.cc
@@ -40,8 +40,9 @@
DCHECK_EQ(dimensions_obj->GetClass()->GetComponentType()->GetPrimitiveType(),
Primitive::kPrimInt);
Handle<mirror::IntArray> dimensions_array(
- hs.NewHandle(down_cast<mirror::IntArray*>(dimensions_obj.Ptr())));
- mirror::Array* new_array = mirror::Array::CreateMultiArray(soa.Self(), element_class,
+ hs.NewHandle(ObjPtr<mirror::IntArray>::DownCast(dimensions_obj)));
+ mirror::Array* new_array = mirror::Array::CreateMultiArray(soa.Self(),
+ element_class,
dimensions_array);
return soa.AddLocalReference<jobject>(new_array);
}
@@ -53,17 +54,20 @@
ThrowNegativeArraySizeException(length);
return nullptr;
}
- mirror::Class* element_class = soa.Decode<mirror::Class>(javaElementClass).Ptr();
+ ObjPtr<mirror::Class> element_class = soa.Decode<mirror::Class>(javaElementClass);
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
- mirror::Class* array_class = class_linker->FindArrayClass(soa.Self(), &element_class);
+ ObjPtr<mirror::Class> array_class = class_linker->FindArrayClass(soa.Self(), &element_class);
if (UNLIKELY(array_class == nullptr)) {
CHECK(soa.Self()->IsExceptionPending());
return nullptr;
}
DCHECK(array_class->IsObjectArrayClass());
- mirror::Array* new_array = mirror::ObjectArray<mirror::Object*>::Alloc(
- soa.Self(), array_class, length, runtime->GetHeap()->GetCurrentAllocator());
+ ObjPtr<mirror::Array> new_array = mirror::ObjectArray<mirror::Object*>::Alloc(
+ soa.Self(),
+ array_class,
+ length,
+ runtime->GetHeap()->GetCurrentAllocator());
return soa.AddLocalReference<jobject>(new_array);
}
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 505f85d..a81ba7d 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -39,13 +39,13 @@
annotations::GetExceptionTypesForMethod(method);
if (result_array == nullptr) {
// Return an empty array instead of a null pointer.
- mirror::Class* class_class = mirror::Class::GetJavaLangClass();
- mirror::Class* class_array_class =
+ ObjPtr<mirror::Class> class_class = mirror::Class::GetJavaLangClass();
+ ObjPtr<mirror::Class> class_array_class =
Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(), &class_class);
if (class_array_class == nullptr) {
return nullptr;
}
- mirror::ObjectArray<mirror::Class>* empty_array =
+ ObjPtr<mirror::ObjectArray<mirror::Class>> empty_array =
mirror::ObjectArray<mirror::Class>::Alloc(soa.Self(), class_array_class, 0);
return soa.AddLocalReference<jobjectArray>(empty_array);
} else {
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index b5f2f7c..a6589bc 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -44,7 +44,7 @@
ScopedFastNativeObjectAccess soa(env);
ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
if (method->GetDeclaringClass()->IsProxyClass()) {
- mirror::Class* klass = method->GetDeclaringClass();
+ ObjPtr<mirror::Class> klass = method->GetDeclaringClass();
int throws_index = -1;
size_t i = 0;
for (const auto& m : klass->GetDeclaredVirtualMethods(kRuntimePointerSize)) {
@@ -62,8 +62,8 @@
annotations::GetExceptionTypesForMethod(method);
if (result_array == nullptr) {
// Return an empty array instead of a null pointer
- mirror::Class* class_class = mirror::Class::GetJavaLangClass();
- mirror::Class* class_array_class =
+ ObjPtr<mirror::Class> class_class = mirror::Class::GetJavaLangClass();
+ ObjPtr<mirror::Class> class_array_class =
Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(), &class_class);
if (class_array_class == nullptr) {
return nullptr;
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index 670c4ac..cdf4b14 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -305,7 +305,8 @@
}
template<typename T>
-static void copyToArray(jlong srcAddr, mirror::PrimitiveArray<T>* array,
+static void copyToArray(jlong srcAddr,
+ ObjPtr<mirror::PrimitiveArray<T>> array,
size_t array_offset,
size_t size)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -318,7 +319,8 @@
}
template<typename T>
-static void copyFromArray(jlong dstAddr, mirror::PrimitiveArray<T>* array,
+static void copyFromArray(jlong dstAddr,
+ ObjPtr<mirror::PrimitiveArray<T>> array,
size_t array_offset,
size_t size)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -347,15 +349,15 @@
size_t sz = (size_t)size;
size_t dst_offset = (size_t)dstOffset;
ObjPtr<mirror::Object> dst = soa.Decode<mirror::Object>(dstObj);
- mirror::Class* component_type = dst->GetClass()->GetComponentType();
+ ObjPtr<mirror::Class> component_type = dst->GetClass()->GetComponentType();
if (component_type->IsPrimitiveByte() || component_type->IsPrimitiveBoolean()) {
- copyToArray(srcAddr, dst->AsByteSizedArray(), dst_offset, sz);
+ copyToArray(srcAddr, MakeObjPtr(dst->AsByteSizedArray()), dst_offset, sz);
} else if (component_type->IsPrimitiveShort() || component_type->IsPrimitiveChar()) {
- copyToArray(srcAddr, dst->AsShortSizedArray(), dst_offset, sz);
+ copyToArray(srcAddr, MakeObjPtr(dst->AsShortSizedArray()), dst_offset, sz);
} else if (component_type->IsPrimitiveInt() || component_type->IsPrimitiveFloat()) {
- copyToArray(srcAddr, dst->AsIntArray(), dst_offset, sz);
+ copyToArray(srcAddr, MakeObjPtr(dst->AsIntArray()), dst_offset, sz);
} else if (component_type->IsPrimitiveLong() || component_type->IsPrimitiveDouble()) {
- copyToArray(srcAddr, dst->AsLongArray(), dst_offset, sz);
+ copyToArray(srcAddr, MakeObjPtr(dst->AsLongArray()), dst_offset, sz);
} else {
ThrowIllegalAccessException("not a primitive array");
}
@@ -378,15 +380,15 @@
size_t sz = (size_t)size;
size_t src_offset = (size_t)srcOffset;
ObjPtr<mirror::Object> src = soa.Decode<mirror::Object>(srcObj);
- mirror::Class* component_type = src->GetClass()->GetComponentType();
+ ObjPtr<mirror::Class> component_type = src->GetClass()->GetComponentType();
if (component_type->IsPrimitiveByte() || component_type->IsPrimitiveBoolean()) {
- copyFromArray(dstAddr, src->AsByteSizedArray(), src_offset, sz);
+ copyFromArray(dstAddr, MakeObjPtr(src->AsByteSizedArray()), src_offset, sz);
} else if (component_type->IsPrimitiveShort() || component_type->IsPrimitiveChar()) {
- copyFromArray(dstAddr, src->AsShortSizedArray(), src_offset, sz);
+ copyFromArray(dstAddr, MakeObjPtr(src->AsShortSizedArray()), src_offset, sz);
} else if (component_type->IsPrimitiveInt() || component_type->IsPrimitiveFloat()) {
- copyFromArray(dstAddr, src->AsIntArray(), src_offset, sz);
+ copyFromArray(dstAddr, MakeObjPtr(src->AsIntArray()), src_offset, sz);
} else if (component_type->IsPrimitiveLong() || component_type->IsPrimitiveDouble()) {
- copyFromArray(dstAddr, src->AsLongArray(), src_offset, sz);
+ copyFromArray(dstAddr, MakeObjPtr(src->AsLongArray()), src_offset, sz);
} else {
ThrowIllegalAccessException("not a primitive array");
}
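
A plausible reason for the MakeObjPtr() wrapping in the hunks above (an assumption, stated
for clarity): once copyToArray/copyFromArray take ObjPtr<mirror::PrimitiveArray<T>>, the
template parameter T can no longer be deduced from a raw pointer argument, so each call site
wraps the raw accessor result explicitly. Simplified stand-ins below, not the ART types:

    // Illustration of the deduction issue; Ptr/MakePtr stand in for ObjPtr/MakeObjPtr.
    template <typename T> struct Ptr { T* raw; };

    template <typename T>
    Ptr<T> MakePtr(T* raw) { return Ptr<T>{raw}; }

    template <typename T>
    void CopyToArray(long /*src_addr*/, Ptr<T> /*array*/, unsigned /*offset*/,
                     unsigned /*size*/) {}

    struct ByteArray { char data[16]; };

    void Caller(ByteArray* raw_array) {
      // CopyToArray(0L, raw_array, 0u, 16u);        // Error: T not deducible from ByteArray*.
      CopyToArray(0L, MakePtr(raw_array), 0u, 16u);  // OK: T deduced as ByteArray.
    }
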
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index f164a92..c14b616 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -959,7 +959,7 @@
DCHECK(!error_msg->empty());
return false;
}
- bool loaded = elf_file_->Load(executable, low_4gb, error_msg);
+ bool loaded = elf_file_->Load(file, executable, low_4gb, error_msg);
DCHECK(loaded || !error_msg->empty());
return loaded;
}
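
This hunk, together with the runtime.cc change later in this diff that switches ElfFile::Open
from file.release() to file.get(), appears to reflect ElfFile no longer owning the File: the
caller keeps the unique_ptr alive and passes the file down to Load() explicitly. A minimal
ownership sketch under that assumption (not the actual ElfFile API):

    #include <memory>

    struct File {};

    struct Borrower {
      File* file = nullptr;           // Borrowed, never deleted by Borrower.
      void Open(File* f) { file = f; }
    };

    void Caller() {
      std::unique_ptr<File> file(new File());
      Borrower b;
      b.Open(file.get());             // release() here would leak, since Borrower
                                      // does not take ownership anymore.
    }                                 // unique_ptr still owns and frees the File.
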
diff --git a/runtime/openjdkjvmti/events.cc b/runtime/openjdkjvmti/events.cc
index 450f85e..59e01ea 100644
--- a/runtime/openjdkjvmti/events.cc
+++ b/runtime/openjdkjvmti/events.cc
@@ -40,6 +40,8 @@
#include "mirror/object.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-inl.h"
namespace openjdkjvmti {
@@ -172,6 +174,10 @@
};
static void SetupObjectAllocationTracking(art::gc::AllocationListener* listener, bool enable) {
+ // We must not hold the mutator lock here, but if we're in FastJNI, for example, we might. For
+ // now, do a workaround: (possibly) acquire and release.
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ art::ScopedThreadSuspension sts(soa.Self(), art::ThreadState::kSuspended);
if (enable) {
art::Runtime::Current()->GetHeap()->SetAllocationListener(listener);
} else {
@@ -213,6 +219,8 @@
return ERR(INVALID_EVENT_TYPE);
}
+ bool old_state = global_mask.Test(event);
+
if (mode == JVMTI_ENABLE) {
env->event_masks.EnableEvent(thread, event);
global_mask.Set(event);
@@ -233,8 +241,12 @@
global_mask.Set(event, union_value);
}
+ bool new_state = global_mask.Test(event);
+
// Handle any special work required for the event type.
- HandleEventType(event, mode == JVMTI_ENABLE);
+ if (new_state != old_state) {
+ HandleEventType(event, mode == JVMTI_ENABLE);
+ }
return ERR(NONE);
}
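
The old_state/new_state bookkeeping above means the per-event setup work (for example
installing or removing the heap allocation listener) only runs when the global union of all
environments' masks actually flips, so re-enabling an already-enabled event is a no-op. A
sketch of that toggle logic (names simplified, not the real jvmti plumbing):

    #include <bitset>

    struct GlobalMask {
      std::bitset<64> bits;
      bool Test(int event) const { return bits.test(event); }
      void Set(int event, bool value) { bits.set(event, value); }
    };

    template <typename HandlerFn>
    void SetEventNotification(GlobalMask& global_mask, int event, bool enable,
                              HandlerFn handle) {
      const bool old_state = global_mask.Test(event);
      global_mask.Set(event, enable);   // The real code recomputes a union over all envs.
      const bool new_state = global_mask.Test(event);
      if (new_state != old_state) {
        handle(event, enable);          // e.g. SetupObjectAllocationTracking().
      }
    }
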
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 9c0d2db..6e15c38 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -147,6 +147,10 @@
#include "verifier/method_verifier.h"
#include "well_known_classes.h"
+#ifdef ART_TARGET_ANDROID
+#include <android/set_abort_message.h>
+#endif
+
namespace art {
// If a signal isn't handled properly, enable a handler that attempts to dump the Java stack.
@@ -495,7 +499,7 @@
bool Runtime::ParseOptions(const RuntimeOptions& raw_options,
bool ignore_unrecognized,
RuntimeArgumentMap* runtime_options) {
- InitLogging(/* argv */ nullptr); // Calls Locks::Init() as a side effect.
+ InitLogging(/* argv */ nullptr, Aborter); // Calls Locks::Init() as a side effect.
bool parsed = ParsedOptions::Parse(raw_options, ignore_unrecognized, runtime_options);
if (!parsed) {
LOG(ERROR) << "Failed to parse options";
@@ -829,7 +833,7 @@
if (file.get() == nullptr) {
return false;
}
- std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.release(),
+ std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.get(),
false /* writable */,
false /* program_header_only */,
false /* low_4gb */,
@@ -1165,10 +1169,6 @@
ScopedTrace trace2("AddImageStringsToTable");
GetInternTable()->AddImagesStringsToTable(heap_->GetBootImageSpaces());
}
- {
- ScopedTrace trace2("MoveImageClassesToClassTable");
- GetClassLinker()->AddBootImageClassesToClassTable();
- }
} else {
std::vector<std::string> dex_filenames;
Split(boot_class_path_string_, ':', &dex_filenames);
@@ -2099,4 +2099,12 @@
}
}
+NO_RETURN
+void Runtime::Aborter(const char* abort_message) {
+#ifdef __ANDROID__
+ android_set_abort_message(abort_message);
+#endif
+ Runtime::Abort(abort_message);
+}
+
} // namespace art
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 66fd058..e2ba262 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -662,6 +662,9 @@
void AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
void RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
+ NO_RETURN
+ static void Aborter(const char* abort_message);
+
private:
static void InitPlatformSignalHandlers();
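
Runtime::Aborter is the abort callback now handed to InitLogging alongside argv (see the
runtime.cc hunk above that passes Aborter): a fatal log first records the message for the
tombstone via android_set_abort_message() on device builds, then goes through
Runtime::Abort(). A rough sketch of how such a callback slots into a logger (illustrative
signature, not the libbase API):

    #include <cstdio>
    #include <cstdlib>

    using AbortFunction = void (*)(const char*);

    // Stand-in for a LOG(FATAL) path: print, then defer to the registered aborter.
    [[noreturn]] void LogFatal(AbortFunction aborter, const char* message) {
      std::fprintf(stderr, "F art: %s\n", message);
      aborter(message);  // Runtime::Aborter: android_set_abort_message() + Runtime::Abort().
      std::abort();      // Unreachable; preserves the noreturn contract regardless.
    }
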
diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h
index 5ccd446..5ca7684 100644
--- a/runtime/utils/dex_cache_arrays_layout-inl.h
+++ b/runtime/utils/dex_cache_arrays_layout-inl.h
@@ -39,7 +39,7 @@
fields_offset_(
RoundUp(strings_offset_ + StringsSize(header.string_ids_size_), FieldsAlignment())),
method_types_offset_(
- RoundUp(fields_offset_ + FieldsSize(header.field_ids_size_), Alignment())),
+ RoundUp(fields_offset_ + FieldsSize(header.field_ids_size_), MethodTypesAlignment())),
size_(
RoundUp(method_types_offset_ + MethodTypesSize(header.proto_ids_size_), Alignment())) {
}
@@ -51,7 +51,11 @@
inline constexpr size_t DexCacheArraysLayout::Alignment() {
// GcRoot<> alignment is 4, i.e. lower than or equal to the pointer alignment.
static_assert(alignof(GcRoot<mirror::Class>) == 4, "Expecting alignof(GcRoot<>) == 4");
- static_assert(alignof(mirror::StringDexCacheType) == 8, "Expecting alignof(StringDexCacheType) == 8");
+ static_assert(alignof(mirror::StringDexCacheType) == 8,
+ "Expecting alignof(StringDexCacheType) == 8");
+ static_assert(alignof(mirror::MethodTypeDexCacheType) == 8,
+ "Expecting alignof(MethodTypeDexCacheType) == 8");
+ // This is the same as alignof(MethodTypeDexCacheType).
return alignof(mirror::StringDexCacheType);
}
@@ -120,12 +124,6 @@
return static_cast<size_t>(pointer_size_);
}
-inline size_t DexCacheArraysLayout::MethodTypeOffset(uint32_t proto_idx) const {
- return strings_offset_
- + ElementOffset(PointerSize::k64,
- proto_idx % mirror::DexCache::kDexCacheMethodTypeCacheSize);
-}
-
inline size_t DexCacheArraysLayout::MethodTypesSize(size_t num_elements) const {
size_t cache_size = mirror::DexCache::kDexCacheMethodTypeCacheSize;
if (num_elements < cache_size) {
diff --git a/runtime/utils/dex_cache_arrays_layout.h b/runtime/utils/dex_cache_arrays_layout.h
index e222b46..ae3bfab 100644
--- a/runtime/utils/dex_cache_arrays_layout.h
+++ b/runtime/utils/dex_cache_arrays_layout.h
@@ -99,8 +99,6 @@
return method_types_offset_;
}
- size_t MethodTypeOffset(uint32_t method_type_idx) const;
-
size_t MethodTypesSize(size_t num_elements) const;
size_t MethodTypesAlignment() const;
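
The method-types region now rounds its start offset up with MethodTypesAlignment() rather
than the generic Alignment(); per the new static_assert both are 8 bytes today, so the change
mainly makes the intent explicit and keeps the layout correct should the alignments ever
diverge. Sketch of the offset arithmetic (illustrative sizes only):

    #include <cstddef>

    constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

    struct SketchLayout {
      size_t fields_end;           // End of the resolved-fields region.
      size_t method_types_offset;  // Start of the 8-byte-aligned method-type entries.

      explicit SketchLayout(size_t fields_end_in)
          : fields_end(fields_end_in),
            method_types_offset(RoundUp(fields_end_in, /* MethodTypesAlignment() */ 8u)) {}
    };
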
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index a84668b..626d9cf 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -748,7 +748,7 @@
DCHECK(result->IsObjectClass());
return result;
}
- mirror::Class* common_elem = ClassJoin(s_ct, t_ct);
+ ObjPtr<mirror::Class> common_elem = ClassJoin(s_ct, t_ct);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
mirror::Class* array_class = class_linker->FindArrayClass(Thread::Current(), &common_elem);
DCHECK(array_class != nullptr);
diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java
index 89875d7..6b0dedf 100644
--- a/test/530-checker-lse/src/Main.java
+++ b/test/530-checker-lse/src/Main.java
@@ -717,6 +717,33 @@
return sumWithFilter(array, filter);
}
+ private static int mI = 0;
+ private static float mF = 0f;
+
+ /// CHECK-START: float Main.testAllocationEliminationWithLoops() load_store_elimination (before)
+ /// CHECK: NewInstance
+ /// CHECK: NewInstance
+ /// CHECK: NewInstance
+
+ /// CHECK-START: float Main.testAllocationEliminationWithLoops() load_store_elimination (after)
+ /// CHECK-NOT: NewInstance
+
+ private static float testAllocationEliminationWithLoops() {
+ for (int i0 = 0; i0 < 5; i0++) {
+ for (int i1 = 0; i1 < 5; i1++) {
+ for (int i2 = 0; i2 < 5; i2++) {
+ int lI0 = ((int) new Integer(((int) new Integer(mI))));
+ if (((boolean) new Boolean(false))) {
+ for (int i3 = 576 - 1; i3 >= 0; i3--) {
+ mF -= 976981405.0f;
+ }
+ }
+ }
+ }
+ }
+ return 1.0f;
+ }
+
static void assertIntEquals(int result, int expected) {
if (expected != result) {
throw new Error("Expected: " + expected + ", found: " + result);
@@ -779,6 +806,8 @@
assertIntEquals($noinline$testHSelect(true), 0xdead);
int[] array = {2, 5, 9, -1, -3, 10, 8, 4};
assertIntEquals(sumWithinRange(array, 1, 5), 11);
+ assertFloatEquals(testAllocationEliminationWithLoops(), 1.0f);
+ assertFloatEquals(mF, 0f);
}
static boolean sFlag;
diff --git a/test/618-checker-induction/src/Main.java b/test/618-checker-induction/src/Main.java
index 5c789cd..b5606bd 100644
--- a/test/618-checker-induction/src/Main.java
+++ b/test/618-checker-induction/src/Main.java
@@ -31,6 +31,26 @@
}
}
+ /// CHECK-START: void Main.deadSingleLoopN(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:{{B\d+}} outer_loop:none
+ //
+ /// CHECK-START: void Main.deadSingleLoopN(int) loop_optimization (after)
+ /// CHECK-NOT: Phi loop:{{B\d+}} outer_loop:none
+ static void deadSingleLoopN(int n) {
+ for (int i = 0; i < n; i++) {
+ }
+ }
+
+ /// CHECK-START: void Main.potentialInfiniteLoop(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:{{B\d+}} outer_loop:none
+ //
+ /// CHECK-START: void Main.potentialInfiniteLoop(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:{{B\d+}} outer_loop:none
+ static void potentialInfiniteLoop(int n) {
+ for (int i = 0; i <= n; i++) { // loops forever when n = MAX_INT
+ }
+ }
+
/// CHECK-START: void Main.deadNestedLoops() loop_optimization (before)
/// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: Phi loop:{{B\d+}} outer_loop:<<Loop>>
@@ -160,6 +180,10 @@
/// CHECK-START: int Main.closedFormInductionUp() loop_optimization (after)
/// CHECK-NOT: Phi loop:{{B\d+}} outer_loop:none
/// CHECK-DAG: Return loop:none
+ //
+ /// CHECK-START: int Main.closedFormInductionUp() instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Int:i\d+>> IntConstant 12395
+ /// CHECK-DAG: Return [<<Int>>] loop:none
static int closedFormInductionUp() {
int closed = 12345;
for (int i = 0; i < 10; i++) {
@@ -194,6 +218,10 @@
/// CHECK-NOT: Phi loop:{{B\d+}} outer_loop:none
/// CHECK-NOT: Phi loop:{{B\d+}} outer_loop:loop{{B\d+}}
/// CHECK-DAG: Return loop:none
+ //
+ /// CHECK-START: int Main.closedFormNested() instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Int:i\d+>> IntConstant 100
+ /// CHECK-DAG: Return [<<Int>>] loop:none
static int closedFormNested() {
int closed = 0;
for (int i = 0; i < 10; i++) {
@@ -215,6 +243,10 @@
/// CHECK-NOT: Phi loop:{{B\d+}} outer_loop:none
/// CHECK-NOT: Phi loop:{{B\d+}} outer_loop:loop{{B\d+}}
/// CHECK-DAG: Return loop:none
+ //
+ /// CHECK-START: int Main.closedFormNestedAlt() instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Int:i\d+>> IntConstant 15082
+ /// CHECK-DAG: Return [<<Int>>] loop:none
static int closedFormNestedAlt() {
int closed = 12345;
for (int i = 0; i < 17; i++) {
@@ -293,14 +325,29 @@
/// CHECK-START: int Main.mainIndexReturned() loop_optimization (after)
/// CHECK-NOT: Phi loop:{{B\d+}} outer_loop:none
/// CHECK-DAG: Return loop:none
+ //
+ /// CHECK-START: int Main.mainIndexReturned() instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Int:i\d+>> IntConstant 10
+ /// CHECK-DAG: Return [<<Int>>] loop:none
static int mainIndexReturned() {
int i;
for (i = 0; i < 10; i++);
return i;
}
- // If ever replaced by closed form, last value should be correct!
- static int periodicReturned() {
+ /// CHECK-START: int Main.periodicReturned9() loop_optimization (before)
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Return [<<Phi2>>] loop:none
+ //
+ /// CHECK-START: int Main.periodicReturned9() loop_optimization (after)
+ /// CHECK-NOT: Phi loop:{{B\d+}} outer_loop:none
+ /// CHECK-DAG: Return loop:none
+ //
+ /// CHECK-START: int Main.periodicReturned9() instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Int:i\d+>> IntConstant 1
+ /// CHECK-DAG: Return [<<Int>>] loop:none
+ static int periodicReturned9() {
int k = 0;
for (int i = 0; i < 9; i++) {
k = 1 - k;
@@ -308,6 +355,26 @@
return k;
}
+ /// CHECK-START: int Main.periodicReturned10() loop_optimization (before)
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Return [<<Phi2>>] loop:none
+ //
+ /// CHECK-START: int Main.periodicReturned10() loop_optimization (after)
+ /// CHECK-NOT: Phi loop:{{B\d+}} outer_loop:none
+ /// CHECK-DAG: Return loop:none
+ //
+ /// CHECK-START: int Main.periodicReturned10() instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Int:i\d+>> IntConstant 0
+ /// CHECK-DAG: Return [<<Int>>] loop:none
+ static int periodicReturned10() {
+ int k = 0;
+ for (int i = 0; i < 10; i++) {
+ k = 1 - k;
+ }
+ return k;
+ }
+
// If ever replaced by closed form, last value should be correct!
private static int getSum21() {
int k = 0;
@@ -326,7 +393,14 @@
return i;
}
- // If ever replaced by closed form, last value should be correct!
+ /// CHECK-START: int Main.periodicReturnedN(int) loop_optimization (before)
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Return [<<Phi2>>] loop:none
+ //
+ /// CHECK-START: int Main.periodicReturnedN(int) loop_optimization (after)
+ /// CHECK-NOT: Phi loop:{{B\d+}} outer_loop:none
+ /// CHECK-DAG: Return loop:none
static int periodicReturnedN(int n) {
int k = 0;
for (int i = 0; i < n; i++) {
@@ -371,6 +445,10 @@
/// CHECK-START: int Main.closedFeed() loop_optimization (after)
/// CHECK-NOT: Phi loop:{{B\d+}} outer_loop:none
/// CHECK-DAG: Return loop:none
+ //
+ /// CHECK-START: int Main.closedFeed() instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Int:i\d+>> IntConstant 20
+ /// CHECK-DAG: Return [<<Int>>] loop:none
private static int closedFeed() {
int closed = 0;
for (int i = 0; i < 10; i++) {
@@ -392,6 +470,10 @@
/// CHECK-START: int Main.closedLargeUp() loop_optimization (after)
/// CHECK-NOT: Phi loop:B\d+ outer_loop:none
/// CHECK-DAG: Return loop:none
+ //
+ /// CHECK-START: int Main.closedLargeUp() instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Int:i\d+>> IntConstant -10
+ /// CHECK-DAG: Return [<<Int>>] loop:none
private static int closedLargeUp() {
int closed = 0;
for (int i = 0; i < 10; i++) {
@@ -408,6 +490,10 @@
/// CHECK-START: int Main.closedLargeDown() loop_optimization (after)
/// CHECK-NOT: Phi loop:B\d+ outer_loop:none
/// CHECK-DAG: Return loop:none
+ //
+ /// CHECK-START: int Main.closedLargeDown() instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Int:i\d+>> IntConstant 10
+ /// CHECK-DAG: Return [<<Int>>] loop:none
private static int closedLargeDown() {
int closed = 0;
for (int i = 0; i < 10; i++) {
@@ -427,6 +513,10 @@
/// CHECK-START: int Main.waterFall() loop_optimization (after)
/// CHECK-NOT: Phi loop:B\d+ outer_loop:none
/// CHECK-DAG: Return loop:none
+ //
+ /// CHECK-START: int Main.waterFall() instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Int:i\d+>> IntConstant 50
+ /// CHECK-DAG: Return [<<Int>>] loop:none
private static int waterFall() {
int i = 0;
for (; i < 10; i++);
@@ -469,6 +559,8 @@
public static void main(String[] args) {
deadSingleLoop();
+ deadSingleLoopN(4);
+ potentialInfiniteLoop(4);
deadNestedLoops();
deadNestedAndFollowingLoops();
@@ -512,7 +604,8 @@
}
expectEquals(10, mainIndexReturned());
- expectEquals(1, periodicReturned());
+ expectEquals(1, periodicReturned9());
+ expectEquals(0, periodicReturned10());
expectEquals(21, getSum21());
for (int n = -4; n < 4; n++) {
int tc = (n <= 0) ? 0 : n;
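
Worked arithmetic behind the new instruction_simplifier$after_bce checks: the loops fold to
their last value, e.g. closedFormInductionUp() starts at 12345 and (by inference from the
asserted constant) adds 5 per iteration over 10 iterations, giving 12395, while the period-2
sequence k = 1 - k ends at 1 after 9 iterations and at 0 after 10. A small C++ sketch of the
same computations (the test itself is Java):

    #include <cassert>

    int ClosedFormInductionUp() {       // Mirrors Main.closedFormInductionUp().
      int closed = 12345;
      for (int i = 0; i < 10; i++) {
        closed += 5;                    // Step inferred from (12395 - 12345) / 10.
      }
      return closed;                    // Folded to IntConstant 12395.
    }

    int PeriodicReturned(int trip_count) {  // Mirrors periodicReturned9/periodicReturned10.
      int k = 0;
      for (int i = 0; i < trip_count; i++) {
        k = 1 - k;                      // Period-2 sequence 0, 1, 0, 1, ...
      }
      return k;                         // Equals trip_count % 2.
    }

    int main() {
      assert(ClosedFormInductionUp() == 12395);
      assert(PeriodicReturned(9) == 1);   // IntConstant 1 in the checker expectations.
      assert(PeriodicReturned(10) == 0);  // IntConstant 0 in the checker expectations.
    }
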
diff --git a/test/904-object-allocation/src/Main.java b/test/904-object-allocation/src/Main.java
index 9a089bd..fc8a112 100644
--- a/test/904-object-allocation/src/Main.java
+++ b/test/904-object-allocation/src/Main.java
@@ -40,7 +40,11 @@
}
public static void doTest(ArrayList<Object> l) throws Exception {
- setupObjectAllocCallback();
+ // Disable the global registration from OnLoad, to get into a known state.
+ enableAllocationTracking(null, false);
+
+ // Enable actual logging callback.
+ setupObjectAllocCallback(true);
enableAllocationTracking(null, true);
@@ -74,6 +78,11 @@
testThread(l, false, true);
l.add(new Byte((byte)0));
+
+ // Disable actual logging callback and re-enable tracking, so we can keep the event enabled and
+ // check that shutdown works correctly.
+ setupObjectAllocCallback(false);
+ enableAllocationTracking(null, true);
}
private static void testThread(final ArrayList<Object> l, final boolean sameThread,
@@ -82,6 +91,8 @@
final SimpleBarrier trackBarrier = new SimpleBarrier(1);
final SimpleBarrier disableBarrier = new SimpleBarrier(1);
+ final Thread thisThread = Thread.currentThread();
+
Thread t = new Thread() {
public void run() {
try {
@@ -95,7 +106,7 @@
l.add(new Double(0.0));
if (disableTracking) {
- enableAllocationTracking(sameThread ? this : Thread.currentThread(), false);
+ enableAllocationTracking(sameThread ? this : thisThread, false);
}
}
};
@@ -127,6 +138,6 @@
}
}
- private static native void setupObjectAllocCallback();
+ private static native void setupObjectAllocCallback(boolean enable);
private static native void enableAllocationTracking(Thread thread, boolean enable);
}
diff --git a/test/904-object-allocation/tracking.cc b/test/904-object-allocation/tracking.cc
index b22fc6c..57bfed5 100644
--- a/test/904-object-allocation/tracking.cc
+++ b/test/904-object-allocation/tracking.cc
@@ -58,10 +58,10 @@
}
extern "C" JNIEXPORT void JNICALL Java_Main_setupObjectAllocCallback(
- JNIEnv* env ATTRIBUTE_UNUSED, jclass klass ATTRIBUTE_UNUSED) {
+ JNIEnv* env ATTRIBUTE_UNUSED, jclass klass ATTRIBUTE_UNUSED, jboolean enable) {
jvmtiEventCallbacks callbacks;
memset(&callbacks, 0, sizeof(jvmtiEventCallbacks));
- callbacks.VMObjectAlloc = ObjectAllocated;
+ callbacks.VMObjectAlloc = enable ? ObjectAllocated : nullptr;
jvmtiError ret = jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
if (ret != JVMTI_ERROR_NONE) {
@@ -94,6 +94,7 @@
printf("Unable to get jvmti env!\n");
return 1;
}
+ jvmti_env->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_VM_OBJECT_ALLOC, nullptr);
return 0;
}
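
With OnLoad now enabling JVMTI_EVENT_VM_OBJECT_ALLOC globally, the test can toggle the
callback and the per-thread tracking independently and exercise the "only act on a real state
change" path added in events.cc. A compact sketch of the two JVMTI calls involved (standard
JVMTI API, error handling omitted):

    #include <cstring>
    #include <jvmti.h>

    static void JNICALL ObjectAllocated(jvmtiEnv*, JNIEnv*, jthread, jobject, jclass, jlong) {}

    void SetAllocCallback(jvmtiEnv* jvmti, bool install) {
      jvmtiEventCallbacks callbacks;
      std::memset(&callbacks, 0, sizeof(callbacks));
      callbacks.VMObjectAlloc = install ? ObjectAllocated : nullptr;
      jvmti->SetEventCallbacks(&callbacks, sizeof(callbacks));
    }

    void SetAllocTracking(jvmtiEnv* jvmti, jthread thread, bool enable) {
      // Enables or disables the event itself, globally (thread == nullptr) or per thread.
      jvmti->SetEventNotificationMode(enable ? JVMTI_ENABLE : JVMTI_DISABLE,
                                      JVMTI_EVENT_VM_OBJECT_ALLOC,
                                      thread);
    }
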
diff --git a/test/978-virtual-interface/build b/test/978-virtual-interface/build
new file mode 100755
index 0000000..14230c2
--- /dev/null
+++ b/test/978-virtual-interface/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+./default-build "$@" --experimental default-methods
diff --git a/test/978-virtual-interface/expected.txt b/test/978-virtual-interface/expected.txt
new file mode 100644
index 0000000..99071b1
--- /dev/null
+++ b/test/978-virtual-interface/expected.txt
@@ -0,0 +1 @@
+Received expected ICCE error!
diff --git a/test/978-virtual-interface/info.txt b/test/978-virtual-interface/info.txt
new file mode 100644
index 0000000..0b8a39f
--- /dev/null
+++ b/test/978-virtual-interface/info.txt
@@ -0,0 +1,7 @@
+Smali-based regression test for b/32201623
+
+This test cannot be run with --jvm.
+
+This test checks that we correctly detect when one attempts to invoke an
+interface method via the invoke-virtual opcode and that the correct exception
+is thrown.
diff --git a/test/978-virtual-interface/smali/Iface.smali b/test/978-virtual-interface/smali/Iface.smali
new file mode 100644
index 0000000..9c3ef7a
--- /dev/null
+++ b/test/978-virtual-interface/smali/Iface.smali
@@ -0,0 +1,110 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# // Methods are sorted in alphabetical order in the dex file. We need 10 padding
+# // methods to ensure the 11th target lines up to the same vtable slot as the
+# // first Subtype virtual method (the other 10 are the java/lang/Object;
+# // methods).
+# interface Iface {
+# public default void fakeMethod_A() {}
+# public default void fakeMethod_B() {}
+# public default void fakeMethod_C() {}
+# public default void fakeMethod_D() {}
+# public default void fakeMethod_E() {}
+# public default void fakeMethod_F() {}
+# public default void fakeMethod_G() {}
+# public default void fakeMethod_H() {}
+# public default void fakeMethod_I() {}
+# public default void fakeMethod_J() {}
+# public default void fakeMethod_K() {}
+# public default void fakeMethod_Target() {}
+# }
+
+.class public abstract interface LIface;
+
+.super Ljava/lang/Object;
+
+# // 1
+.method public fakeMethod_A()V
+ .locals 0
+ return-void
+.end method
+
+# // 2
+.method public fakeMethod_B()V
+ .locals 0
+ return-void
+.end method
+
+# // 3
+.method public fakeMethod_C()V
+ .locals 0
+ return-void
+.end method
+
+# // 4
+.method public fakeMethod_D()V
+ .locals 0
+ return-void
+.end method
+
+# // 5
+.method public fakeMethod_E()V
+ .locals 0
+ return-void
+.end method
+
+# // 6
+.method public fakeMethod_F()V
+ .locals 0
+ return-void
+.end method
+
+# // 7
+.method public fakeMethod_G()V
+ .locals 0
+ return-void
+.end method
+
+# // 8
+.method public fakeMethod_H()V
+ .locals 0
+ return-void
+.end method
+
+# // 9
+.method public fakeMethod_I()V
+ .locals 0
+ return-void
+.end method
+
+# // 10
+.method public fakeMethod_J()V
+ .locals 0
+ return-void
+.end method
+
+# // 11
+.method public fakeMethod_K()V
+ .locals 0
+ return-void
+.end method
+
+# // 12
+.method public fakeMethod_Target()V
+ .locals 0
+ return-void
+.end method
diff --git a/test/978-virtual-interface/smali/Main.smali b/test/978-virtual-interface/smali/Main.smali
new file mode 100644
index 0000000..61b82f3
--- /dev/null
+++ b/test/978-virtual-interface/smali/Main.smali
@@ -0,0 +1,50 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public class Main {
+# public static void main(String[] args) {
+# Subtype s = new Subtype();
+# try {
+# s.callPackage();
+# System.out.println("No error thrown!");
+# } catch (IncompatibleClassChangeError e) {
+# System.out.println("Recieved expected ICCE error!");
+# }
+# }
+# }
+
+.class public LMain;
+
+.super Ljava/lang/Object;
+
+.method public static main([Ljava/lang/String;)V
+ .locals 3
+
+ new-instance v0, LSubtype;
+ invoke-direct {v0}, LSubtype;-><init>()V
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ :try_start
+ invoke-virtual {v0}, LSubtype;->callPackage()V
+ const-string v1, "No error thrown!"
+ invoke-virtual {v2, v1}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
+ return-void
+ :try_end
+ .catch Ljava/lang/IncompatibleClassChangeError; {:try_start .. :try_end} :error_start
+ :error_start
+ const-string v1, "Recieved expected ICCE error!"
+ invoke-virtual {v2, v1}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
+ return-void
+.end method
diff --git a/test/978-virtual-interface/smali/Subtype.smali b/test/978-virtual-interface/smali/Subtype.smali
new file mode 100644
index 0000000..f876cf9
--- /dev/null
+++ b/test/978-virtual-interface/smali/Subtype.smali
@@ -0,0 +1,40 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public class Subtype extends pkg.Target implements Iface {
+# public void callPackage() {
+# // Fake into a virtual call.
+# // ((Iface)this).fakeMethod_Target();
+# }
+# }
+
+.class public LSubtype;
+
+.super Lpkg/Target;
+
+.implements LIface;
+
+.method public constructor <init>()V
+ .locals 0
+ invoke-direct {p0}, Lpkg/Target;-><init>()V
+ return-void
+.end method
+
+.method public callPackage()V
+ .locals 0
+ invoke-virtual {p0}, LIface;->fakeMethod_Target()V
+ return-void
+.end method
diff --git a/test/978-virtual-interface/smali/Target.smali b/test/978-virtual-interface/smali/Target.smali
new file mode 100644
index 0000000..70108fb
--- /dev/null
+++ b/test/978-virtual-interface/smali/Target.smali
@@ -0,0 +1,40 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# package pkg;
+# public class Target {
+# public void packageMethod() {
+# System.out.println("Package method called!");
+# }
+# }
+
+.class public Lpkg/Target;
+
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+ .locals 0
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method packageMethod()V
+ .locals 2
+ const-string v1, "Package method called!"
+ sget-object v0, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ invoke-virtual {v0, v1}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
+ return-void
+.end method
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 7a5dab0..34f8838 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -233,11 +233,9 @@
# Disable 149-suspend-all-stress, its output is flaky (b/28988206).
# Disable 577-profile-foreign-dex (b/27454772).
-# Disable 955-methodhandles-smali until the accompanying smali change has been landed.
TEST_ART_BROKEN_ALL_TARGET_TESTS := \
149-suspend-all-stress \
577-profile-foreign-dex \
- 955-methodhandles-smali \
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \