Merge "Combine direct_methods_ and virtual_methods_ fields of mirror::Class"
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index a24c8a3..4d2d924 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -117,7 +117,7 @@
}
void CompilerOptions::ParseInlineMaxCodeUnits(const StringPiece& option, UsageFn Usage) {
- ParseUintOption(option, "--inline-max-code-units=", &inline_max_code_units_, Usage);
+ ParseUintOption(option, "--inline-max-code-units", &inline_max_code_units_, Usage);
}
void CompilerOptions::ParseDisablePasses(const StringPiece& option,
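Note on the hunk above: the trailing "=" is dropped from the flag name, presumably because the shared parsing helper appends it while matching, so passing "--inline-max-code-units=" would have matched against a doubled "==". A minimal sketch of that assumed contract, with a hypothetical ParseUintOptionSketch standing in for ART's actual helper:

#include <cstdlib>
#include <cstring>
#include <string>

// Hypothetical sketch (not ART's real helper): the parser appends "="
// itself, so callers pass the bare flag name.
bool ParseUintOptionSketch(const char* option, const std::string& flag_name,
                           unsigned* out_value) {
  const std::string prefix = flag_name + "=";  // "=" is added exactly once, here.
  if (strncmp(option, prefix.c_str(), prefix.size()) != 0) {
    return false;  // Not this flag.
  }
  *out_value = static_cast<unsigned>(strtoul(option + prefix.size(), nullptr, 10));
  return true;
}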
diff --git a/compiler/elf_writer_debug.cc b/compiler/elf_writer_debug.cc
index e806acf..06553a6 100644
--- a/compiler/elf_writer_debug.cc
+++ b/compiler/elf_writer_debug.cc
@@ -301,40 +301,30 @@
uint32_t high_pc_ = 0;
};
- struct LocalVariable {
- uint16_t vreg;
- uint32_t dex_pc_low;
- uint32_t dex_pc_high;
- const char* name;
- const char* type;
- const char* sig;
- };
+ typedef std::vector<DexFile::LocalInfo> LocalInfos;
- struct DebugInfoCallback {
- static void NewLocal(void* ctx,
- uint16_t vreg,
- uint32_t start,
- uint32_t end,
- const char* name,
- const char* type,
- const char* sig) {
- auto* context = static_cast<DebugInfoCallback*>(ctx);
- if (name != nullptr && type != nullptr) {
- context->local_variables_.push_back({vreg, start, end, name, type, sig});
- }
- }
- std::vector<LocalVariable> local_variables_;
- };
+ void LocalInfoCallback(void* ctx, const DexFile::LocalInfo& entry) {
+ static_cast<LocalInfos*>(ctx)->push_back(entry);
+ }
+
+ typedef std::vector<DexFile::PositionInfo> PositionInfos;
+
+ bool PositionInfoCallback(void* ctx, const DexFile::PositionInfo& entry) {
+ static_cast<PositionInfos*>(ctx)->push_back(entry);
+ return false;  // Returning false continues the iteration over all entries.
+ }
std::vector<const char*> GetParamNames(const MethodDebugInfo* mi) {
std::vector<const char*> names;
- const uint8_t* stream = mi->dex_file_->GetDebugInfoStream(mi->code_item_);
- if (stream != nullptr) {
- DecodeUnsignedLeb128(&stream); // line.
- uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
- for (uint32_t i = 0; i < parameters_size; ++i) {
- uint32_t id = DecodeUnsignedLeb128P1(&stream);
- names.push_back(mi->dex_file_->StringDataByIdx(id));
+ if (mi->code_item_ != nullptr) {
+ const uint8_t* stream = mi->dex_file_->GetDebugInfoStream(mi->code_item_);
+ if (stream != nullptr) {
+ DecodeUnsignedLeb128(&stream); // line.
+ uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
+ for (uint32_t i = 0; i < parameters_size; ++i) {
+ uint32_t id = DecodeUnsignedLeb128P1(&stream);
+ names.push_back(mi->dex_file_->StringDataByIdx(id));
+ }
}
}
return names;
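For context on the stream walk in GetParamNames above: dex debug info streams are ULEB128-encoded. A minimal sketch of what DecodeUnsignedLeb128 and its "P1" variant do, using local stand-in names:

#include <cstdint>

// Each byte contributes 7 payload bits, least-significant group first;
// the high bit of a byte marks continuation.
uint32_t DecodeUleb128(const uint8_t** data) {
  uint32_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = *(*data)++;
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while ((byte & 0x80) != 0);
  return result;
}

// The P1 ("plus one") variant stores value + 1 so that an encoded 0 can
// represent "no index" (-1), e.g. an unnamed parameter.
int32_t DecodeUleb128P1(const uint8_t** data) {
  return static_cast<int32_t>(DecodeUleb128(data)) - 1;
}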
@@ -454,6 +444,7 @@
const char* last_dex_class_desc = nullptr;
for (auto mi : compilation_unit.methods_) {
const DexFile* dex = mi->dex_file_;
+ const DexFile::CodeItem* dex_code = mi->code_item_;
const DexFile::MethodId& dex_method = dex->GetMethodId(mi->dex_method_index_);
const DexFile::ProtoId& dex_proto = dex->GetMethodPrototype(dex_method);
const DexFile::TypeList* dex_params = dex->GetProtoParameters(dex_proto);
@@ -473,19 +464,6 @@
last_dex_class_desc = dex_class_desc;
}
- // Collect information about local variables and parameters.
- DebugInfoCallback debug_info_callback;
- std::vector<const char*> param_names;
- if (mi->code_item_ != nullptr) {
- dex->DecodeDebugInfo(mi->code_item_,
- is_static,
- mi->dex_method_index_,
- nullptr,
- DebugInfoCallback::NewLocal,
- &debug_info_callback);
- param_names = GetParamNames(mi);
- }
-
int start_depth = info_.Depth();
info_.StartTag(DW_TAG_subprogram);
WriteName(dex->GetMethodName(dex_method));
@@ -494,50 +472,70 @@
uint8_t frame_base[] = { DW_OP_call_frame_cfa };
info_.WriteExprLoc(DW_AT_frame_base, &frame_base, sizeof(frame_base));
WriteLazyType(dex->GetReturnTypeDescriptor(dex_proto));
- uint32_t vreg = mi->code_item_ == nullptr ? 0 :
- mi->code_item_->registers_size_ - mi->code_item_->ins_size_;
+
+ // Write parameters. DecodeDebugLocalInfo returns them as well, but it does not
+ // guarantee order or uniqueness, so it is safer to iterate over them manually.
+ // DecodeDebugLocalInfo is also unavailable when there is no debug info.
+ std::vector<const char*> param_names = GetParamNames(mi);
+ uint32_t arg_reg = 0;
if (!is_static) {
info_.StartTag(DW_TAG_formal_parameter);
WriteName("this");
info_.WriteFlag(DW_AT_artificial, true);
WriteLazyType(dex_class_desc);
- const bool is64bitValue = false;
- WriteRegLocation(mi, vreg, is64bitValue, compilation_unit.low_pc_);
- vreg++;
+ if (dex_code != nullptr) {
+ // Write the stack location of the parameter.
+ const uint32_t vreg = dex_code->registers_size_ - dex_code->ins_size_ + arg_reg;
+ const bool is64bitValue = false;
+ WriteRegLocation(mi, vreg, is64bitValue, compilation_unit.low_pc_);
+ }
+ arg_reg++;
info_.EndTag();
}
if (dex_params != nullptr) {
for (uint32_t i = 0; i < dex_params->Size(); ++i) {
info_.StartTag(DW_TAG_formal_parameter);
// Parameter names may not be always available.
- if (i < param_names.size() && param_names[i] != nullptr) {
+ if (i < param_names.size()) {
WriteName(param_names[i]);
}
// Write the type.
const char* type_desc = dex->StringByTypeIdx(dex_params->GetTypeItem(i).type_idx_);
WriteLazyType(type_desc);
- // Write the stack location of the parameter.
const bool is64bitValue = type_desc[0] == 'D' || type_desc[0] == 'J';
- WriteRegLocation(mi, vreg, is64bitValue, compilation_unit.low_pc_);
- vreg += is64bitValue ? 2 : 1;
+ if (dex_code != nullptr) {
+ // Write the stack location of the parameter.
+ const uint32_t vreg = dex_code->registers_size_ - dex_code->ins_size_ + arg_reg;
+ WriteRegLocation(mi, vreg, is64bitValue, compilation_unit.low_pc_);
+ }
+ arg_reg += is64bitValue ? 2 : 1;
info_.EndTag();
}
- if (mi->code_item_ != nullptr) {
- CHECK_EQ(vreg, mi->code_item_->registers_size_);
+ if (dex_code != nullptr) {
+ DCHECK_EQ(arg_reg, dex_code->ins_size_);
}
}
- for (const LocalVariable& var : debug_info_callback.local_variables_) {
- const uint32_t first_arg = mi->code_item_->registers_size_ - mi->code_item_->ins_size_;
- if (var.vreg < first_arg) {
- info_.StartTag(DW_TAG_variable);
- WriteName(var.name);
- WriteLazyType(var.type);
- bool is64bitValue = var.type[0] == 'D' || var.type[0] == 'J';
- WriteRegLocation(mi, var.vreg, is64bitValue, compilation_unit.low_pc_,
- var.dex_pc_low, var.dex_pc_high);
- info_.EndTag();
+
+ // Write local variables.
+ LocalInfos local_infos;
+ if (dex->DecodeDebugLocalInfo(dex_code,
+ is_static,
+ mi->dex_method_index_,
+ LocalInfoCallback,
+ &local_infos)) {
+ for (const DexFile::LocalInfo& var : local_infos) {
+ if (var.reg_ < dex_code->registers_size_ - dex_code->ins_size_) {
+ info_.StartTag(DW_TAG_variable);
+ WriteName(var.name_);
+ WriteLazyType(var.descriptor_);
+ bool is64bitValue = var.descriptor_[0] == 'D' || var.descriptor_[0] == 'J';
+ WriteRegLocation(mi, var.reg_, is64bitValue, compilation_unit.low_pc_,
+ var.start_address_, var.end_address_);
+ info_.EndTag();
+ }
}
}
+
info_.EndTag();
CHECK_EQ(info_.Depth(), start_depth); // Balanced start/end.
}
@@ -708,8 +706,7 @@
// to be enclosed in the right set of namespaces. Therefore we
// just define all types lazily at the end of compilation unit.
void WriteLazyType(const char* type_descriptor) {
- DCHECK(type_descriptor != nullptr);
- if (type_descriptor[0] != 'V') {
+ if (type_descriptor != nullptr && type_descriptor[0] != 'V') {
lazy_types_.emplace(type_descriptor, info_.size());
info_.WriteRef4(DW_AT_type, 0);
}
@@ -724,7 +721,9 @@
private:
void WriteName(const char* name) {
- info_.WriteStrp(DW_AT_name, owner_->WriteString(name));
+ if (name != nullptr) {
+ info_.WriteStrp(DW_AT_name, owner_->WriteString(name));
+ }
}
// Helper which writes DWARF expression referencing a register.
@@ -976,29 +975,15 @@
continue;
}
- // Create mapping table from dex to source line.
- struct DebugInfoCallbacks {
- static bool NewPosition(void* ctx, uint32_t address, uint32_t line) {
- auto* context = static_cast<DebugInfoCallbacks*>(ctx);
- context->dex2line_.push_back({address, static_cast<int32_t>(line)});
- return false;
- }
- DefaultSrcMap dex2line_;
- } debug_info_callbacks;
-
Elf_Addr method_address = text_address + mi->low_pc_;
+ PositionInfos position_infos;
const DexFile* dex = mi->dex_file_;
- if (mi->code_item_ != nullptr) {
- dex->DecodeDebugInfo(mi->code_item_,
- (mi->access_flags_ & kAccStatic) != 0,
- mi->dex_method_index_,
- DebugInfoCallbacks::NewPosition,
- nullptr,
- &debug_info_callbacks);
+ if (!dex->DecodeDebugPositionInfo(mi->code_item_, PositionInfoCallback, &position_infos)) {
+ continue;
}
- if (debug_info_callbacks.dex2line_.empty()) {
+ if (position_infos.empty()) {
continue;
}
@@ -1053,20 +1038,23 @@
opcodes.SetFile(file_index);
// Generate mapping opcodes from PC to Java lines.
- const DefaultSrcMap& dex2line_map = debug_info_callbacks.dex2line_;
- if (file_index != 0 && !dex2line_map.empty()) {
+ if (file_index != 0) {
bool first = true;
for (SrcMapElem pc2dex : src_mapping_table) {
uint32_t pc = pc2dex.from_;
int dex_pc = pc2dex.to_;
- auto dex2line = dex2line_map.Find(static_cast<uint32_t>(dex_pc));
- if (dex2line.first) {
- int line = dex2line.second;
+ // Find the first mapping whose address is greater than our dex pc; then go back one step.
+ auto ub = std::upper_bound(position_infos.begin(), position_infos.end(), dex_pc,
+ [](uint32_t address, const DexFile::PositionInfo& entry) {
+ return address < entry.address_;
+ });
+ if (ub != position_infos.begin()) {
+ int line = (--ub)->line_;
if (first) {
first = false;
if (pc > 0) {
// Assume that any preceding code is prologue.
- int first_line = dex2line_map.front().to_;
+ int first_line = position_infos.front().line_;
// Prologue is not a sensible place for a breakpoint.
opcodes.NegateStmt();
opcodes.AddRow(method_address, first_line);
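The new dex-pc-to-line lookup above relies on position entries being sorted by address. A self-contained sketch of the same "upper_bound, then step back" idiom, with PositionEntry standing in for DexFile::PositionInfo:

#include <algorithm>
#include <cstdint>
#include <iterator>
#include <vector>

struct PositionEntry {  // Stand-in for DexFile::PositionInfo.
  uint32_t address_;
  uint32_t line_;
};

// Return the line of the last entry with address_ <= dex_pc, or -1 if none.
// Requires `entries` to be sorted by address_ in ascending order.
int FindLine(const std::vector<PositionEntry>& entries, uint32_t dex_pc) {
  auto ub = std::upper_bound(entries.begin(), entries.end(), dex_pc,
                             [](uint32_t address, const PositionEntry& entry) {
                               return address < entry.address_;
                             });
  return ub == entries.begin() ? -1 : static_cast<int>(std::prev(ub)->line_);
}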
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index d6aafcb..4fd73be 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -255,7 +255,7 @@
EXPECT_EQ(72U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
EXPECT_EQ(28U, sizeof(OatQuickMethodHeader));
- EXPECT_EQ(131 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
+ EXPECT_EQ(132 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
}
TEST_F(OatTest, OatHeaderIsValid) {
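The 131 -> 132 bump above matches the new pReadBarrierMark quick entrypoint invoked by the marking slow paths later in this change: the test pins sizeof(QuickEntryPoints) to the entrypoint count times the pointer size. A toy illustration of that invariant (the struct is hypothetical, not the real QuickEntryPoints layout):

#include <cstddef>

// A table of N function pointers occupies N * sizeof(void*), so adding one
// entrypoint moves the expected multiplier from 131 to 132.
struct QuickEntryPointsSketch {
  void* entrypoints[132];
};
static_assert(sizeof(QuickEntryPointsSketch) == 132 * sizeof(void*),
              "table size must track the entrypoint count");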
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 0fb552a..469dd49 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -433,6 +433,56 @@
DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86);
};
+// Slow path marking an object during a read barrier.
+class ReadBarrierMarkSlowPathX86 : public SlowPathCode {
+ public:
+ ReadBarrierMarkSlowPathX86(HInstruction* instruction, Location out, Location obj)
+ : instruction_(instruction), out_(out), obj_(obj) {
+ DCHECK(kEmitCompilerReadBarrier);
+ }
+
+ const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathX86"; }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ Register reg_out = out_.AsRegister<Register>();
+ DCHECK(locations->CanCall());
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
+ DCHECK(instruction_->IsInstanceFieldGet() ||
+ instruction_->IsStaticFieldGet() ||
+ instruction_->IsArrayGet() ||
+ instruction_->IsLoadClass() ||
+ instruction_->IsLoadString() ||
+ instruction_->IsInstanceOf() ||
+ instruction_->IsCheckCast())
+ << "Unexpected instruction in read barrier marking slow path: "
+ << instruction_->DebugName();
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
+ x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), obj_);
+ x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierMark),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<kQuickReadBarrierMark, mirror::Object*, mirror::Object*>();
+ x86_codegen->Move32(out_, Location::RegisterLocation(EAX));
+
+ RestoreLiveRegisters(codegen, locations);
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ HInstruction* const instruction_;
+ const Location out_;
+ const Location obj_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathX86);
+};
+
// Slow path generating a read barrier for a heap reference.
class ReadBarrierForHeapReferenceSlowPathX86 : public SlowPathCode {
public:
@@ -454,7 +504,7 @@
// to be instrumented, e.g.:
//
// __ movl(out, Address(out, offset));
- // codegen_->GenerateReadBarrier(instruction, out_loc, out_loc, out_loc, offset);
+ // codegen_->GenerateReadBarrierSlow(instruction, out_loc, out_loc, out_loc, offset);
//
// In that case, we have lost the information about the original
// object, and the emitted read barrier cannot work properly.
@@ -470,7 +520,9 @@
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
DCHECK(!instruction_->IsInvoke() ||
(instruction_->IsInvokeStaticOrDirect() &&
- instruction_->GetLocations()->Intrinsified()));
+ instruction_->GetLocations()->Intrinsified()))
+ << "Unexpected instruction in read barrier for heap reference slow path: "
+ << instruction_->DebugName();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -612,14 +664,18 @@
class ReadBarrierForRootSlowPathX86 : public SlowPathCode {
public:
ReadBarrierForRootSlowPathX86(HInstruction* instruction, Location out, Location root)
- : instruction_(instruction), out_(out), root_(root) {}
+ : instruction_(instruction), out_(out), root_(root) {
+ DCHECK(kEmitCompilerReadBarrier);
+ }
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
Register reg_out = out_.AsRegister<Register>();
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
- DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString());
+ DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString())
+ << "Unexpected instruction in read barrier for GC root slow path: "
+ << instruction_->DebugName();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -1831,7 +1887,7 @@
}
void InstructionCodeGeneratorX86::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
- GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
+ codegen_->GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}
void LocationsBuilderX86::VisitReturnVoid(HReturnVoid* ret) {
@@ -4092,7 +4148,7 @@
LOG(FATAL) << "Unreachable";
}
-void InstructionCodeGeneratorX86::GenerateMemoryBarrier(MemBarrierKind kind) {
+void CodeGeneratorX86::GenerateMemoryBarrier(MemBarrierKind kind) {
/*
* According to the JSR-133 Cookbook, for x86 only StoreLoad/AnyAny barriers need memory fence.
* All other barriers (LoadAny, AnyStore, StoreStore) are nops due to the x86 memory model.
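A plain-C++ sketch of the rule stated in the comment above: only StoreLoad/AnyAny needs a real fence on x86, the rest are no-ops. This models the decision, not the emitted assembly:

#include <atomic>

enum class BarrierKind { kAnyAny, kLoadAny, kAnyStore, kStoreStore };

void EmitBarrierSketch(BarrierKind kind) {
  if (kind == BarrierKind::kAnyAny) {
    // The one case needing an instruction (mfence, or a locked RMW).
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }
  // kLoadAny / kAnyStore / kStoreStore: nothing to emit on x86.
}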
@@ -4346,9 +4402,14 @@
if (field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong)) {
// Long values can be loaded atomically into an XMM using movsd.
- // So we use an XMM register as a temp to achieve atomicity (first load the temp into the XMM
- // and then copy the XMM into the output 32bits at a time).
+ // So we use an XMM register as a temp to achieve atomicity (first
+ // load the temp into the XMM and then copy the XMM into the
+ // output, 32 bits at a time).
locations->AddTemp(Location::RequiresFpuRegister());
+ } else if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
+ // We need a temporary register for the read barrier marking slow
+ // path in CodeGeneratorX86::GenerateFieldLoadWithBakerReadBarrier.
+ locations->AddTemp(Location::RequiresRegister());
}
}
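A rough C++ analogue of the volatile-long trick described in the comment above, with std::atomic standing in for the single movsd load; this is a model of the idea, not the generated code:

#include <atomic>
#include <cstdint>

// Read the 64-bit field in one shot (the XMM load in the generated code),
// then split it into two 32-bit halves (the register pair on x86-32).
void AtomicLoadLongSketch(const std::atomic<uint64_t>* field,
                          uint32_t* lo, uint32_t* hi) {
  const uint64_t value = field->load(std::memory_order_relaxed);
  *lo = static_cast<uint32_t>(value);
  *hi = static_cast<uint32_t>(value >> 32);
}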
@@ -4386,9 +4447,32 @@
}
case Primitive::kPrimInt:
- case Primitive::kPrimNot: {
__ movl(out.AsRegister<Register>(), Address(base, offset));
break;
+
+ case Primitive::kPrimNot: {
+ // /* HeapReference<Object> */ out = *(base + offset)
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ Location temp_loc = locations->GetTemp(0);
+ // Note that a potential implicit null check is handled in this
+ // CodeGeneratorX86::GenerateFieldLoadWithBakerReadBarrier call.
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ instruction, out, base, offset, temp_loc, /* needs_null_check */ true);
+ if (is_volatile) {
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+ }
+ } else {
+ __ movl(out.AsRegister<Register>(), Address(base, offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ if (is_volatile) {
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+ }
+ // If read barriers are enabled, emit read barriers other than
+ // Baker's using a slow path (and also unpoison the loaded
+ // reference, if heap poisoning is enabled).
+ codegen_->MaybeGenerateReadBarrierSlow(instruction, out, out, base_loc, offset);
+ }
+ break;
}
case Primitive::kPrimLong: {
@@ -4423,17 +4507,20 @@
UNREACHABLE();
}
- // Longs are handled in the switch.
- if (field_type != Primitive::kPrimLong) {
+ if (field_type == Primitive::kPrimNot || field_type == Primitive::kPrimLong) {
+ // Potential implicit null checks, in the case of reference or
+ // long fields, are handled in the previous switch statement.
+ } else {
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
if (is_volatile) {
- GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
- }
-
- if (field_type == Primitive::kPrimNot) {
- codegen_->MaybeGenerateReadBarrier(instruction, out, out, base_loc, offset);
+ if (field_type == Primitive::kPrimNot) {
+ // Memory barriers, in the case of references, are also handled
+ // in the previous switch statement.
+ } else {
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+ }
}
}
@@ -4498,7 +4585,7 @@
CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1));
if (is_volatile) {
- GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
}
bool maybe_record_implicit_null_check_done = false;
@@ -4603,7 +4690,7 @@
}
if (is_volatile) {
- GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
}
}
@@ -4784,6 +4871,11 @@
Location::kOutputOverlap :
Location::kNoOutputOverlap);
}
+ // We need a temporary register for the read barrier marking slow
+ // path in CodeGeneratorX86::GenerateArrayLoadWithBakerReadBarrier.
+ if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
+ locations->AddTemp(Location::RequiresRegister());
+ }
}
void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
@@ -4791,12 +4883,13 @@
Location obj_loc = locations->InAt(0);
Register obj = obj_loc.AsRegister<Register>();
Location index = locations->InAt(1);
+ Location out_loc = locations->Out();
Primitive::Type type = instruction->GetType();
switch (type) {
case Primitive::kPrimBoolean: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- Register out = locations->Out().AsRegister<Register>();
+ Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
__ movzxb(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
@@ -4808,7 +4901,7 @@
case Primitive::kPrimByte: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
- Register out = locations->Out().AsRegister<Register>();
+ Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
__ movsxb(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
@@ -4820,7 +4913,7 @@
case Primitive::kPrimShort: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
- Register out = locations->Out().AsRegister<Register>();
+ Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
__ movsxw(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
@@ -4832,7 +4925,7 @@
case Primitive::kPrimChar: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- Register out = locations->Out().AsRegister<Register>();
+ Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
__ movzxw(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
@@ -4842,13 +4935,9 @@
break;
}
- case Primitive::kPrimInt:
- case Primitive::kPrimNot: {
- static_assert(
- sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
- "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
+ case Primitive::kPrimInt: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Register out = locations->Out().AsRegister<Register>();
+ Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
__ movl(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
@@ -4858,20 +4947,56 @@
break;
}
+ case Primitive::kPrimNot: {
+ static_assert(
+ sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
+ "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ // /* HeapReference<Object> */ out =
+ // *(obj + data_offset + index * sizeof(HeapReference<Object>))
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ Location temp = locations->GetTemp(0);
+ // Note that a potential implicit null check is handled in this
+ // CodeGeneratorX86::GenerateArrayLoadWithBakerReadBarrier call.
+ codegen_->GenerateArrayLoadWithBakerReadBarrier(
+ instruction, out_loc, obj, data_offset, index, temp, /* needs_null_check */ true);
+ } else {
+ Register out = out_loc.AsRegister<Register>();
+ if (index.IsConstant()) {
+ uint32_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ movl(out, Address(obj, offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // If read barriers are enabled, emit read barriers other than
+ // Baker's using a slow path (and also unpoison the loaded
+ // reference, if heap poisoning is enabled).
+ codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset);
+ } else {
+ __ movl(out, Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // If read barriers are enabled, emit read barriers other than
+ // Baker's using a slow path (and also unpoison the loaded
+ // reference, if heap poisoning is enabled).
+ codegen_->MaybeGenerateReadBarrierSlow(
+ instruction, out_loc, out_loc, obj_loc, data_offset, index);
+ }
+ }
+ break;
+ }
+
case Primitive::kPrimLong: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
- Location out = locations->Out();
- DCHECK_NE(obj, out.AsRegisterPairLow<Register>());
+ DCHECK_NE(obj, out_loc.AsRegisterPairLow<Register>());
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ movl(out.AsRegisterPairLow<Register>(), Address(obj, offset));
+ __ movl(out_loc.AsRegisterPairLow<Register>(), Address(obj, offset));
codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ movl(out.AsRegisterPairHigh<Register>(), Address(obj, offset + kX86WordSize));
+ __ movl(out_loc.AsRegisterPairHigh<Register>(), Address(obj, offset + kX86WordSize));
} else {
- __ movl(out.AsRegisterPairLow<Register>(),
+ __ movl(out_loc.AsRegisterPairLow<Register>(),
Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset));
codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ movl(out.AsRegisterPairHigh<Register>(),
+ __ movl(out_loc.AsRegisterPairHigh<Register>(),
Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize));
}
break;
@@ -4879,7 +5004,7 @@
case Primitive::kPrimFloat: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
- XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
+ XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
if (index.IsConstant()) {
__ movss(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
@@ -4891,7 +5016,7 @@
case Primitive::kPrimDouble: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
- XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
+ XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
if (index.IsConstant()) {
__ movsd(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset));
@@ -4906,23 +5031,12 @@
UNREACHABLE();
}
- if (type != Primitive::kPrimLong) {
+ if (type == Primitive::kPrimNot || type == Primitive::kPrimLong) {
+ // Potential implicit null checks, in the case of reference or
+ // long arrays, are handled in the previous switch statement.
+ } else {
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
-
- if (type == Primitive::kPrimNot) {
- static_assert(
- sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
- "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Location out = locations->Out();
- if (index.IsConstant()) {
- uint32_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- codegen_->MaybeGenerateReadBarrier(instruction, out, out, obj_loc, offset);
- } else {
- codegen_->MaybeGenerateReadBarrier(instruction, out, out, obj_loc, data_offset, index);
- }
- }
}
void LocationsBuilderX86::VisitArraySet(HArraySet* instruction) {
@@ -5054,12 +5168,12 @@
// __ movl(temp2, temp);
// // /* HeapReference<Class> */ temp = temp->component_type_
// __ movl(temp, Address(temp, component_offset));
- // codegen_->GenerateReadBarrier(
+ // codegen_->GenerateReadBarrierSlow(
// instruction, temp_loc, temp_loc, temp2_loc, component_offset);
//
// // /* HeapReference<Class> */ temp2 = register_value->klass_
// __ movl(temp2, Address(register_value, class_offset));
- // codegen_->GenerateReadBarrier(
+ // codegen_->GenerateReadBarrierSlow(
// instruction, temp2_loc, temp2_loc, value, class_offset, temp_loc);
//
// __ cmpl(temp, temp2);
@@ -5340,8 +5454,8 @@
DCHECK_EQ(slow_path->GetSuccessor(), successor);
}
- __ fs()->cmpw(Address::Absolute(
- Thread::ThreadFlagsOffset<kX86WordSize>().Int32Value()), Immediate(0));
+ __ fs()->cmpw(Address::Absolute(Thread::ThreadFlagsOffset<kX86WordSize>().Int32Value()),
+ Immediate(0));
if (successor == nullptr) {
__ j(kNotEqual, slow_path->GetEntryLabel());
__ Bind(slow_path->GetReturnLabel());
@@ -5622,32 +5736,16 @@
if (cls->IsReferrersClass()) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
- uint32_t declaring_class_offset = ArtMethod::DeclaringClassOffset().Int32Value();
- if (kEmitCompilerReadBarrier) {
- // /* GcRoot<mirror::Class>* */ out = &(current_method->declaring_class_)
- __ leal(out, Address(current_method, declaring_class_offset));
- // /* mirror::Class* */ out = out->Read()
- codegen_->GenerateReadBarrierForRoot(cls, out_loc, out_loc);
- } else {
- // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
- __ movl(out, Address(current_method, declaring_class_offset));
- }
+ // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
+ GenerateGcRootFieldLoad(
+ cls, out_loc, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
} else {
// /* GcRoot<mirror::Class>[] */ out =
// current_method.ptr_sized_fields_->dex_cache_resolved_types_
__ movl(out, Address(current_method,
ArtMethod::DexCacheResolvedTypesOffset(kX86PointerSize).Int32Value()));
-
- size_t cache_offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex());
- if (kEmitCompilerReadBarrier) {
- // /* GcRoot<mirror::Class>* */ out = &out[type_index]
- __ leal(out, Address(out, cache_offset));
- // /* mirror::Class* */ out = out->Read()
- codegen_->GenerateReadBarrierForRoot(cls, out_loc, out_loc);
- } else {
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- __ movl(out, Address(out, cache_offset));
- }
+ // /* GcRoot<mirror::Class> */ out = out[type_index]
+ GenerateGcRootFieldLoad(cls, out_loc, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
if (!cls->IsInDexCache() || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
@@ -5711,30 +5809,14 @@
Register out = out_loc.AsRegister<Register>();
Register current_method = locations->InAt(0).AsRegister<Register>();
- uint32_t declaring_class_offset = ArtMethod::DeclaringClassOffset().Int32Value();
- if (kEmitCompilerReadBarrier) {
- // /* GcRoot<mirror::Class>* */ out = &(current_method->declaring_class_)
- __ leal(out, Address(current_method, declaring_class_offset));
- // /* mirror::Class* */ out = out->Read()
- codegen_->GenerateReadBarrierForRoot(load, out_loc, out_loc);
- } else {
- // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
- __ movl(out, Address(current_method, declaring_class_offset));
- }
-
+ // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
+ GenerateGcRootFieldLoad(
+ load, out_loc, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
// /* GcRoot<mirror::String>[] */ out = out->dex_cache_strings_
__ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value()));
-
- size_t cache_offset = CodeGenerator::GetCacheOffset(load->GetStringIndex());
- if (kEmitCompilerReadBarrier) {
- // /* GcRoot<mirror::String>* */ out = &out[string_index]
- __ leal(out, Address(out, cache_offset));
- // /* mirror::String* */ out = out->Read()
- codegen_->GenerateReadBarrierForRoot(load, out_loc, out_loc);
- } else {
- // /* GcRoot<mirror::String> */ out = out[string_index]
- __ movl(out, Address(out, cache_offset));
- }
+ // /* GcRoot<mirror::String> */ out = out[string_index]
+ GenerateGcRootFieldLoad(
+ load, out_loc, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
if (!load->IsInDexCache()) {
SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86(load);
@@ -5782,6 +5864,14 @@
CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}
+static bool TypeCheckNeedsATemporary(TypeCheckKind type_check_kind) {
+ return kEmitCompilerReadBarrier &&
+ (kUseBakerReadBarrier ||
+ type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+ type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+ type_check_kind == TypeCheckKind::kArrayObjectCheck);
+}
+
void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
@@ -5807,21 +5897,22 @@
locations->SetOut(Location::RequiresRegister());
// When read barriers are enabled, we need a temporary register for
// some cases.
- if (kEmitCompilerReadBarrier &&
- (type_check_kind == TypeCheckKind::kAbstractClassCheck ||
- type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
- type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
+ if (TypeCheckNeedsATemporary(type_check_kind)) {
locations->AddTemp(Location::RequiresRegister());
}
}
void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
+ TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
LocationSummary* locations = instruction->GetLocations();
Location obj_loc = locations->InAt(0);
Register obj = obj_loc.AsRegister<Register>();
Location cls = locations->InAt(1);
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
+ Location temp_loc = TypeCheckNeedsATemporary(type_check_kind) ?
+ locations->GetTemp(0) :
+ Location::NoLocation();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
@@ -5837,10 +5928,9 @@
}
// /* HeapReference<Class> */ out = obj->klass_
- __ movl(out, Address(obj, class_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, obj_loc, class_offset);
+ GenerateReferenceLoadTwoRegisters(instruction, out_loc, obj_loc, class_offset, temp_loc);
- switch (instruction->GetTypeCheckKind()) {
+ switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
if (cls.IsRegister()) {
__ cmpl(out, cls.AsRegister<Register>());
@@ -5861,17 +5951,8 @@
// object to avoid doing a comparison we know will fail.
NearLabel loop;
__ Bind(&loop);
- Location temp_loc = kEmitCompilerReadBarrier ? locations->GetTemp(0) : Location::NoLocation();
- if (kEmitCompilerReadBarrier) {
- // Save the value of `out` into `temp` before overwriting it
- // in the following move operation, as we will need it for the
- // read barrier below.
- Register temp = temp_loc.AsRegister<Register>();
- __ movl(temp, out);
- }
// /* HeapReference<Class> */ out = out->super_class_
- __ movl(out, Address(out, super_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, temp_loc, super_offset);
+ GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, temp_loc);
__ testl(out, out);
// If `out` is null, we use it for the result, and jump to `done`.
__ j(kEqual, &done);
@@ -5900,17 +5981,8 @@
__ cmpl(out, Address(ESP, cls.GetStackIndex()));
}
__ j(kEqual, &success);
- Location temp_loc = kEmitCompilerReadBarrier ? locations->GetTemp(0) : Location::NoLocation();
- if (kEmitCompilerReadBarrier) {
- // Save the value of `out` into `temp` before overwriting it
- // in the following move operation, as we will need it for the
- // read barrier below.
- Register temp = temp_loc.AsRegister<Register>();
- __ movl(temp, out);
- }
// /* HeapReference<Class> */ out = out->super_class_
- __ movl(out, Address(out, super_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, temp_loc, super_offset);
+ GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, temp_loc);
__ testl(out, out);
__ j(kNotEqual, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
@@ -5934,17 +6006,8 @@
}
__ j(kEqual, &exact_check);
// Otherwise, we need to check that the object's class is a non-primitive array.
- Location temp_loc = kEmitCompilerReadBarrier ? locations->GetTemp(0) : Location::NoLocation();
- if (kEmitCompilerReadBarrier) {
- // Save the value of `out` into `temp` before overwriting it
- // in the following move operation, as we will need it for the
- // read barrier below.
- Register temp = temp_loc.AsRegister<Register>();
- __ movl(temp, out);
- }
// /* HeapReference<Class> */ out = out->component_type_
- __ movl(out, Address(out, component_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, temp_loc, component_offset);
+ GenerateReferenceLoadOneRegister(instruction, out_loc, component_offset, temp_loc);
__ testl(out, out);
// If `out` is null, we use it for the result, and jump to `done`.
__ j(kEqual, &done);
@@ -5988,6 +6051,13 @@
// HInstanceOf instruction (following the runtime calling
// convention), which might be cluttered by the potential first
// read barrier emission at the beginning of this method.
+ //
+ // TODO: Introduce a new runtime entry point taking the object
+ // to test (instead of its class) as argument, and let it deal
+ // with the read barrier issues. This will let us refactor this
+ // case of the `switch` code as it was previously (with a direct
+ // call to the runtime not using a type checking slow path).
+ // This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction,
/* is_fatal */ false);
@@ -6040,27 +6110,27 @@
locations->AddTemp(Location::RequiresRegister());
// When read barriers are enabled, we need an additional temporary
// register for some cases.
- if (kEmitCompilerReadBarrier &&
- (type_check_kind == TypeCheckKind::kAbstractClassCheck ||
- type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
- type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
+ if (TypeCheckNeedsATemporary(type_check_kind)) {
locations->AddTemp(Location::RequiresRegister());
}
}
void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
+ TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
LocationSummary* locations = instruction->GetLocations();
Location obj_loc = locations->InAt(0);
Register obj = obj_loc.AsRegister<Register>();
Location cls = locations->InAt(1);
Location temp_loc = locations->GetTemp(0);
Register temp = temp_loc.AsRegister<Register>();
+ Location temp2_loc = TypeCheckNeedsATemporary(type_check_kind) ?
+ locations->GetTemp(1) :
+ Location::NoLocation();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
- TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
bool is_type_check_slow_path_fatal =
(type_check_kind == TypeCheckKind::kExactCheck ||
type_check_kind == TypeCheckKind::kAbstractClassCheck ||
@@ -6080,8 +6150,7 @@
}
// /* HeapReference<Class> */ temp = obj->klass_
- __ movl(temp, Address(obj, class_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, temp2_loc);
switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
@@ -6103,18 +6172,8 @@
// object to avoid doing a comparison we know will fail.
NearLabel loop, compare_classes;
__ Bind(&loop);
- Location temp2_loc =
- kEmitCompilerReadBarrier ? locations->GetTemp(1) : Location::NoLocation();
- if (kEmitCompilerReadBarrier) {
- // Save the value of `temp` into `temp2` before overwriting it
- // in the following move operation, as we will need it for the
- // read barrier below.
- Register temp2 = temp2_loc.AsRegister<Register>();
- __ movl(temp2, temp);
- }
// /* HeapReference<Class> */ temp = temp->super_class_
- __ movl(temp, Address(temp, super_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, temp2_loc, super_offset);
+ GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, temp2_loc);
// If the class reference currently in `temp` is not null, jump
// to the `compare_classes` label to compare it with the checked
@@ -6127,8 +6186,7 @@
// going into the slow path, as it has been overwritten in the
// meantime.
// /* HeapReference<Class> */ temp = obj->klass_
- __ movl(temp, Address(obj, class_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, temp2_loc);
__ jmp(type_check_slow_path->GetEntryLabel());
__ Bind(&compare_classes);
@@ -6154,18 +6212,8 @@
}
__ j(kEqual, &done);
- Location temp2_loc =
- kEmitCompilerReadBarrier ? locations->GetTemp(1) : Location::NoLocation();
- if (kEmitCompilerReadBarrier) {
- // Save the value of `temp` into `temp2` before overwriting it
- // in the following move operation, as we will need it for the
- // read barrier below.
- Register temp2 = temp2_loc.AsRegister<Register>();
- __ movl(temp2, temp);
- }
// /* HeapReference<Class> */ temp = temp->super_class_
- __ movl(temp, Address(temp, super_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, temp2_loc, super_offset);
+ GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, temp2_loc);
// If the class reference currently in `temp` is not null, jump
// back at the beginning of the loop.
@@ -6177,8 +6225,7 @@
// going into the slow path, as it has been overwritten in the
// meantime.
// /* HeapReference<Class> */ temp = obj->klass_
- __ movl(temp, Address(obj, class_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, temp2_loc);
__ jmp(type_check_slow_path->GetEntryLabel());
break;
}
@@ -6195,19 +6242,8 @@
__ j(kEqual, &done);
// Otherwise, we need to check that the object's class is a non-primitive array.
- Location temp2_loc =
- kEmitCompilerReadBarrier ? locations->GetTemp(1) : Location::NoLocation();
- if (kEmitCompilerReadBarrier) {
- // Save the value of `temp` into `temp2` before overwriting it
- // in the following move operation, as we will need it for the
- // read barrier below.
- Register temp2 = temp2_loc.AsRegister<Register>();
- __ movl(temp2, temp);
- }
// /* HeapReference<Class> */ temp = temp->component_type_
- __ movl(temp, Address(temp, component_offset));
- codegen_->MaybeGenerateReadBarrier(
- instruction, temp_loc, temp_loc, temp2_loc, component_offset);
+ GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, temp2_loc);
// If the component type is not null (i.e. the object is indeed
// an array), jump to label `check_non_primitive_component_type`
@@ -6221,8 +6257,7 @@
// going into the slow path, as it has been overwritten in the
// meantime.
// /* HeapReference<Class> */ temp = obj->klass_
- __ movl(temp, Address(obj, class_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, temp2_loc);
__ jmp(type_check_slow_path->GetEntryLabel());
__ Bind(&check_non_primitive_component_type);
@@ -6230,8 +6265,7 @@
__ j(kEqual, &done);
// Same comment as above regarding `temp` and the slow path.
// /* HeapReference<Class> */ temp = obj->klass_
- __ movl(temp, Address(obj, class_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, temp2_loc);
__ jmp(type_check_slow_path->GetEntryLabel());
break;
}
@@ -6248,6 +6282,13 @@
// instruction (following the runtime calling convention), which
// might be cluttered by the potential first read barrier
// emission at the beginning of this method.
+ //
+ // TODO: Introduce a new runtime entry point taking the object
+ // to test (instead of its class) as argument, and let it deal
+ // with the read barrier issues. This will let us refactor this
+ // case of the `switch` code as it was previously (with a direct
+ // call to the runtime not using a type checking slow path).
+ // This should also be beneficial for the other cases above.
__ jmp(type_check_slow_path->GetEntryLabel());
break;
}
@@ -6409,14 +6450,226 @@
}
}
-void CodeGeneratorX86::GenerateReadBarrier(HInstruction* instruction,
- Location out,
- Location ref,
- Location obj,
- uint32_t offset,
- Location index) {
+void InstructionCodeGeneratorX86::GenerateReferenceLoadOneRegister(HInstruction* instruction,
+ Location out,
+ uint32_t offset,
+ Location temp) {
+ Register out_reg = out.AsRegister<Register>();
+ if (kEmitCompilerReadBarrier) {
+ if (kUseBakerReadBarrier) {
+ // Load with fast path based Baker's read barrier.
+ // /* HeapReference<Object> */ out = *(out + offset)
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ instruction, out, out_reg, offset, temp, /* needs_null_check */ false);
+ } else {
+ // Load with slow path based read barrier.
+ // Save the value of `out` into `temp` before overwriting it
+ // in the following move operation, as we will need it for the
+ // read barrier below.
+ __ movl(temp.AsRegister<Register>(), out_reg);
+ // /* HeapReference<Object> */ out = *(out + offset)
+ __ movl(out_reg, Address(out_reg, offset));
+ codegen_->GenerateReadBarrierSlow(instruction, out, out, temp, offset);
+ }
+ } else {
+ // Plain load with no read barrier.
+ // /* HeapReference<Object> */ out = *(out + offset)
+ __ movl(out_reg, Address(out_reg, offset));
+ __ MaybeUnpoisonHeapReference(out_reg);
+ }
+}
+
+void InstructionCodeGeneratorX86::GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
+ Location out,
+ Location obj,
+ uint32_t offset,
+ Location temp) {
+ Register out_reg = out.AsRegister<Register>();
+ Register obj_reg = obj.AsRegister<Register>();
+ if (kEmitCompilerReadBarrier) {
+ if (kUseBakerReadBarrier) {
+ // Load with fast path based Baker's read barrier.
+ // /* HeapReference<Object> */ out = *(obj + offset)
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ instruction, out, obj_reg, offset, temp, /* needs_null_check */ false);
+ } else {
+ // Load with slow path based read barrier.
+ // /* HeapReference<Object> */ out = *(obj + offset)
+ __ movl(out_reg, Address(obj_reg, offset));
+ codegen_->GenerateReadBarrierSlow(instruction, out, out, obj, offset);
+ }
+ } else {
+ // Plain load with no read barrier.
+ // /* HeapReference<Object> */ out = *(obj + offset)
+ __ movl(out_reg, Address(obj_reg, offset));
+ __ MaybeUnpoisonHeapReference(out_reg);
+ }
+}
+
+void InstructionCodeGeneratorX86::GenerateGcRootFieldLoad(HInstruction* instruction,
+ Location root,
+ Register obj,
+ uint32_t offset) {
+ Register root_reg = root.AsRegister<Register>();
+ if (kEmitCompilerReadBarrier) {
+ if (kUseBakerReadBarrier) {
+ // Fast path implementation of art::ReadBarrier::BarrierForRoot when
+ // Baker's read barriers are used:
+ //
+ // root = obj.field;
+ // if (Thread::Current()->GetIsGcMarking()) {
+ // root = ReadBarrier::Mark(root)
+ // }
+
+ // /* GcRoot<mirror::Object> */ root = *(obj + offset)
+ __ movl(root_reg, Address(obj, offset));
+ static_assert(
+ sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
+ "art::mirror::CompressedReference<mirror::Object> and art::GcRoot<mirror::Object> "
+ "have different sizes.");
+ static_assert(sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(int32_t),
+ "art::mirror::CompressedReference<mirror::Object> and int32_t "
+ "have different sizes.");
+
+ // Slow path used to mark the GC root `root`.
+ SlowPathCode* slow_path =
+ new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86(instruction, root, root);
+ codegen_->AddSlowPath(slow_path);
+
+ __ fs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86WordSize>().Int32Value()),
+ Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ } else {
+ // GC root loaded through a slow path for read barriers other
+ // than Baker's.
+ // /* GcRoot<mirror::Object>* */ root = obj + offset
+ __ leal(root_reg, Address(obj, offset));
+ // /* mirror::Object* */ root = root->Read()
+ codegen_->GenerateReadBarrierForRootSlow(instruction, root, root);
+ }
+ } else {
+ // Plain GC root load with no read barrier.
+ // /* GcRoot<mirror::Object> */ root = *(obj + offset)
+ __ movl(root_reg, Address(obj, offset));
+ }
+}
+
+void CodeGeneratorX86::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ Register obj,
+ uint32_t offset,
+ Location temp,
+ bool needs_null_check) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+
+ // /* HeapReference<Object> */ ref = *(obj + offset)
+ Address src(obj, offset);
+ GenerateReferenceLoadWithBakerReadBarrier(instruction, ref, obj, src, temp, needs_null_check);
+}
+
+void CodeGeneratorX86::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ Register obj,
+ uint32_t data_offset,
+ Location index,
+ Location temp,
+ bool needs_null_check) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+
+ // /* HeapReference<Object> */ ref =
+ // *(obj + data_offset + index * sizeof(HeapReference<Object>))
+ Address src = index.IsConstant() ?
+ Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset) :
+ Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset);
+ GenerateReferenceLoadWithBakerReadBarrier(instruction, ref, obj, src, temp, needs_null_check);
+}
+
+void CodeGeneratorX86::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ Register obj,
+ const Address& src,
+ Location temp,
+ bool needs_null_check) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+
+ // In slow path based read barriers, the read barrier call is
+ // inserted after the original load. However, in fast path based
+ // Baker's read barriers, we need to perform the load of
+ // mirror::Object::monitor_ *before* the original reference load.
+ // This load-load ordering is required by the read barrier.
+ // The fast path/slow path (for Baker's algorithm) should look like:
+ //
+ // uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
+ // lfence; // Load fence or artificial data dependency to prevent load-load reordering
+ // HeapReference<Object> ref = *src; // Original reference load.
+ // bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+ // if (is_gray) {
+ // ref = ReadBarrier::Mark(ref); // Performed by runtime entrypoint slow path.
+ // }
+ //
+ // Note: the original implementation in ReadBarrier::Barrier is
+ // slightly more complex as:
+ // - it implements the load-load fence using a data dependency on
+ // the high bits of rb_state, which are expected to be all zeroes;
+ // - it performs additional checks that we do not do here for
+ // performance reasons.
+
+ Register ref_reg = ref.AsRegister<Register>();
+ Register temp_reg = temp.AsRegister<Register>();
+ uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
+
+ // /* int32_t */ monitor = obj->monitor_
+ __ movl(temp_reg, Address(obj, monitor_offset));
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ // /* LockWord */ lock_word = LockWord(monitor)
+ static_assert(sizeof(LockWord) == sizeof(int32_t),
+ "art::LockWord and int32_t have different sizes.");
+ // /* uint32_t */ rb_state = lock_word.ReadBarrierState()
+ __ shrl(temp_reg, Immediate(LockWord::kReadBarrierStateShift));
+ __ andl(temp_reg, Immediate(LockWord::kReadBarrierStateMask));
+ static_assert(
+ LockWord::kReadBarrierStateMask == ReadBarrier::rb_ptr_mask_,
+ "art::LockWord::kReadBarrierStateMask is not equal to art::ReadBarrier::rb_ptr_mask_.");
+
+ // Load fence to prevent load-load reordering.
+ // Note that this is a no-op, thanks to the x86 memory model.
+ GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+
+ // The actual reference load.
+ // /* HeapReference<Object> */ ref = *src
+ __ movl(ref_reg, src);
+
+ // Object* ref = ref_addr->AsMirrorPtr()
+ __ MaybeUnpoisonHeapReference(ref_reg);
+
+ // Slow path used to mark the object `ref` when it is gray.
+ SlowPathCode* slow_path =
+ new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86(instruction, ref, ref);
+ AddSlowPath(slow_path);
+
+ // if (rb_state == ReadBarrier::gray_ptr_)
+ // ref = ReadBarrier::Mark(ref);
+ __ cmpl(temp_reg, Immediate(ReadBarrier::gray_ptr_));
+ __ j(kEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void CodeGeneratorX86::GenerateReadBarrierSlow(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index) {
DCHECK(kEmitCompilerReadBarrier);
+ // Insert a slow path based read barrier *after* the reference load.
+ //
// If heap poisoning is enabled, the unpoisoning of the loaded
// reference will be carried out by the runtime within the slow
// path.
@@ -6430,57 +6683,41 @@
ReadBarrierForHeapReferenceSlowPathX86(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
- // TODO: When read barrier has a fast path, add it here.
- /* Currently the read barrier call is inserted after the original load.
- * However, if we have a fast path, we need to perform the load of obj.LockWord *before* the
- * original load. This load-load ordering is required by the read barrier.
- * The fast path/slow path (for Baker's algorithm) should look like:
- *
- * bool isGray = obj.LockWord & kReadBarrierMask;
- * lfence; // load fence or artificial data dependence to prevent load-load reordering
- * ref = obj.field; // this is the original load
- * if (isGray) {
- * ref = Mark(ref); // ideally the slow path just does Mark(ref)
- * }
- */
-
__ jmp(slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
-void CodeGeneratorX86::MaybeGenerateReadBarrier(HInstruction* instruction,
- Location out,
- Location ref,
- Location obj,
- uint32_t offset,
- Location index) {
+void CodeGeneratorX86::MaybeGenerateReadBarrierSlow(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index) {
if (kEmitCompilerReadBarrier) {
+ // Baker's read barriers shall be handled by the fast path
+ // (CodeGeneratorX86::GenerateReferenceLoadWithBakerReadBarrier).
+ DCHECK(!kUseBakerReadBarrier);
// If heap poisoning is enabled, unpoisoning will be taken care of
// by the runtime within the slow path.
- GenerateReadBarrier(instruction, out, ref, obj, offset, index);
+ GenerateReadBarrierSlow(instruction, out, ref, obj, offset, index);
} else if (kPoisonHeapReferences) {
__ UnpoisonHeapReference(out.AsRegister<Register>());
}
}
-void CodeGeneratorX86::GenerateReadBarrierForRoot(HInstruction* instruction,
- Location out,
- Location root) {
+void CodeGeneratorX86::GenerateReadBarrierForRootSlow(HInstruction* instruction,
+ Location out,
+ Location root) {
DCHECK(kEmitCompilerReadBarrier);
+ // Insert a slow path based read barrier *after* the GC root load.
+ //
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCode* slow_path =
new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathX86(instruction, out, root);
AddSlowPath(slow_path);
- // TODO: Implement a fast path for ReadBarrierForRoot, performing
- // the following operation (for Baker's algorithm):
- //
- // if (thread.tls32_.is_gc_marking) {
- // root = Mark(root);
- // }
-
__ jmp(slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
@@ -6839,7 +7076,7 @@
// TODO: target as memory.
void CodeGeneratorX86::MoveFromReturnRegister(Location target, Primitive::Type type) {
if (!target.IsValid()) {
- DCHECK(type == Primitive::kPrimVoid);
+ DCHECK_EQ(type, Primitive::kPrimVoid);
return;
}
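The Baker fast path emitted above (the shrl/andl/cmpl sequence on the monitor word) boils down to a shift-and-mask gray test. A plain sketch follows; the constants are illustrative stand-ins for LockWord::kReadBarrierStateShift, LockWord::kReadBarrierStateMask, and ReadBarrier::gray_ptr_:

#include <cstdint>

constexpr uint32_t kReadBarrierStateShiftSketch = 28;  // Illustrative value.
constexpr uint32_t kReadBarrierStateMaskSketch = 0x1;  // Illustrative value.
constexpr uint32_t kGrayStateSketch = 1;               // Stand-in for gray_ptr_.

// Isolate the read barrier state bits of the lock word and decide whether
// the ReadBarrierMark slow path must run for the loaded reference.
bool IsGraySketch(uint32_t monitor_word) {
  const uint32_t rb_state =
      (monitor_word >> kReadBarrierStateShiftSketch) & kReadBarrierStateMaskSketch;
  return rb_state == kGrayStateSketch;
}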
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index f9403a6..7121799 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -219,11 +219,44 @@
void GenerateShlLong(const Location& loc, int shift);
void GenerateShrLong(const Location& loc, int shift);
void GenerateUShrLong(const Location& loc, int shift);
- void GenerateMemoryBarrier(MemBarrierKind kind);
+
void HandleFieldSet(HInstruction* instruction,
const FieldInfo& field_info,
bool value_can_be_null);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
+
+ // Generate a heap reference load using one register `out`:
+ //
+ // out <- *(out + offset)
+ //
+ // while honoring heap poisoning and/or read barriers (if any).
+ // Register `temp` is used when generating a read barrier.
+ void GenerateReferenceLoadOneRegister(HInstruction* instruction,
+ Location out,
+ uint32_t offset,
+ Location temp);
+ // Generate a heap reference load using two different registers
+ // `out` and `obj`:
+ //
+ // out <- *(obj + offset)
+ //
+ // while honoring heap poisoning and/or read barriers (if any).
+ // Register `temp` is used when generating a Baker's read barrier.
+ void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
+ Location out,
+ Location obj,
+ uint32_t offset,
+ Location temp);
+ // Generate a GC root reference load:
+ //
+ // root <- *(obj + offset)
+ //
+ // while honoring read barriers (if any).
+ void GenerateGcRootFieldLoad(HInstruction* instruction,
+ Location root,
+ Register obj,
+ uint32_t offset);
+
// Push value to FPU stack. `is_fp` specifies whether the value is floating point or not.
// `is_wide` specifies whether it is long/double or not.
void PushOntoFPStack(Location source, uint32_t temp_offset,
@@ -364,6 +397,8 @@
Register value,
bool value_can_be_null);
+ void GenerateMemoryBarrier(MemBarrierKind kind);
+
Label* GetLabelOf(HBasicBlock* block) const {
return CommonGetLabelOf<Label>(block_labels_, block);
}
@@ -405,7 +440,26 @@
void Finalize(CodeAllocator* allocator) OVERRIDE;
- // Generate a read barrier for a heap reference within `instruction`.
+ // Fast path implementation of ReadBarrier::Barrier for a heap
+ // reference field load when Baker's read barriers are used.
+ void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location out,
+ Register obj,
+ uint32_t offset,
+ Location temp,
+ bool needs_null_check);
+ // Fast path implementation of ReadBarrier::Barrier for a heap
+ // reference array load when Baker's read barriers are used.
+ void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location out,
+ Register obj,
+ uint32_t data_offset,
+ Location index,
+ Location temp,
+ bool needs_null_check);
+
+ // Generate a read barrier for a heap reference within `instruction`
+ // using a slow path.
//
// A read barrier for an object reference read from the heap is
// implemented as a call to the artReadBarrierSlow runtime entry
@@ -422,23 +476,25 @@
// When `index` is provided (i.e. for array accesses), the offset
// value passed to artReadBarrierSlow is adjusted to take `index`
// into account.
- void GenerateReadBarrier(HInstruction* instruction,
- Location out,
- Location ref,
- Location obj,
- uint32_t offset,
- Location index = Location::NoLocation());
+ void GenerateReadBarrierSlow(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index = Location::NoLocation());
- // If read barriers are enabled, generate a read barrier for a heap reference.
- // If heap poisoning is enabled, also unpoison the reference in `out`.
- void MaybeGenerateReadBarrier(HInstruction* instruction,
- Location out,
- Location ref,
- Location obj,
- uint32_t offset,
- Location index = Location::NoLocation());
+ // If read barriers are enabled, generate a read barrier for a heap
+ // reference using a slow path. If heap poisoning is enabled, also
+ // unpoison the reference in `out`.
+ void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index = Location::NoLocation());
- // Generate a read barrier for a GC root within `instruction`.
+ // Generate a read barrier for a GC root within `instruction` using
+ // a slow path.
//
// A read barrier for an object reference GC root is implemented as
// a call to the artReadBarrierForRootSlow runtime entry point,
@@ -448,9 +504,18 @@
//
// The `out` location contains the value returned by
// artReadBarrierForRootSlow.
- void GenerateReadBarrierForRoot(HInstruction* instruction, Location out, Location root);
+ void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
private:
+ // Factored implementation of GenerateFieldLoadWithBakerReadBarrier
+ // and GenerateArrayLoadWithBakerReadBarrier.
+ void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ Register obj,
+ const Address& src,
+ Location temp,
+ bool needs_null_check);
+
Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);
struct PcRelativeDexCacheAccessInfo {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 1fee192..2c5fbc7 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -456,6 +456,56 @@
DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86_64);
};
+// Slow path marking an object during a read barrier.
+class ReadBarrierMarkSlowPathX86_64 : public SlowPathCode {
+ public:
+ ReadBarrierMarkSlowPathX86_64(HInstruction* instruction, Location out, Location obj)
+ : instruction_(instruction), out_(out), obj_(obj) {
+ DCHECK(kEmitCompilerReadBarrier);
+ }
+
+ const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathX86_64"; }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ Register reg_out = out_.AsRegister<Register>();
+ DCHECK(locations->CanCall());
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
+ DCHECK(instruction_->IsInstanceFieldGet() ||
+ instruction_->IsStaticFieldGet() ||
+ instruction_->IsArrayGet() ||
+ instruction_->IsLoadClass() ||
+ instruction_->IsLoadString() ||
+ instruction_->IsInstanceOf() ||
+ instruction_->IsCheckCast())
+ << "Unexpected instruction in read barrier marking slow path: "
+ << instruction_->DebugName();
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ x86_64_codegen->Move(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), obj_);
+ x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierMark),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<kQuickReadBarrierMark, mirror::Object*, mirror::Object*>();
+ x86_64_codegen->Move(out_, Location::RegisterLocation(RAX));
+
+ RestoreLiveRegisters(codegen, locations);
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ HInstruction* const instruction_;
+ const Location out_;
+ const Location obj_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathX86_64);
+};
+
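The marking slow path's whole contract is a single runtime call: the possibly from-space reference goes into the first runtime argument register, and the (possibly forwarded) reference comes back in RAX and is moved to `out_`. A minimal C++ model of that contract, with a placeholder stub standing in for the pReadBarrierMark entry point whose signature the CheckEntrypointTypes line above pins down:

    struct Object;  // placeholder for mirror::Object

    // Stands in for the pReadBarrierMark entry point (mirror::Object* -> mirror::Object*).
    static Object* ReadBarrierMarkStub(Object* obj) { return obj; }

    // The entire effect of EmitNativeCode, register moves aside: out = Mark(obj).
    static Object* MarkReference(Object* obj) { return ReadBarrierMarkStub(obj); }
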
// Slow path generating a read barrier for a heap reference.
class ReadBarrierForHeapReferenceSlowPathX86_64 : public SlowPathCode {
public:
@@ -477,7 +527,7 @@
// reference load to be instrumented, e.g.:
//
// __ movl(out, Address(out, offset));
- // codegen_->GenerateReadBarrier(instruction, out_loc, out_loc, out_loc, offset);
+ // codegen_->GenerateReadBarrierSlow(instruction, out_loc, out_loc, out_loc, offset);
//
// In that case, we have lost the information about the original
// object, and the emitted read barrier cannot work properly.
@@ -493,7 +543,9 @@
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out.AsRegister())) << out_;
DCHECK(!instruction_->IsInvoke() ||
(instruction_->IsInvokeStaticOrDirect() &&
- instruction_->GetLocations()->Intrinsified()));
+ instruction_->GetLocations()->Intrinsified()))
+ << "Unexpected instruction in read barrier for heap reference slow path: "
+ << instruction_->DebugName();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -634,13 +686,17 @@
class ReadBarrierForRootSlowPathX86_64 : public SlowPathCode {
public:
ReadBarrierForRootSlowPathX86_64(HInstruction* instruction, Location out, Location root)
- : instruction_(instruction), out_(out), root_(root) {}
+ : instruction_(instruction), out_(out), root_(root) {
+ DCHECK(kEmitCompilerReadBarrier);
+ }
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(out_.reg()));
- DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString());
+ DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString())
+ << "Unexpected instruction in read barrier for GC root slow path: "
+ << instruction_->DebugName();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -731,7 +787,7 @@
case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
// temp = thread->string_init_entrypoint
__ gs()->movl(temp.AsRegister<CpuRegister>(),
- Address::Absolute(invoke->GetStringInitOffset(), true));
+ Address::Absolute(invoke->GetStringInitOffset(), /* no_rip */ true));
break;
case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
@@ -748,7 +804,7 @@
pc_relative_dex_cache_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
invoke->GetDexCacheArrayOffset());
__ movq(temp.AsRegister<CpuRegister>(),
- Address::Absolute(kDummy32BitOffset, false /* no_rip */));
+ Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
// Bind the label at the end of the "movl" insn.
__ Bind(&pc_relative_dex_cache_patches_.back().label);
break;
@@ -907,7 +963,7 @@
uint32_t dex_pc,
SlowPathCode* slow_path) {
ValidateInvokeRuntime(instruction, slow_path);
- __ gs()->call(Address::Absolute(entry_point_offset, true));
+ __ gs()->call(Address::Absolute(entry_point_offset, /* no_rip */ true));
RecordPcInfo(instruction, dex_pc, slow_path);
}
@@ -1939,7 +1995,7 @@
}
void InstructionCodeGeneratorX86_64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
- GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
+ codegen_->GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}
void LocationsBuilderX86_64::VisitReturnVoid(HReturnVoid* ret) {
@@ -2667,7 +2723,8 @@
} else {
DCHECK(in.GetConstant()->IsIntConstant());
__ movl(out.AsRegister<CpuRegister>(),
- Immediate(static_cast<uint16_t>(in.GetConstant()->AsIntConstant()->GetValue())));
+ Immediate(static_cast<uint16_t>(
+ in.GetConstant()->AsIntConstant()->GetValue())));
}
break;
@@ -2911,7 +2968,8 @@
__ addss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
} else if (second.IsConstant()) {
__ addss(first.AsFpuRegister<XmmRegister>(),
- codegen_->LiteralFloatAddress(second.GetConstant()->AsFloatConstant()->GetValue()));
+ codegen_->LiteralFloatAddress(
+ second.GetConstant()->AsFloatConstant()->GetValue()));
} else {
DCHECK(second.IsStackSlot());
__ addss(first.AsFpuRegister<XmmRegister>(),
@@ -2925,7 +2983,8 @@
__ addsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
} else if (second.IsConstant()) {
__ addsd(first.AsFpuRegister<XmmRegister>(),
- codegen_->LiteralDoubleAddress(second.GetConstant()->AsDoubleConstant()->GetValue()));
+ codegen_->LiteralDoubleAddress(
+ second.GetConstant()->AsDoubleConstant()->GetValue()));
} else {
DCHECK(second.IsDoubleStackSlot());
__ addsd(first.AsFpuRegister<XmmRegister>(),
@@ -3000,7 +3059,8 @@
__ subss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
} else if (second.IsConstant()) {
__ subss(first.AsFpuRegister<XmmRegister>(),
- codegen_->LiteralFloatAddress(second.GetConstant()->AsFloatConstant()->GetValue()));
+ codegen_->LiteralFloatAddress(
+ second.GetConstant()->AsFloatConstant()->GetValue()));
} else {
DCHECK(second.IsStackSlot());
__ subss(first.AsFpuRegister<XmmRegister>(),
@@ -3014,7 +3074,8 @@
__ subsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
} else if (second.IsConstant()) {
__ subsd(first.AsFpuRegister<XmmRegister>(),
- codegen_->LiteralDoubleAddress(second.GetConstant()->AsDoubleConstant()->GetValue()));
+ codegen_->LiteralDoubleAddress(
+ second.GetConstant()->AsDoubleConstant()->GetValue()));
} else {
DCHECK(second.IsDoubleStackSlot());
__ subsd(first.AsFpuRegister<XmmRegister>(),
@@ -3121,7 +3182,8 @@
__ mulss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
} else if (second.IsConstant()) {
__ mulss(first.AsFpuRegister<XmmRegister>(),
- codegen_->LiteralFloatAddress(second.GetConstant()->AsFloatConstant()->GetValue()));
+ codegen_->LiteralFloatAddress(
+ second.GetConstant()->AsFloatConstant()->GetValue()));
} else {
DCHECK(second.IsStackSlot());
__ mulss(first.AsFpuRegister<XmmRegister>(),
@@ -3136,7 +3198,8 @@
__ mulsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
} else if (second.IsConstant()) {
__ mulsd(first.AsFpuRegister<XmmRegister>(),
- codegen_->LiteralDoubleAddress(second.GetConstant()->AsDoubleConstant()->GetValue()));
+ codegen_->LiteralDoubleAddress(
+ second.GetConstant()->AsDoubleConstant()->GetValue()));
} else {
DCHECK(second.IsDoubleStackSlot());
__ mulsd(first.AsFpuRegister<XmmRegister>(),
@@ -3542,7 +3605,8 @@
__ divss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
} else if (second.IsConstant()) {
__ divss(first.AsFpuRegister<XmmRegister>(),
- codegen_->LiteralFloatAddress(second.GetConstant()->AsFloatConstant()->GetValue()));
+ codegen_->LiteralFloatAddress(
+ second.GetConstant()->AsFloatConstant()->GetValue()));
} else {
DCHECK(second.IsStackSlot());
__ divss(first.AsFpuRegister<XmmRegister>(),
@@ -3556,7 +3620,8 @@
__ divsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
} else if (second.IsConstant()) {
__ divsd(first.AsFpuRegister<XmmRegister>(),
- codegen_->LiteralDoubleAddress(second.GetConstant()->AsDoubleConstant()->GetValue()));
+ codegen_->LiteralDoubleAddress(
+ second.GetConstant()->AsDoubleConstant()->GetValue()));
} else {
DCHECK(second.IsDoubleStackSlot());
__ divsd(first.AsFpuRegister<XmmRegister>(),
@@ -3960,10 +4025,10 @@
LOG(FATAL) << "Unimplemented";
}
-void InstructionCodeGeneratorX86_64::GenerateMemoryBarrier(MemBarrierKind kind) {
+void CodeGeneratorX86_64::GenerateMemoryBarrier(MemBarrierKind kind) {
/*
* According to the JSR-133 Cookbook, for x86 only StoreLoad/AnyAny barriers need memory fence.
- * All other barriers (LoadAny, AnyStore, StoreStore) are nops due to the x86 memory model.
+ * All other barriers (LoadAny, AnyStore, StoreStore) are nops due to the x86-64 memory model.
* For those cases, all we need to ensure is that there is a scheduling barrier in place.
*/
switch (kind) {
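For context: under the x86-64 memory model only the kAnyAny case (which includes StoreLoad) requires a real fence instruction; the other kinds merely pin instruction scheduling. A hedged sketch of what the switch body plausibly reduces to — this is an assumption of the sketch, not the file's actual code, and the concrete fence (mfence versus a lock-prefixed no-op RMW) is likewise assumed:

    switch (kind) {
      case MemBarrierKind::kAnyAny:
        __ mfence();  // assumed fence; a lock-prefixed RMW would also do
        break;
      case MemBarrierKind::kLoadAny:
      case MemBarrierKind::kAnyStore:
      case MemBarrierKind::kStoreStore:
        break;  // scheduling barrier only; no instruction emitted
      default:
        LOG(FATAL) << "Unexpected memory barrier " << kind;
    }
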
@@ -4003,6 +4068,11 @@
Location::RequiresRegister(),
object_field_get_with_read_barrier ? Location::kOutputOverlap : Location::kNoOutputOverlap);
}
+ if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
+ // We need a temporary register for the read barrier marking slow
+ // path in CodeGeneratorX86_64::GenerateFieldLoadWithBakerReadBarrier.
+ locations->AddTemp(Location::RequiresRegister());
+ }
}
void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction,
@@ -4038,12 +4108,36 @@
break;
}
- case Primitive::kPrimInt:
- case Primitive::kPrimNot: {
+ case Primitive::kPrimInt: {
__ movl(out.AsRegister<CpuRegister>(), Address(base, offset));
break;
}
+ case Primitive::kPrimNot: {
+ // /* HeapReference<Object> */ out = *(base + offset)
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ Location temp_loc = locations->GetTemp(0);
+ // Note that a potential implicit null check is handled in this
+ // CodeGeneratorX86_64::GenerateFieldLoadWithBakerReadBarrier call.
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ instruction, out, base, offset, temp_loc, /* needs_null_check */ true);
+ if (is_volatile) {
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+ }
+ } else {
+ __ movl(out.AsRegister<CpuRegister>(), Address(base, offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ if (is_volatile) {
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+ }
+ // If read barriers are enabled, emit read barriers other than
+ // Baker's using a slow path (and also unpoison the loaded
+ // reference, if heap poisoning is enabled).
+ codegen_->MaybeGenerateReadBarrierSlow(instruction, out, out, base_loc, offset);
+ }
+ break;
+ }
+
case Primitive::kPrimLong: {
__ movq(out.AsRegister<CpuRegister>(), Address(base, offset));
break;
@@ -4064,14 +4158,20 @@
UNREACHABLE();
}
- codegen_->MaybeRecordImplicitNullCheck(instruction);
-
- if (is_volatile) {
- GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+ if (field_type == Primitive::kPrimNot) {
+ // Potential implicit null checks, in the case of reference
+ // fields, are handled in the previous switch statement.
+ } else {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
}
- if (field_type == Primitive::kPrimNot) {
- codegen_->MaybeGenerateReadBarrier(instruction, out, out, base_loc, offset);
+ if (is_volatile) {
+ if (field_type == Primitive::kPrimNot) {
+ // Memory barriers, in the case of references, are also handled
+ // in the previous switch statement.
+ } else {
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+ }
}
}
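After this restructuring, a volatile reference get performs the instrumented load first and the kLoadAny barrier second in both branches; only the responsibility for the implicit null check and the barrier moved into the kPrimNot case. A compilable toy model of the resulting order (placeholder Object type; the acquire fence stands in for kLoadAny, which emits no instruction on x86-64):

    #include <atomic>

    struct Object;  // placeholder for mirror::Object

    // Toy model: instrumented reference load, then the kLoadAny ordering point.
    static Object* VolatileReferenceGet(Object* const* field_addr) {
      Object* ref = *field_addr;                            // Baker fast path or slow-path load
      std::atomic_thread_fence(std::memory_order_acquire);  // MemBarrierKind::kLoadAny
      return ref;
    }
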
@@ -4125,7 +4225,7 @@
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
if (is_volatile) {
- GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
}
bool maybe_record_implicit_null_check_done = false;
@@ -4231,7 +4331,7 @@
}
if (is_volatile) {
- GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
}
}
@@ -4408,6 +4508,11 @@
Location::RequiresRegister(),
object_array_get_with_read_barrier ? Location::kOutputOverlap : Location::kNoOutputOverlap);
}
+ // We need a temporary register for the read barrier marking slow
+ // path in CodeGeneratorX86_64::GenerateArrayLoadWithBakerReadBarrier.
+ if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
+ locations->AddTemp(Location::RequiresRegister());
+ }
}
void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
@@ -4415,12 +4520,13 @@
Location obj_loc = locations->InAt(0);
CpuRegister obj = obj_loc.AsRegister<CpuRegister>();
Location index = locations->InAt(1);
- Primitive::Type type = instruction->GetType();
+ Location out_loc = locations->Out();
+ Primitive::Type type = instruction->GetType();
switch (type) {
case Primitive::kPrimBoolean: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+ CpuRegister out = out_loc.AsRegister<CpuRegister>();
if (index.IsConstant()) {
__ movzxb(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
@@ -4432,7 +4538,7 @@
case Primitive::kPrimByte: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
- CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+ CpuRegister out = out_loc.AsRegister<CpuRegister>();
if (index.IsConstant()) {
__ movsxb(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
@@ -4444,7 +4550,7 @@
case Primitive::kPrimShort: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
- CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+ CpuRegister out = out_loc.AsRegister<CpuRegister>();
if (index.IsConstant()) {
__ movsxw(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
@@ -4456,7 +4562,7 @@
case Primitive::kPrimChar: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+ CpuRegister out = out_loc.AsRegister<CpuRegister>();
if (index.IsConstant()) {
__ movzxw(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
@@ -4466,13 +4572,9 @@
break;
}
- case Primitive::kPrimInt:
- case Primitive::kPrimNot: {
- static_assert(
- sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
- "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
+ case Primitive::kPrimInt: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+ CpuRegister out = out_loc.AsRegister<CpuRegister>();
if (index.IsConstant()) {
__ movl(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
@@ -4482,9 +4584,46 @@
break;
}
+ case Primitive::kPrimNot: {
+ static_assert(
+ sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
+ "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ // /* HeapReference<Object> */ out =
+ // *(obj + data_offset + index * sizeof(HeapReference<Object>))
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ Location temp = locations->GetTemp(0);
+ // Note that a potential implicit null check is handled in this
+ // CodeGeneratorX86_64::GenerateArrayLoadWithBakerReadBarrier call.
+ codegen_->GenerateArrayLoadWithBakerReadBarrier(
+ instruction, out_loc, obj, data_offset, index, temp, /* needs_null_check */ true);
+ } else {
+ CpuRegister out = out_loc.AsRegister<CpuRegister>();
+ if (index.IsConstant()) {
+ uint32_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ movl(out, Address(obj, offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // If read barriers are enabled, emit read barriers other than
+ // Baker's using a slow path (and also unpoison the loaded
+ // reference, if heap poisoning is enabled).
+ codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset);
+ } else {
+ __ movl(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // If read barriers are enabled, emit read barriers other than
+ // Baker's using a slow path (and also unpoison the loaded
+ // reference, if heap poisoning is enabled).
+ codegen_->MaybeGenerateReadBarrierSlow(
+ instruction, out_loc, out_loc, obj_loc, data_offset, index);
+ }
+ }
+ break;
+ }
+
case Primitive::kPrimLong: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
- CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+ CpuRegister out = out_loc.AsRegister<CpuRegister>();
if (index.IsConstant()) {
__ movq(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset));
@@ -4496,7 +4635,7 @@
case Primitive::kPrimFloat: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
- XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
+ XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
if (index.IsConstant()) {
__ movss(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
@@ -4508,7 +4647,7 @@
case Primitive::kPrimDouble: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
- XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
+ XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
if (index.IsConstant()) {
__ movsd(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset));
@@ -4522,20 +4661,12 @@
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
}
- codegen_->MaybeRecordImplicitNullCheck(instruction);
if (type == Primitive::kPrimNot) {
- static_assert(
- sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
- "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Location out = locations->Out();
- if (index.IsConstant()) {
- uint32_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- codegen_->MaybeGenerateReadBarrier(instruction, out, out, obj_loc, offset);
- } else {
- codegen_->MaybeGenerateReadBarrier(instruction, out, out, obj_loc, data_offset, index);
- }
+ // Potential implicit null checks, in the case of reference
+ // arrays, are handled in the previous switch statement.
+ } else {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
}
}
@@ -4659,12 +4790,12 @@
// __ movl(temp2, temp);
// // /* HeapReference<Class> */ temp = temp->component_type_
// __ movl(temp, Address(temp, component_offset));
- // codegen_->GenerateReadBarrier(
+ // codegen_->GenerateReadBarrierSlow(
// instruction, temp_loc, temp_loc, temp2_loc, component_offset);
//
// // /* HeapReference<Class> */ temp2 = register_value->klass_
// __ movl(temp2, Address(register_value, class_offset));
- // codegen_->GenerateReadBarrier(
+ // codegen_->GenerateReadBarrierSlow(
// instruction, temp2_loc, temp2_loc, value, class_offset, temp_loc);
//
// __ cmpl(temp, temp2);
@@ -4890,8 +5021,8 @@
__ testl(value, value);
__ j(kEqual, &is_null);
}
- __ gs()->movq(card, Address::Absolute(
- Thread::CardTableOffset<kX86_64WordSize>().Int32Value(), true));
+ __ gs()->movq(card, Address::Absolute(Thread::CardTableOffset<kX86_64WordSize>().Int32Value(),
+ /* no_rip */ true));
__ movq(temp, object);
__ shrq(temp, Immediate(gc::accounting::CardTable::kCardShift));
__ movb(Address(temp, card, TIMES_1, 0), card);
@@ -4950,8 +5081,9 @@
DCHECK_EQ(slow_path->GetSuccessor(), successor);
}
- __ gs()->cmpw(Address::Absolute(
- Thread::ThreadFlagsOffset<kX86_64WordSize>().Int32Value(), true), Immediate(0));
+ __ gs()->cmpw(Address::Absolute(Thread::ThreadFlagsOffset<kX86_64WordSize>().Int32Value(),
+ /* no_rip */ true),
+ Immediate(0));
if (successor == nullptr) {
__ j(kNotEqual, slow_path->GetEntryLabel());
__ Bind(slow_path->GetReturnLabel());
@@ -5175,7 +5307,7 @@
Immediate(mirror::Class::kStatusInitialized));
__ j(kLess, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
- // No need for memory fence, thanks to the X86_64 memory model.
+ // No need for memory fence, thanks to the x86-64 memory model.
}
void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
@@ -5206,32 +5338,16 @@
if (cls->IsReferrersClass()) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
- uint32_t declaring_class_offset = ArtMethod::DeclaringClassOffset().Int32Value();
- if (kEmitCompilerReadBarrier) {
- // /* GcRoot<mirror::Class>* */ out = &(current_method->declaring_class_)
- __ leaq(out, Address(current_method, declaring_class_offset));
- // /* mirror::Class* */ out = out->Read()
- codegen_->GenerateReadBarrierForRoot(cls, out_loc, out_loc);
- } else {
- // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
- __ movl(out, Address(current_method, declaring_class_offset));
- }
+ // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
+ GenerateGcRootFieldLoad(
+ cls, out_loc, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
} else {
// /* GcRoot<mirror::Class>[] */ out =
// current_method.ptr_sized_fields_->dex_cache_resolved_types_
__ movq(out, Address(current_method,
ArtMethod::DexCacheResolvedTypesOffset(kX86_64PointerSize).Int32Value()));
-
- size_t cache_offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex());
- if (kEmitCompilerReadBarrier) {
- // /* GcRoot<mirror::Class>* */ out = &out[type_index]
- __ leaq(out, Address(out, cache_offset));
- // /* mirror::Class* */ out = out->Read()
- codegen_->GenerateReadBarrierForRoot(cls, out_loc, out_loc);
- } else {
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- __ movl(out, Address(out, cache_offset));
- }
+ // /* GcRoot<mirror::Class> */ out = out[type_index]
+ GenerateGcRootFieldLoad(cls, out_loc, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
if (!cls->IsInDexCache() || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
@@ -5284,30 +5400,14 @@
CpuRegister out = out_loc.AsRegister<CpuRegister>();
CpuRegister current_method = locations->InAt(0).AsRegister<CpuRegister>();
- uint32_t declaring_class_offset = ArtMethod::DeclaringClassOffset().Int32Value();
- if (kEmitCompilerReadBarrier) {
- // /* GcRoot<mirror::Class>* */ out = &(current_method->declaring_class_)
- __ leaq(out, Address(current_method, declaring_class_offset));
- // /* mirror::Class* */ out = out->Read()
- codegen_->GenerateReadBarrierForRoot(load, out_loc, out_loc);
- } else {
- // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
- __ movl(out, Address(current_method, declaring_class_offset));
- }
-
+ // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
+ GenerateGcRootFieldLoad(
+ load, out_loc, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
// /* GcRoot<mirror::String>[] */ out = out->dex_cache_strings_
__ movq(out, Address(out, mirror::Class::DexCacheStringsOffset().Uint32Value()));
-
- size_t cache_offset = CodeGenerator::GetCacheOffset(load->GetStringIndex());
- if (kEmitCompilerReadBarrier) {
- // /* GcRoot<mirror::String>* */ out = &out[string_index]
- __ leaq(out, Address(out, cache_offset));
- // /* mirror::String* */ out = out->Read()
- codegen_->GenerateReadBarrierForRoot(load, out_loc, out_loc);
- } else {
- // /* GcRoot<mirror::String> */ out = out[string_index]
- __ movl(out, Address(out, cache_offset));
- }
+ // /* GcRoot<mirror::String> */ out = out[string_index]
+ GenerateGcRootFieldLoad(
+ load, out_loc, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
if (!load->IsInDexCache()) {
SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86_64(load);
@@ -5319,7 +5419,8 @@
}
static Address GetExceptionTlsAddress() {
- return Address::Absolute(Thread::ExceptionOffset<kX86_64WordSize>().Int32Value(), true);
+ return Address::Absolute(Thread::ExceptionOffset<kX86_64WordSize>().Int32Value(),
+ /* no_rip */ true);
}
void LocationsBuilderX86_64::VisitLoadException(HLoadException* load) {
@@ -5355,6 +5456,14 @@
CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}
+static bool TypeCheckNeedsATemporary(TypeCheckKind type_check_kind) {
+ return kEmitCompilerReadBarrier &&
+ (kUseBakerReadBarrier ||
+ type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+ type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+ type_check_kind == TypeCheckKind::kArrayObjectCheck);
+}
+
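This predicate widens the previous inline condition: with Baker's read barriers, every instrumented reference load in the type-check sequences needs the marking temporary, not only the three check kinds that re-load through a saved copy. Callers use it symmetrically, as the surrounding code does:

    // In the locations builder:
    if (TypeCheckNeedsATemporary(type_check_kind)) {
      locations->AddTemp(Location::RequiresRegister());
    }

    // In the instruction code generator:
    Location temp_loc = TypeCheckNeedsATemporary(type_check_kind)
        ? locations->GetTemp(0)
        : Location::NoLocation();
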
void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
@@ -5380,21 +5489,22 @@
locations->SetOut(Location::RequiresRegister());
// When read barriers are enabled, we need a temporary register for
// some cases.
- if (kEmitCompilerReadBarrier &&
- (type_check_kind == TypeCheckKind::kAbstractClassCheck ||
- type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
- type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
+ if (TypeCheckNeedsATemporary(type_check_kind)) {
locations->AddTemp(Location::RequiresRegister());
}
}
void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
+ TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
LocationSummary* locations = instruction->GetLocations();
Location obj_loc = locations->InAt(0);
CpuRegister obj = obj_loc.AsRegister<CpuRegister>();
Location cls = locations->InAt(1);
Location out_loc = locations->Out();
CpuRegister out = out_loc.AsRegister<CpuRegister>();
+ Location temp_loc = TypeCheckNeedsATemporary(type_check_kind) ?
+ locations->GetTemp(0) :
+ Location::NoLocation();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
@@ -5410,10 +5520,9 @@
}
// /* HeapReference<Class> */ out = obj->klass_
- __ movl(out, Address(obj, class_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, obj_loc, class_offset);
+ GenerateReferenceLoadTwoRegisters(instruction, out_loc, obj_loc, class_offset, temp_loc);
- switch (instruction->GetTypeCheckKind()) {
+ switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
if (cls.IsRegister()) {
__ cmpl(out, cls.AsRegister<CpuRegister>());
@@ -5439,17 +5548,8 @@
// object to avoid doing a comparison we know will fail.
NearLabel loop, success;
__ Bind(&loop);
- Location temp_loc = kEmitCompilerReadBarrier ? locations->GetTemp(0) : Location::NoLocation();
- if (kEmitCompilerReadBarrier) {
- // Save the value of `out` into `temp` before overwriting it
- // in the following move operation, as we will need it for the
- // read barrier below.
- CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
- __ movl(temp, out);
- }
// /* HeapReference<Class> */ out = out->super_class_
- __ movl(out, Address(out, super_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, temp_loc, super_offset);
+ GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, temp_loc);
__ testl(out, out);
// If `out` is null, we use it for the result, and jump to `done`.
__ j(kEqual, &done);
@@ -5478,17 +5578,8 @@
__ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex()));
}
__ j(kEqual, &success);
- Location temp_loc = kEmitCompilerReadBarrier ? locations->GetTemp(0) : Location::NoLocation();
- if (kEmitCompilerReadBarrier) {
- // Save the value of `out` into `temp` before overwriting it
- // in the following move operation, as we will need it for the
- // read barrier below.
- CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
- __ movl(temp, out);
- }
// /* HeapReference<Class> */ out = out->super_class_
- __ movl(out, Address(out, super_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, temp_loc, super_offset);
+ GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, temp_loc);
__ testl(out, out);
__ j(kNotEqual, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
@@ -5512,17 +5603,8 @@
}
__ j(kEqual, &exact_check);
// Otherwise, we need to check that the object's class is a non-primitive array.
- Location temp_loc = kEmitCompilerReadBarrier ? locations->GetTemp(0) : Location::NoLocation();
- if (kEmitCompilerReadBarrier) {
- // Save the value of `out` into `temp` before overwriting it
- // in the following move operation, as we will need it for the
- // read barrier below.
- CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
- __ movl(temp, out);
- }
// /* HeapReference<Class> */ out = out->component_type_
- __ movl(out, Address(out, component_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, temp_loc, component_offset);
+ GenerateReferenceLoadOneRegister(instruction, out_loc, component_offset, temp_loc);
__ testl(out, out);
// If `out` is null, we use it for the result, and jump to `done`.
__ j(kEqual, &done);
@@ -5566,6 +5648,13 @@
// HInstanceOf instruction (following the runtime calling
// convention), which might be cluttered by the potential first
// read barrier emission at the beginning of this method.
+ //
+ // TODO: Introduce a new runtime entry point taking the object
+ // to test (instead of its class) as argument, and let it deal
+ // with the read barrier issues. This will let us refactor this
+ // case of the `switch` code as it was previously (with a direct
+ // call to the runtime not using a type checking slow path).
+ // This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction,
/* is_fatal */ false);
@@ -5618,27 +5707,27 @@
locations->AddTemp(Location::RequiresRegister());
// When read barriers are enabled, we need an additional temporary
// register for some cases.
- if (kEmitCompilerReadBarrier &&
- (type_check_kind == TypeCheckKind::kAbstractClassCheck ||
- type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
- type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
+ if (TypeCheckNeedsATemporary(type_check_kind)) {
locations->AddTemp(Location::RequiresRegister());
}
}
void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
+ TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
LocationSummary* locations = instruction->GetLocations();
Location obj_loc = locations->InAt(0);
CpuRegister obj = obj_loc.AsRegister<CpuRegister>();
Location cls = locations->InAt(1);
Location temp_loc = locations->GetTemp(0);
CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
+ Location temp2_loc = TypeCheckNeedsATemporary(type_check_kind) ?
+ locations->GetTemp(1) :
+ Location::NoLocation();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
- TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
bool is_type_check_slow_path_fatal =
(type_check_kind == TypeCheckKind::kExactCheck ||
type_check_kind == TypeCheckKind::kAbstractClassCheck ||
@@ -5650,7 +5739,7 @@
is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
- NearLabel done;
+ Label done;
// Avoid null check if we know obj is not null.
if (instruction->MustDoNullCheck()) {
__ testl(obj, obj);
@@ -5658,8 +5747,7 @@
}
// /* HeapReference<Class> */ temp = obj->klass_
- __ movl(temp, Address(obj, class_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, temp2_loc);
switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
@@ -5681,18 +5769,8 @@
// object to avoid doing a comparison we know will fail.
NearLabel loop, compare_classes;
__ Bind(&loop);
- Location temp2_loc =
- kEmitCompilerReadBarrier ? locations->GetTemp(1) : Location::NoLocation();
- if (kEmitCompilerReadBarrier) {
- // Save the value of `temp` into `temp2` before overwriting it
- // in the following move operation, as we will need it for the
- // read barrier below.
- CpuRegister temp2 = temp2_loc.AsRegister<CpuRegister>();
- __ movl(temp2, temp);
- }
// /* HeapReference<Class> */ temp = temp->super_class_
- __ movl(temp, Address(temp, super_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, temp2_loc, super_offset);
+ GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, temp2_loc);
// If the class reference currently in `temp` is not null, jump
// to the `compare_classes` label to compare it with the checked
@@ -5705,8 +5783,7 @@
// going into the slow path, as it has been overwritten in the
// meantime.
// /* HeapReference<Class> */ temp = obj->klass_
- __ movl(temp, Address(obj, class_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, temp2_loc);
__ jmp(type_check_slow_path->GetEntryLabel());
__ Bind(&compare_classes);
@@ -5732,18 +5809,8 @@
}
__ j(kEqual, &done);
- Location temp2_loc =
- kEmitCompilerReadBarrier ? locations->GetTemp(1) : Location::NoLocation();
- if (kEmitCompilerReadBarrier) {
- // Save the value of `temp` into `temp2` before overwriting it
- // in the following move operation, as we will need it for the
- // read barrier below.
- CpuRegister temp2 = temp2_loc.AsRegister<CpuRegister>();
- __ movl(temp2, temp);
- }
// /* HeapReference<Class> */ temp = temp->super_class_
- __ movl(temp, Address(temp, super_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, temp2_loc, super_offset);
+ GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, temp2_loc);
// If the class reference currently in `temp` is not null, jump
// back at the beginning of the loop.
@@ -5755,8 +5822,7 @@
// going into the slow path, as it has been overwritten in the
// meantime.
// /* HeapReference<Class> */ temp = obj->klass_
- __ movl(temp, Address(obj, class_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, temp2_loc);
__ jmp(type_check_slow_path->GetEntryLabel());
break;
}
@@ -5773,19 +5839,8 @@
__ j(kEqual, &done);
// Otherwise, we need to check that the object's class is a non-primitive array.
- Location temp2_loc =
- kEmitCompilerReadBarrier ? locations->GetTemp(1) : Location::NoLocation();
- if (kEmitCompilerReadBarrier) {
- // Save the value of `temp` into `temp2` before overwriting it
- // in the following move operation, as we will need it for the
- // read barrier below.
- CpuRegister temp2 = temp2_loc.AsRegister<CpuRegister>();
- __ movl(temp2, temp);
- }
// /* HeapReference<Class> */ temp = temp->component_type_
- __ movl(temp, Address(temp, component_offset));
- codegen_->MaybeGenerateReadBarrier(
- instruction, temp_loc, temp_loc, temp2_loc, component_offset);
+ GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, temp2_loc);
// If the component type is not null (i.e. the object is indeed
// an array), jump to label `check_non_primitive_component_type`
@@ -5799,8 +5854,7 @@
// going into the slow path, as it has been overwritten in the
// meantime.
// /* HeapReference<Class> */ temp = obj->klass_
- __ movl(temp, Address(obj, class_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, temp2_loc);
__ jmp(type_check_slow_path->GetEntryLabel());
__ Bind(&check_non_primitive_component_type);
@@ -5808,8 +5862,7 @@
__ j(kEqual, &done);
// Same comment as above regarding `temp` and the slow path.
// /* HeapReference<Class> */ temp = obj->klass_
- __ movl(temp, Address(obj, class_offset));
- codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, temp2_loc);
__ jmp(type_check_slow_path->GetEntryLabel());
break;
}
@@ -5826,6 +5879,13 @@
// instruction (following the runtime calling convention), which
// might be cluttered by the potential first read barrier
// emission at the beginning of this method.
+ //
+ // TODO: Introduce a new runtime entry point taking the object
+ // to test (instead of its class) as argument, and let it deal
+ // with the read barrier issues. This will let us refactor this
+ // case of the `switch` code as it was previously (with a direct
+ // call to the runtime not using a type checking slow path).
+ // This should also be beneficial for the other cases above.
__ jmp(type_check_slow_path->GetEntryLabel());
break;
}
@@ -5969,14 +6029,227 @@
}
}
-void CodeGeneratorX86_64::GenerateReadBarrier(HInstruction* instruction,
- Location out,
- Location ref,
- Location obj,
- uint32_t offset,
- Location index) {
+void InstructionCodeGeneratorX86_64::GenerateReferenceLoadOneRegister(HInstruction* instruction,
+ Location out,
+ uint32_t offset,
+ Location temp) {
+ CpuRegister out_reg = out.AsRegister<CpuRegister>();
+ if (kEmitCompilerReadBarrier) {
+ if (kUseBakerReadBarrier) {
+ // Load with fast path based Baker's read barrier.
+ // /* HeapReference<Object> */ out = *(out + offset)
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ instruction, out, out_reg, offset, temp, /* needs_null_check */ false);
+ } else {
+ // Load with slow path based read barrier.
+ // Save the value of `out` into `temp` before overwriting it
+ // in the following move operation, as we will need it for the
+ // read barrier below.
+ __ movl(temp.AsRegister<CpuRegister>(), out_reg);
+ // /* HeapReference<Object> */ out = *(out + offset)
+ __ movl(out_reg, Address(out_reg, offset));
+ codegen_->GenerateReadBarrierSlow(instruction, out, out, temp, offset);
+ }
+ } else {
+ // Plain load with no read barrier.
+ // /* HeapReference<Object> */ out = *(out + offset)
+ __ movl(out_reg, Address(out_reg, offset));
+ __ MaybeUnpoisonHeapReference(out_reg);
+ }
+}
+
+void InstructionCodeGeneratorX86_64::GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
+ Location out,
+ Location obj,
+ uint32_t offset,
+ Location temp) {
+ CpuRegister out_reg = out.AsRegister<CpuRegister>();
+ CpuRegister obj_reg = obj.AsRegister<CpuRegister>();
+ if (kEmitCompilerReadBarrier) {
+ if (kUseBakerReadBarrier) {
+ // Load with fast path based Baker's read barrier.
+ // /* HeapReference<Object> */ out = *(obj + offset)
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ instruction, out, obj_reg, offset, temp, /* needs_null_check */ false);
+ } else {
+ // Load with slow path based read barrier.
+ // /* HeapReference<Object> */ out = *(obj + offset)
+ __ movl(out_reg, Address(obj_reg, offset));
+ codegen_->GenerateReadBarrierSlow(instruction, out, out, obj, offset);
+ }
+ } else {
+ // Plain load with no read barrier.
+ // /* HeapReference<Object> */ out = *(obj + offset)
+ __ movl(out_reg, Address(obj_reg, offset));
+ __ MaybeUnpoisonHeapReference(out_reg);
+ }
+}
+
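The split between the two helpers is about register pressure: the two-register form reads through `obj` and leaves it intact, while the one-register form clobbers `out` in place (saving it into `temp` first only when a non-Baker slow-path barrier needs the original holder). They chain naturally in the class-hierarchy walks above, e.g.:

    // /* HeapReference<Class> */ out = obj->klass_
    GenerateReferenceLoadTwoRegisters(instruction, out_loc, obj_loc, class_offset, temp_loc);
    // /* HeapReference<Class> */ out = out->super_class_
    GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, temp_loc);
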
+void InstructionCodeGeneratorX86_64::GenerateGcRootFieldLoad(HInstruction* instruction,
+ Location root,
+ CpuRegister obj,
+ uint32_t offset) {
+ CpuRegister root_reg = root.AsRegister<CpuRegister>();
+ if (kEmitCompilerReadBarrier) {
+ if (kUseBakerReadBarrier) {
+ // Fast path implementation of art::ReadBarrier::BarrierForRoot when
+ // Baker's read barriers are used:
+ //
+ // root = obj.field;
+ // if (Thread::Current()->GetIsGcMarking()) {
+ // root = ReadBarrier::Mark(root)
+ // }
+
+ // /* GcRoot<mirror::Object> */ root = *(obj + offset)
+ __ movl(root_reg, Address(obj, offset));
+ static_assert(
+ sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
+ "art::mirror::CompressedReference<mirror::Object> and art::GcRoot<mirror::Object> "
+ "have different sizes.");
+ static_assert(sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(int32_t),
+ "art::mirror::CompressedReference<mirror::Object> and int32_t "
+ "have different sizes.");
+
+ // Slow path used to mark the GC root `root`.
+ SlowPathCode* slow_path =
+ new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86_64(instruction, root, root);
+ codegen_->AddSlowPath(slow_path);
+
+ __ gs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86_64WordSize>().Int32Value(),
+ /* no_rip */ true),
+ Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ } else {
+ // GC root loaded through a slow path for read barriers other
+ // than Baker's.
+ // /* GcRoot<mirror::Object>* */ root = obj + offset
+ __ leaq(root_reg, Address(obj, offset));
+ // /* mirror::Object* */ root = root->Read()
+ codegen_->GenerateReadBarrierForRootSlow(instruction, root, root);
+ }
+ } else {
+ // Plain GC root load with no read barrier.
+ // /* GcRoot<mirror::Object> */ root = *(obj + offset)
+ __ movl(root_reg, Address(obj, offset));
+ }
+}
+
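A compilable toy model of the Baker root fast path emitted above, using placeholder names: the atomic flag stands in for the gs-relative Thread::IsGcMarkingOffset slot, and the stub for the pReadBarrierMark entry point:

    #include <atomic>
    #include <cstdint>

    struct Object;  // placeholder for mirror::Object

    static std::atomic<int32_t> tls_is_gc_marking{0};  // models Thread::Current()->GetIsGcMarking()
    static Object* ReadBarrierMarkStub(Object* root) { return root; }  // models pReadBarrierMark

    // Plain root load, with a conditional detour while the GC is marking.
    static Object* LoadGcRoot(Object* const* root_addr) {
      Object* root = *root_addr;                                     // movl root_reg, [obj + offset]
      if (tls_is_gc_marking.load(std::memory_order_relaxed) != 0) {  // gs-relative cmpl
        root = ReadBarrierMarkStub(root);                            // ReadBarrierMarkSlowPathX86_64
      }
      return root;
    }
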
+void CodeGeneratorX86_64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ CpuRegister obj,
+ uint32_t offset,
+ Location temp,
+ bool needs_null_check) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+
+ // /* HeapReference<Object> */ ref = *(obj + offset)
+ Address src(obj, offset);
+ GenerateReferenceLoadWithBakerReadBarrier(instruction, ref, obj, src, temp, needs_null_check);
+}
+
+void CodeGeneratorX86_64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ CpuRegister obj,
+ uint32_t data_offset,
+ Location index,
+ Location temp,
+ bool needs_null_check) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+
+ // /* HeapReference<Object> */ ref =
+ // *(obj + data_offset + index * sizeof(HeapReference<Object>))
+ Address src = index.IsConstant() ?
+ Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset) :
+ Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset);
+ GenerateReferenceLoadWithBakerReadBarrier(instruction, ref, obj, src, temp, needs_null_check);
+}
+
+void CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ CpuRegister obj,
+ const Address& src,
+ Location temp,
+ bool needs_null_check) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+
+ // In slow path based read barriers, the read barrier call is
+ // inserted after the original load. However, in fast path based
+ // Baker's read barriers, we need to perform the load of
+ // mirror::Object::monitor_ *before* the original reference load.
+ // This load-load ordering is required by the read barrier.
+ // The fast path/slow path (for Baker's algorithm) should look like:
+ //
+ // uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
+ // lfence; // Load fence or artificial data dependency to prevent load-load reordering
+ // HeapReference<Object> ref = *src; // Original reference load.
+ // bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+ // if (is_gray) {
+ // ref = ReadBarrier::Mark(ref); // Performed by runtime entrypoint slow path.
+ // }
+ //
+ // Note: the original implementation in ReadBarrier::Barrier is
+ // slightly more complex as:
+ // - it implements the load-load fence using a data dependency on
+ // the high-bits of rb_state, which are expected to be all zeroes;
+ // - it performs additional checks that we do not do here for
+ // performance reasons.
+
+ CpuRegister ref_reg = ref.AsRegister<CpuRegister>();
+ CpuRegister temp_reg = temp.AsRegister<CpuRegister>();
+ uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
+
+ // /* int32_t */ monitor = obj->monitor_
+ __ movl(temp_reg, Address(obj, monitor_offset));
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ // /* LockWord */ lock_word = LockWord(monitor)
+ static_assert(sizeof(LockWord) == sizeof(int32_t),
+ "art::LockWord and int32_t have different sizes.");
+ // /* uint32_t */ rb_state = lock_word.ReadBarrierState()
+ __ shrl(temp_reg, Immediate(LockWord::kReadBarrierStateShift));
+ __ andl(temp_reg, Immediate(LockWord::kReadBarrierStateMask));
+ static_assert(
+ LockWord::kReadBarrierStateMask == ReadBarrier::rb_ptr_mask_,
+ "art::LockWord::kReadBarrierStateMask is not equal to art::ReadBarrier::rb_ptr_mask_.");
+
+ // Load fence to prevent load-load reordering.
+ // Note that this is a no-op, thanks to the x86-64 memory model.
+ GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+
+ // The actual reference load.
+ // /* HeapReference<Object> */ ref = *src
+ __ movl(ref_reg, src);
+
+ // Object* ref = ref_addr->AsMirrorPtr()
+ __ MaybeUnpoisonHeapReference(ref_reg);
+
+ // Slow path used to mark the object `ref` when it is gray.
+ SlowPathCode* slow_path =
+ new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86_64(instruction, ref, ref);
+ AddSlowPath(slow_path);
+
+ // if (rb_state == ReadBarrier::gray_ptr_)
+ // ref = ReadBarrier::Mark(ref);
+ __ cmpl(temp_reg, Immediate(ReadBarrier::gray_ptr_));
+ __ j(kEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
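The sequence above can be modelled in a few lines of portable C++. The rb-state shift/mask/gray values and the explicit fence are assumptions of this sketch; the emitted code uses the LockWord constants and gets the load-load ordering for free from the x86-64 memory model:

    #include <atomic>
    #include <cstdint>

    struct Obj {
      std::atomic<uint32_t> monitor{0};  // lock word; read barrier state in its high bits
    };

    static constexpr uint32_t kRbStateShift = 28;  // assumption for the sketch
    static constexpr uint32_t kRbStateMask = 0x3;  // assumption for the sketch
    static constexpr uint32_t kGrayPtr = 1;        // assumption for the sketch

    static Obj* MarkStub(Obj* ref) { return ref; }  // models pReadBarrierMark

    // Monitor word first, an ordering point, then the reference; mark when gray.
    static Obj* BakerReferenceLoad(Obj* holder, Obj* const* src) {
      uint32_t rb_state =
          (holder->monitor.load(std::memory_order_relaxed) >> kRbStateShift) & kRbStateMask;
      std::atomic_thread_fence(std::memory_order_acquire);  // load-load ordering; free on x86-64
      Obj* ref = *src;                                      // original reference load
      if (rb_state == kGrayPtr) {
        ref = MarkStub(ref);                                // ReadBarrierMarkSlowPathX86_64
      }
      return ref;
    }
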
+void CodeGeneratorX86_64::GenerateReadBarrierSlow(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index) {
DCHECK(kEmitCompilerReadBarrier);
+ // Insert a slow path based read barrier *after* the reference load.
+ //
// If heap poisoning is enabled, the unpoisoning of the loaded
// reference will be carried out by the runtime within the slow
// path.
@@ -5990,57 +6263,41 @@
ReadBarrierForHeapReferenceSlowPathX86_64(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
- // TODO: When read barrier has a fast path, add it here.
- /* Currently the read barrier call is inserted after the original load.
- * However, if we have a fast path, we need to perform the load of obj.LockWord *before* the
- * original load. This load-load ordering is required by the read barrier.
- * The fast path/slow path (for Baker's algorithm) should look like:
- *
- * bool isGray = obj.LockWord & kReadBarrierMask;
- * lfence; // load fence or artificial data dependence to prevent load-load reordering
- * ref = obj.field; // this is the original load
- * if (isGray) {
- * ref = Mark(ref); // ideally the slow path just does Mark(ref)
- * }
- */
-
__ jmp(slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
-void CodeGeneratorX86_64::MaybeGenerateReadBarrier(HInstruction* instruction,
- Location out,
- Location ref,
- Location obj,
- uint32_t offset,
- Location index) {
+void CodeGeneratorX86_64::MaybeGenerateReadBarrierSlow(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index) {
if (kEmitCompilerReadBarrier) {
+ // Baker's read barriers shall be handled by the fast path
+ // (CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier).
+ DCHECK(!kUseBakerReadBarrier);
// If heap poisoning is enabled, unpoisoning will be taken care of
// by the runtime within the slow path.
- GenerateReadBarrier(instruction, out, ref, obj, offset, index);
+ GenerateReadBarrierSlow(instruction, out, ref, obj, offset, index);
} else if (kPoisonHeapReferences) {
__ UnpoisonHeapReference(out.AsRegister<CpuRegister>());
}
}
-void CodeGeneratorX86_64::GenerateReadBarrierForRoot(HInstruction* instruction,
- Location out,
- Location root) {
+void CodeGeneratorX86_64::GenerateReadBarrierForRootSlow(HInstruction* instruction,
+ Location out,
+ Location root) {
DCHECK(kEmitCompilerReadBarrier);
+ // Insert a slow path based read barrier *after* the GC root load.
+ //
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCode* slow_path =
new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathX86_64(instruction, out, root);
AddSlowPath(slow_path);
- // TODO: Implement a fast path for ReadBarrierForRoot, performing
- // the following operation (for Baker's algorithm):
- //
- // if (thread.tls32_.is_gc_marking) {
- // root = Mark(root);
- // }
-
__ jmp(slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
@@ -6289,7 +6546,7 @@
// TODO: trg as memory.
void CodeGeneratorX86_64::MoveFromReturnRegister(Location trg, Primitive::Type type) {
if (!trg.IsValid()) {
- DCHECK(type == Primitive::kPrimVoid);
+ DCHECK_EQ(type, Primitive::kPrimVoid);
return;
}
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 7351fed..dda9ea2 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -213,11 +213,44 @@
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
void GenerateDivRemIntegral(HBinaryOperation* instruction);
void HandleShift(HBinaryOperation* operation);
- void GenerateMemoryBarrier(MemBarrierKind kind);
+
void HandleFieldSet(HInstruction* instruction,
const FieldInfo& field_info,
bool value_can_be_null);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
+
+ // Generate a heap reference load using one register `out`:
+ //
+ // out <- *(out + offset)
+ //
+ // while honoring heap poisoning and/or read barriers (if any).
+ // Register `temp` is used when generating a read barrier.
+ void GenerateReferenceLoadOneRegister(HInstruction* instruction,
+ Location out,
+ uint32_t offset,
+ Location temp);
+ // Generate a heap reference load using two different registers
+ // `out` and `obj`:
+ //
+ // out <- *(obj + offset)
+ //
+ // while honoring heap poisoning and/or read barriers (if any).
+ // Register `temp` is used when generating a Baker's read barrier.
+ void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
+ Location out,
+ Location obj,
+ uint32_t offset,
+ Location temp);
+ // Generate a GC root reference load:
+ //
+ // root <- *(obj + offset)
+ //
+ // while honoring read barriers (if any).
+ void GenerateGcRootFieldLoad(HInstruction* instruction,
+ Location root,
+ CpuRegister obj,
+ uint32_t offset);
+
void GenerateImplicitNullCheck(HNullCheck* instruction);
void GenerateExplicitNullCheck(HNullCheck* instruction);
void PushOntoFPStack(Location source, uint32_t temp_offset,
@@ -324,6 +357,8 @@
CpuRegister value,
bool value_can_be_null);
+ void GenerateMemoryBarrier(MemBarrierKind kind);
+
// Helper method to move a value between two locations.
void Move(Location destination, Location source);
@@ -356,7 +391,26 @@
return isa_features_;
}
- // Generate a read barrier for a heap reference within `instruction`.
+ // Fast path implementation of ReadBarrier::Barrier for a heap
+ // reference field load when Baker's read barriers are used.
+ void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location out,
+ CpuRegister obj,
+ uint32_t offset,
+ Location temp,
+ bool needs_null_check);
+ // Fast path implementation of ReadBarrier::Barrier for a heap
+ // reference array load when Baker's read barriers are used.
+ void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location out,
+ CpuRegister obj,
+ uint32_t data_offset,
+ Location index,
+ Location temp,
+ bool needs_null_check);
+
+ // Generate a read barrier for a heap reference within `instruction`
+ // using a slow path.
//
// A read barrier for an object reference read from the heap is
// implemented as a call to the artReadBarrierSlow runtime entry
@@ -373,23 +427,25 @@
// When `index` is provided (i.e., when it is different from
// Location::NoLocation()), the offset value passed to
// artReadBarrierSlow is adjusted to take `index` into account.
- void GenerateReadBarrier(HInstruction* instruction,
- Location out,
- Location ref,
- Location obj,
- uint32_t offset,
- Location index = Location::NoLocation());
+ void GenerateReadBarrierSlow(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index = Location::NoLocation());
- // If read barriers are enabled, generate a read barrier for a heap reference.
- // If heap poisoning is enabled, also unpoison the reference in `out`.
- void MaybeGenerateReadBarrier(HInstruction* instruction,
- Location out,
- Location ref,
- Location obj,
- uint32_t offset,
- Location index = Location::NoLocation());
+ // If read barriers are enabled, generate a read barrier for a heap
+ // reference using a slow path. If heap poisoning is enabled, also
+ // unpoison the reference in `out`.
+ void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index = Location::NoLocation());
- // Generate a read barrier for a GC root within `instruction`.
+ // Generate a read barrier for a GC root within `instruction` using
+ // a slow path.
//
// A read barrier for an object reference GC root is implemented as
// a call to the artReadBarrierForRootSlow runtime entry point,
@@ -399,7 +455,7 @@
//
// The `out` location contains the value returned by
// artReadBarrierForRootSlow.
- void GenerateReadBarrierForRoot(HInstruction* instruction, Location out, Location root);
+ void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
int ConstantAreaStart() const {
return constant_area_start_;
@@ -424,6 +480,15 @@
HInstruction* instruction);
private:
+ // Factored implementation of GenerateFieldLoadWithBakerReadBarrier
+ // and GenerateArrayLoadWithBakerReadBarrier.
+ void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ CpuRegister obj,
+ const Address& src,
+ Location temp,
+ bool needs_null_check);
+
struct PcRelativeDexCacheAccessInfo {
PcRelativeDexCacheAccessInfo(const DexFile& dex_file, uint32_t element_off)
: target_dex_file(dex_file), element_offset(element_off), label() { }
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 422bb89..fd454d8 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1790,12 +1790,27 @@
Location output_loc = locations->Out();
switch (type) {
- case Primitive::kPrimInt:
- case Primitive::kPrimNot: {
+ case Primitive::kPrimInt: {
Register output = output_loc.AsRegister<Register>();
__ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0));
- if (type == Primitive::kPrimNot) {
- codegen->MaybeGenerateReadBarrier(invoke, output_loc, output_loc, base_loc, 0U, offset_loc);
+ break;
+ }
+
+ case Primitive::kPrimNot: {
+ Register output = output_loc.AsRegister<Register>();
+ if (kEmitCompilerReadBarrier) {
+ if (kUseBakerReadBarrier) {
+ Location temp = locations->GetTemp(0);
+ codegen->GenerateArrayLoadWithBakerReadBarrier(
+ invoke, output_loc, base, 0U, offset_loc, temp, /* needs_null_check */ false);
+ } else {
+ __ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0));
+ codegen->GenerateReadBarrierSlow(
+ invoke, output_loc, output_loc, base_loc, 0U, offset_loc);
+ }
+ } else {
+ __ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0));
+ __ MaybeUnpoisonHeapReference(output);
}
break;
}
@@ -1823,8 +1838,10 @@
}
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke,
- bool is_long, bool is_volatile) {
+static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
+ HInvoke* invoke,
+ Primitive::Type type,
+ bool is_volatile) {
bool can_call = kEmitCompilerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
@@ -1836,7 +1853,7 @@
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
- if (is_long) {
+ if (type == Primitive::kPrimLong) {
if (is_volatile) {
// Need to use XMM to read volatile.
locations->AddTemp(Location::RequiresFpuRegister());
@@ -1847,25 +1864,30 @@
} else {
locations->SetOut(Location::RequiresRegister());
}
+ if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // We need a temporary register for the read barrier marking slow
+ // path in CodeGeneratorX86::GenerateArrayLoadWithBakerReadBarrier.
+ locations->AddTemp(Location::RequiresRegister());
+ }
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, /* is_long */ false, /* is_volatile */ false);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, /* is_long */ false, /* is_volatile */ true);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt, /* is_volatile */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, /* is_long */ true, /* is_volatile */ false);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, /* is_long */ true, /* is_volatile */ true);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong, /* is_volatile */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, /* is_long */ false, /* is_volatile */ false);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, /* is_long */ false, /* is_volatile */ true);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot, /* is_volatile */ true);
}
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index ac9b245..ce737e3 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1917,16 +1917,30 @@
Location offset_loc = locations->InAt(2);
CpuRegister offset = offset_loc.AsRegister<CpuRegister>();
Location output_loc = locations->Out();
- CpuRegister output = locations->Out().AsRegister<CpuRegister>();
+ CpuRegister output = output_loc.AsRegister<CpuRegister>();
switch (type) {
case Primitive::kPrimInt:
- case Primitive::kPrimNot:
__ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0));
- if (type == Primitive::kPrimNot) {
- codegen->MaybeGenerateReadBarrier(invoke, output_loc, output_loc, base_loc, 0U, offset_loc);
+ break;
+
+ case Primitive::kPrimNot: {
+ if (kEmitCompilerReadBarrier) {
+ if (kUseBakerReadBarrier) {
+ Location temp = locations->GetTemp(0);
+ codegen->GenerateArrayLoadWithBakerReadBarrier(
+ invoke, output_loc, base, 0U, offset_loc, temp, /* needs_null_check */ false);
+ } else {
+ __ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0));
+ codegen->GenerateReadBarrierSlow(
+ invoke, output_loc, output_loc, base_loc, 0U, offset_loc);
+ }
+ } else {
+ __ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0));
+ __ MaybeUnpoisonHeapReference(output);
}
break;
+ }
case Primitive::kPrimLong:
__ movq(output, Address(base, offset, ScaleFactor::TIMES_1, 0));
@@ -1938,7 +1952,9 @@
}
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
+ HInvoke* invoke,
+ Primitive::Type type) {
bool can_call = kEmitCompilerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
@@ -1951,25 +1967,30 @@
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
+ if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // We need a temporary register for the read barrier marking slow
+ // path in CodeGeneratorX86_64::GenerateArrayLoadWithBakerReadBarrier.
+ locations->AddTemp(Location::RequiresRegister());
+ }
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot);
}
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index 9a18635..1a2f2c2 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -471,18 +471,19 @@
/*
* Callback for dumping each positions table entry.
*/
-static bool dumpPositionsCb(void* /*context*/, u4 address, u4 lineNum) {
- fprintf(gOutFile, " 0x%04x line=%d\n", address, lineNum);
+static bool dumpPositionsCb(void* /*context*/, const DexFile::PositionInfo& entry) {
+ fprintf(gOutFile, " 0x%04x line=%d\n", entry.address_, entry.line_);
return false;
}
/*
* Callback for dumping locals table entry.
*/
-static void dumpLocalsCb(void* /*context*/, u2 slot, u4 startAddress, u4 endAddress,
- const char* name, const char* descriptor, const char* signature) {
+static void dumpLocalsCb(void* /*context*/, const DexFile::LocalInfo& entry) {
+ const char* signature = entry.signature_ != nullptr ? entry.signature_ : "";
fprintf(gOutFile, " 0x%04x - 0x%04x reg=%d %s %s %s\n",
- startAddress, endAddress, slot, name, descriptor, signature);
+ entry.start_address_, entry.end_address_, entry.reg_,
+ entry.name_, entry.descriptor_, signature);
}
/*
@@ -900,11 +901,9 @@
// Positions and locals table in the debug info.
bool is_static = (flags & kAccStatic) != 0;
fprintf(gOutFile, " positions : \n");
- pDexFile->DecodeDebugInfo(
- pCode, is_static, idx, dumpPositionsCb, nullptr, nullptr);
+ pDexFile->DecodeDebugPositionInfo(pCode, dumpPositionsCb, nullptr);
fprintf(gOutFile, " locals : \n");
- pDexFile->DecodeDebugInfo(
- pCode, is_static, idx, nullptr, dumpLocalsCb, nullptr);
+ pDexFile->DecodeDebugLocalInfo(pCode, is_static, idx, dumpLocalsCb, nullptr);
}
/*
diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc
index 1d0f75e..d20c169 100644
--- a/dexlist/dexlist.cc
+++ b/dexlist/dexlist.cc
@@ -80,10 +80,10 @@
* first line in the method, which *should* correspond to the first
* entry from the table. (Could also use "min" here.)
*/
-static bool positionsCb(void* context, u4 /*address*/, u4 lineNum) {
+static bool positionsCb(void* context, const DexFile::PositionInfo& entry) {
int* pFirstLine = reinterpret_cast<int *>(context);
if (*pFirstLine == -1) {
- *pFirstLine = lineNum;
+ *pFirstLine = entry.line_;
}
return 0;
}
@@ -92,7 +92,7 @@
* Dumps a method.
*/
static void dumpMethod(const DexFile* pDexFile,
- const char* fileName, u4 idx, u4 flags,
+ const char* fileName, u4 idx, u4 flags ATTRIBUTE_UNUSED,
const DexFile::CodeItem* pCode, u4 codeOffset) {
// Abstract and native methods don't get listed.
if (pCode == nullptr || codeOffset == 0) {
@@ -121,9 +121,7 @@
// Find the first line.
int firstLine = -1;
- bool is_static = (flags & kAccStatic) != 0;
- pDexFile->DecodeDebugInfo(
- pCode, is_static, idx, positionsCb, nullptr, &firstLine);
+ pDexFile->DecodeDebugPositionInfo(pCode, positionsCb, &firstLine);
// Method signature.
const Signature signature = pDexFile->GetMethodSignature(pMethodId);
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index be33b0e..7141181 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -166,6 +166,7 @@
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
+ qpoints->pReadBarrierMark = artReadBarrierMark;
qpoints->pReadBarrierSlow = artReadBarrierSlow;
qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
}
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 63285a4..5c8ff8f 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -149,6 +149,7 @@
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
+ qpoints->pReadBarrierMark = artReadBarrierMark;
qpoints->pReadBarrierSlow = artReadBarrierSlow;
qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
};
diff --git a/runtime/arch/mips/entrypoints_direct_mips.h b/runtime/arch/mips/entrypoints_direct_mips.h
index 74e7638..0d01ad5 100644
--- a/runtime/arch/mips/entrypoints_direct_mips.h
+++ b/runtime/arch/mips/entrypoints_direct_mips.h
@@ -45,6 +45,7 @@
entrypoint == kQuickCmpgFloat ||
entrypoint == kQuickCmplDouble ||
entrypoint == kQuickCmplFloat ||
+ entrypoint == kQuickReadBarrierMark ||
entrypoint == kQuickReadBarrierSlow ||
entrypoint == kQuickReadBarrierForRootSlow;
}
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index cba427d..51eb77f 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -274,6 +274,8 @@
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
static_assert(!IsDirectEntrypoint(kQuickReadBarrierJni), "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMark = artReadBarrierMark;
+ static_assert(IsDirectEntrypoint(kQuickReadBarrierMark), "Direct C stub not marked direct.");
qpoints->pReadBarrierSlow = artReadBarrierSlow;
static_assert(IsDirectEntrypoint(kQuickReadBarrierSlow), "Direct C stub not marked direct.");
qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 89f54dd..4bdb38e 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -180,6 +180,7 @@
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
+ qpoints->pReadBarrierMark = artReadBarrierMark;
qpoints->pReadBarrierSlow = artReadBarrierSlow;
qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
};
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index aca49cc..e593f39 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -28,6 +28,7 @@
const mirror::Class* ref_class);
// Read barrier entrypoints.
+extern "C" mirror::Object* art_quick_read_barrier_mark(mirror::Object*);
extern "C" mirror::Object* art_quick_read_barrier_slow(mirror::Object*, mirror::Object*, uint32_t);
extern "C" mirror::Object* art_quick_read_barrier_for_root_slow(GcRoot<mirror::Object>*);
@@ -158,6 +159,7 @@
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
+ qpoints->pReadBarrierMark = art_quick_read_barrier_mark;
qpoints->pReadBarrierSlow = art_quick_read_barrier_slow;
qpoints->pReadBarrierForRootSlow = art_quick_read_barrier_for_root_slow;
};
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 463c9cf..da30331 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1686,6 +1686,14 @@
UNREACHABLE
END_FUNCTION art_nested_signal_return
+DEFINE_FUNCTION art_quick_read_barrier_mark
+ PUSH eax // pass arg1 - obj
+ call SYMBOL(artReadBarrierMark) // artReadBarrierMark(obj)
+ addl LITERAL(4), %esp // pop argument
+ CFI_ADJUST_CFA_OFFSET(-4)
+ ret
+END_FUNCTION art_quick_read_barrier_mark
+
DEFINE_FUNCTION art_quick_read_barrier_slow
PUSH edx // pass arg3 - offset
PUSH ecx // pass arg2 - obj
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index ebe6d40..0a5d14a 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -29,6 +29,7 @@
const mirror::Class* ref_class);
// Read barrier entrypoints.
+extern "C" mirror::Object* art_quick_read_barrier_mark(mirror::Object*);
extern "C" mirror::Object* art_quick_read_barrier_slow(mirror::Object*, mirror::Object*, uint32_t);
extern "C" mirror::Object* art_quick_read_barrier_for_root_slow(GcRoot<mirror::Object>*);
@@ -162,6 +163,7 @@
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
+ qpoints->pReadBarrierMark = art_quick_read_barrier_mark;
qpoints->pReadBarrierSlow = art_quick_read_barrier_slow;
qpoints->pReadBarrierForRootSlow = art_quick_read_barrier_for_root_slow;
#endif // __APPLE__
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 17d277e..883da96 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1712,6 +1712,17 @@
UNREACHABLE
END_FUNCTION art_nested_signal_return
+DEFINE_FUNCTION art_quick_read_barrier_mark
+ SETUP_FP_CALLEE_SAVE_FRAME
+ subq LITERAL(8), %rsp // Alignment padding.
+ CFI_ADJUST_CFA_OFFSET(8)
+ call SYMBOL(artReadBarrierMark) // artReadBarrierMark(obj)
+ addq LITERAL(8), %rsp
+ CFI_ADJUST_CFA_OFFSET(-8)
+ RESTORE_FP_CALLEE_SAVE_FRAME
+ ret
+END_FUNCTION art_quick_read_barrier_mark
+
DEFINE_FUNCTION art_quick_read_barrier_slow
SETUP_FP_CALLEE_SAVE_FRAME
subq LITERAL(8), %rsp // Alignment padding.
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index cb01a64..2b4826e 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -122,7 +122,7 @@
art::Thread::SelfOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_pos.
-#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 168 * __SIZEOF_POINTER__)
+#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 169 * __SIZEOF_POINTER__)
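+// Note: the new ReadBarrierMark entry point occupies one pointer-sized slot
+// before thread_local_pos, which is why the count grows from 168 to 169.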
ADD_TEST_EQ(THREAD_LOCAL_POS_OFFSET,
art::Thread::ThreadLocalPosOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_end.
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 4aacf5b..ad52ca5 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -1536,10 +1536,10 @@
int numItems;
JDWP::ExpandBuf* pReply;
- static bool Callback(void* context, uint32_t address, uint32_t line_number) {
+ static bool Callback(void* context, const DexFile::PositionInfo& entry) {
DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
- expandBufAdd8BE(pContext->pReply, address);
- expandBufAdd4BE(pContext->pReply, line_number);
+ expandBufAdd8BE(pContext->pReply, entry.address_);
+ expandBufAdd4BE(pContext->pReply, entry.line_);
pContext->numItems++;
return false;
}
@@ -1569,8 +1569,7 @@
context.pReply = pReply;
if (code_item != nullptr) {
- m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
- DebugCallbackContext::Callback, nullptr, &context);
+ m->GetDexFile()->DecodeDebugPositionInfo(code_item, DebugCallbackContext::Callback, &context);
}
JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems);
@@ -1584,25 +1583,26 @@
size_t variable_count;
bool with_generic;
- static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress,
- const char* name, const char* descriptor, const char* signature)
+ static void Callback(void* context, const DexFile::LocalInfo& entry)
SHARED_REQUIRES(Locks::mutator_lock_) {
DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
+ uint16_t slot = entry.reg_;
VLOG(jdwp) << StringPrintf(" %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
- pContext->variable_count, startAddress, endAddress - startAddress,
- name, descriptor, signature, slot,
+ pContext->variable_count, entry.start_address_,
+ entry.end_address_ - entry.start_address_,
+ entry.name_, entry.descriptor_, entry.signature_, slot,
MangleSlot(slot, pContext->method));
slot = MangleSlot(slot, pContext->method);
- expandBufAdd8BE(pContext->pReply, startAddress);
- expandBufAddUtf8String(pContext->pReply, name);
- expandBufAddUtf8String(pContext->pReply, descriptor);
+ expandBufAdd8BE(pContext->pReply, entry.start_address_);
+ expandBufAddUtf8String(pContext->pReply, entry.name_);
+ expandBufAddUtf8String(pContext->pReply, entry.descriptor_);
if (pContext->with_generic) {
- expandBufAddUtf8String(pContext->pReply, signature);
+ expandBufAddUtf8String(pContext->pReply, entry.signature_);
}
- expandBufAdd4BE(pContext->pReply, endAddress - startAddress);
+ expandBufAdd4BE(pContext->pReply, entry.end_address_ - entry.start_address_);
expandBufAdd4BE(pContext->pReply, slot);
++pContext->variable_count;
@@ -1627,8 +1627,8 @@
const DexFile::CodeItem* code_item = m->GetCodeItem();
if (code_item != nullptr) {
- m->GetDexFile()->DecodeDebugInfo(
- code_item, m->IsStatic(), m->GetDexMethodIndex(), nullptr, DebugCallbackContext::Callback,
+ m->GetDexFile()->DecodeDebugLocalInfo(
+ code_item, m->IsStatic(), m->GetDexMethodIndex(), DebugCallbackContext::Callback,
&context);
}
@@ -3715,19 +3715,19 @@
code_item_(code_item), last_pc_valid(false), last_pc(0) {
}
- static bool Callback(void* raw_context, uint32_t address, uint32_t line_number_cb) {
+ static bool Callback(void* raw_context, const DexFile::PositionInfo& entry) {
DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
- if (static_cast<int32_t>(line_number_cb) == context->line_number_) {
+ if (static_cast<int32_t>(entry.line_) == context->line_number_) {
if (!context->last_pc_valid) {
// Everything from this address until the next line change is ours.
- context->last_pc = address;
+ context->last_pc = entry.address_;
context->last_pc_valid = true;
}
// Otherwise, if we're already in a valid range for this line,
// just keep going (shouldn't really happen)...
} else if (context->last_pc_valid) { // and the line number is new
// Add everything from the last entry up until here to the set
- for (uint32_t dex_pc = context->last_pc; dex_pc < address; ++dex_pc) {
+ for (uint32_t dex_pc = context->last_pc; dex_pc < entry.address_; ++dex_pc) {
context->single_step_control_->AddDexPc(dex_pc);
}
context->last_pc_valid = false;
@@ -3768,8 +3768,7 @@
if (m != nullptr && !m->IsNative()) {
const DexFile::CodeItem* const code_item = m->GetCodeItem();
DebugCallbackContext context(single_step_control, line_number, code_item);
- m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
- DebugCallbackContext::Callback, nullptr, &context);
+ m->GetDexFile()->DecodeDebugPositionInfo(code_item, DebugCallbackContext::Callback, &context);
}
// Activate single-step in the thread.
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 4e15e80..880d3e0 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -767,8 +767,7 @@
// A method with no line number info should return -1
LineNumFromPcContext context(rel_pc, -1);
- DecodeDebugInfo(code_item, method->IsStatic(), method->GetDexMethodIndex(), LineNumForPcCb,
- nullptr, &context);
+ DecodeDebugPositionInfo(code_item, LineNumForPcCb, &context);
return context.line_num_;
}
@@ -805,45 +804,48 @@
}
}
-void DexFile::DecodeDebugInfo0(const CodeItem* code_item, bool is_static, uint32_t method_idx,
- DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb,
- void* context, const uint8_t* stream, LocalInfo* local_in_reg)
- const {
- uint32_t line = DecodeUnsignedLeb128(&stream);
- uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
- uint16_t arg_reg = code_item->registers_size_ - code_item->ins_size_;
- uint32_t address = 0;
- bool need_locals = (local_cb != nullptr);
+bool DexFile::DecodeDebugLocalInfo(const CodeItem* code_item, bool is_static, uint32_t method_idx,
+ DexDebugNewLocalCb local_cb, void* context) const {
+ DCHECK(local_cb != nullptr);
+ if (code_item == nullptr) {
+ return false;
+ }
+ const uint8_t* stream = GetDebugInfoStream(code_item);
+ if (stream == nullptr) {
+ return false;
+ }
+ std::vector<LocalInfo> local_in_reg(code_item->registers_size_);
+ uint16_t arg_reg = code_item->registers_size_ - code_item->ins_size_;
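+ // The incoming arguments occupy the highest registers; e.g., with
+ // registers_size_ = 5 and ins_size_ = 3, the arguments live in v2..v4,
+ // and for an instance method v2 holds `this`.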
if (!is_static) {
- if (need_locals) {
- const char* descriptor = GetMethodDeclaringClassDescriptor(GetMethodId(method_idx));
- local_in_reg[arg_reg].name_ = "this";
- local_in_reg[arg_reg].descriptor_ = descriptor;
- local_in_reg[arg_reg].signature_ = nullptr;
- local_in_reg[arg_reg].start_address_ = 0;
- local_in_reg[arg_reg].is_live_ = true;
- }
+ const char* descriptor = GetMethodDeclaringClassDescriptor(GetMethodId(method_idx));
+ local_in_reg[arg_reg].name_ = "this";
+ local_in_reg[arg_reg].descriptor_ = descriptor;
+ local_in_reg[arg_reg].signature_ = nullptr;
+ local_in_reg[arg_reg].start_address_ = 0;
+ local_in_reg[arg_reg].reg_ = arg_reg;
+ local_in_reg[arg_reg].is_live_ = true;
arg_reg++;
}
DexFileParameterIterator it(*this, GetMethodPrototype(GetMethodId(method_idx)));
- for (uint32_t i = 0; i < parameters_size && it.HasNext(); ++i, it.Next()) {
+ DecodeUnsignedLeb128(&stream); // Line.
+ uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
+ uint32_t i;
+ for (i = 0; i < parameters_size && it.HasNext(); ++i, it.Next()) {
if (arg_reg >= code_item->registers_size_) {
LOG(ERROR) << "invalid stream - arg reg >= reg size (" << arg_reg
<< " >= " << code_item->registers_size_ << ") in " << GetLocation();
- return;
+ return false;
}
- uint32_t id = DecodeUnsignedLeb128P1(&stream);
+ uint32_t name_idx = DecodeUnsignedLeb128P1(&stream);
const char* descriptor = it.GetDescriptor();
- if (need_locals && id != kDexNoIndex) {
- const char* name = StringDataByIdx(id);
- local_in_reg[arg_reg].name_ = name;
- local_in_reg[arg_reg].descriptor_ = descriptor;
- local_in_reg[arg_reg].signature_ = nullptr;
- local_in_reg[arg_reg].start_address_ = address;
- local_in_reg[arg_reg].is_live_ = true;
- }
+ local_in_reg[arg_reg].name_ = StringDataByIdx(name_idx);
+ local_in_reg[arg_reg].descriptor_ = descriptor;
+ local_in_reg[arg_reg].signature_ = nullptr;
+ local_in_reg[arg_reg].start_address_ = 0;
+ local_in_reg[arg_reg].reg_ = arg_reg;
+ local_in_reg[arg_reg].is_live_ = true;
switch (*descriptor) {
case 'D':
case 'J':
@@ -854,152 +856,188 @@
break;
}
}
-
- if (it.HasNext()) {
+ if (i != parameters_size || it.HasNext()) {
LOG(ERROR) << "invalid stream - problem with parameter iterator in " << GetLocation()
<< " for method " << PrettyMethod(method_idx, *this);
- return;
+ return false;
}
+ uint32_t address = 0;
for (;;) {
uint8_t opcode = *stream++;
- uint16_t reg;
- uint32_t name_idx;
- uint32_t descriptor_idx;
- uint32_t signature_idx = 0;
-
switch (opcode) {
case DBG_END_SEQUENCE:
- return;
-
+ // Emit all variables which are still alive at the end of the method.
+ for (uint16_t reg = 0; reg < code_item->registers_size_; reg++) {
+ if (local_in_reg[reg].is_live_) {
+ local_in_reg[reg].end_address_ = code_item->insns_size_in_code_units_;
+ local_cb(context, local_in_reg[reg]);
+ }
+ }
+ return true;
case DBG_ADVANCE_PC:
address += DecodeUnsignedLeb128(&stream);
break;
-
case DBG_ADVANCE_LINE:
- line += DecodeSignedLeb128(&stream);
+ DecodeSignedLeb128(&stream); // Line.
break;
-
case DBG_START_LOCAL:
- case DBG_START_LOCAL_EXTENDED:
- reg = DecodeUnsignedLeb128(&stream);
- if (reg > code_item->registers_size_) {
- LOG(ERROR) << "invalid stream - reg > reg size (" << reg << " > "
+ case DBG_START_LOCAL_EXTENDED: {
+ uint16_t reg = DecodeUnsignedLeb128(&stream);
+ if (reg >= code_item->registers_size_) {
+ LOG(ERROR) << "invalid stream - reg >= reg size (" << reg << " >= "
<< code_item->registers_size_ << ") in " << GetLocation();
- return;
+ return false;
}
- name_idx = DecodeUnsignedLeb128P1(&stream);
- descriptor_idx = DecodeUnsignedLeb128P1(&stream);
+ uint32_t name_idx = DecodeUnsignedLeb128P1(&stream);
+ uint32_t descriptor_idx = DecodeUnsignedLeb128P1(&stream);
+ uint32_t signature_idx = kDexNoIndex;
if (opcode == DBG_START_LOCAL_EXTENDED) {
signature_idx = DecodeUnsignedLeb128P1(&stream);
}
// Emit what was previously there, if anything
- if (need_locals) {
- InvokeLocalCbIfLive(context, reg, address, local_in_reg, local_cb);
+ if (local_in_reg[reg].is_live_) {
+ local_in_reg[reg].end_address_ = address;
+ local_cb(context, local_in_reg[reg]);
+ }
- local_in_reg[reg].name_ = StringDataByIdx(name_idx);
- local_in_reg[reg].descriptor_ = StringByTypeIdx(descriptor_idx);
- local_in_reg[reg].signature_ =
- (opcode == DBG_START_LOCAL_EXTENDED) ? StringDataByIdx(signature_idx)
- : nullptr;
+ local_in_reg[reg].name_ = StringDataByIdx(name_idx);
+ local_in_reg[reg].descriptor_ = StringByTypeIdx(descriptor_idx);
+ local_in_reg[reg].signature_ = StringDataByIdx(signature_idx);
+ local_in_reg[reg].start_address_ = address;
+ local_in_reg[reg].reg_ = reg;
+ local_in_reg[reg].is_live_ = true;
+ break;
+ }
+ case DBG_END_LOCAL: {
+ uint16_t reg = DecodeUnsignedLeb128(&stream);
+ if (reg >= code_item->registers_size_) {
+ LOG(ERROR) << "invalid stream - reg >= reg size (" << reg << " >= "
+ << code_item->registers_size_ << ") in " << GetLocation();
+ return false;
+ }
+ if (!local_in_reg[reg].is_live_) {
+ LOG(ERROR) << "invalid stream - end without start in " << GetLocation();
+ return false;
+ }
+ local_in_reg[reg].end_address_ = address;
+ local_cb(context, local_in_reg[reg]);
+ local_in_reg[reg].is_live_ = false;
+ break;
+ }
+ case DBG_RESTART_LOCAL: {
+ uint16_t reg = DecodeUnsignedLeb128(&stream);
+ if (reg >= code_item->registers_size_) {
+ LOG(ERROR) << "invalid stream - reg >= reg size (" << reg << " >= "
+ << code_item->registers_size_ << ") in " << GetLocation();
+ return false;
+ }
+ // If the register is live, the "restart" is superfluous,
+ // and we don't want to mess with the existing start address.
+ if (!local_in_reg[reg].is_live_) {
local_in_reg[reg].start_address_ = address;
local_in_reg[reg].is_live_ = true;
}
break;
-
- case DBG_END_LOCAL:
- reg = DecodeUnsignedLeb128(&stream);
- if (reg > code_item->registers_size_) {
- LOG(ERROR) << "invalid stream - reg > reg size (" << reg << " > "
- << code_item->registers_size_ << ") in " << GetLocation();
- return;
- }
-
- if (need_locals) {
- InvokeLocalCbIfLive(context, reg, address, local_in_reg, local_cb);
- local_in_reg[reg].is_live_ = false;
- }
- break;
-
- case DBG_RESTART_LOCAL:
- reg = DecodeUnsignedLeb128(&stream);
- if (reg > code_item->registers_size_) {
- LOG(ERROR) << "invalid stream - reg > reg size (" << reg << " > "
- << code_item->registers_size_ << ") in " << GetLocation();
- return;
- }
-
- if (need_locals) {
- if (local_in_reg[reg].name_ == nullptr || local_in_reg[reg].descriptor_ == nullptr) {
- LOG(ERROR) << "invalid stream - no name or descriptor in " << GetLocation();
- return;
- }
-
- // If the register is live, the "restart" is superfluous,
- // and we don't want to mess with the existing start address.
- if (!local_in_reg[reg].is_live_) {
- local_in_reg[reg].start_address_ = address;
- local_in_reg[reg].is_live_ = true;
- }
- }
- break;
-
+ }
case DBG_SET_PROLOGUE_END:
case DBG_SET_EPILOGUE_BEGIN:
- case DBG_SET_FILE:
break;
+ case DBG_SET_FILE:
+ DecodeUnsignedLeb128P1(&stream); // name.
+ break;
+ default:
+ address += (opcode - DBG_FIRST_SPECIAL) / DBG_LINE_RANGE;
+ break;
+ }
+ }
+}
+bool DexFile::DecodeDebugPositionInfo(const CodeItem* code_item, DexDebugNewPositionCb position_cb,
+ void* context) const {
+ DCHECK(position_cb != nullptr);
+ if (code_item == nullptr) {
+ return false;
+ }
+ const uint8_t* stream = GetDebugInfoStream(code_item);
+ if (stream == nullptr) {
+ return false;
+ }
+
+ PositionInfo entry = PositionInfo();
+ entry.line_ = DecodeUnsignedLeb128(&stream);
+ uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
+ for (uint32_t i = 0; i < parameters_size; ++i) {
+ DecodeUnsignedLeb128P1(&stream); // Parameter name.
+ }
+
+ for (;;) {
+ uint8_t opcode = *stream++;
+ switch (opcode) {
+ case DBG_END_SEQUENCE:
+ return true; // end of stream.
+ case DBG_ADVANCE_PC:
+ entry.address_ += DecodeUnsignedLeb128(&stream);
+ break;
+ case DBG_ADVANCE_LINE:
+ entry.line_ += DecodeSignedLeb128(&stream);
+ break;
+ case DBG_START_LOCAL:
+ DecodeUnsignedLeb128(&stream); // reg.
+ DecodeUnsignedLeb128P1(&stream); // name.
+ DecodeUnsignedLeb128P1(&stream); // descriptor.
+ break;
+ case DBG_START_LOCAL_EXTENDED:
+ DecodeUnsignedLeb128(&stream); // reg.
+ DecodeUnsignedLeb128P1(&stream); // name.
+ DecodeUnsignedLeb128P1(&stream); // descriptor.
+ DecodeUnsignedLeb128P1(&stream); // signature.
+ break;
+ case DBG_END_LOCAL:
+ case DBG_RESTART_LOCAL:
+ DecodeUnsignedLeb128(&stream); // reg.
+ break;
+ case DBG_SET_PROLOGUE_END:
+ entry.prologue_end_ = true;
+ break;
+ case DBG_SET_EPILOGUE_BEGIN:
+ entry.epilogue_begin_ = true;
+ break;
+ case DBG_SET_FILE: {
+ uint32_t name_idx = DecodeUnsignedLeb128P1(&stream);
+ entry.source_file_ = StringDataByIdx(name_idx);
+ break;
+ }
default: {
int adjopcode = opcode - DBG_FIRST_SPECIAL;
-
- address += adjopcode / DBG_LINE_RANGE;
- line += DBG_LINE_BASE + (adjopcode % DBG_LINE_RANGE);
-
- if (position_cb != nullptr) {
- if (position_cb(context, address, line)) {
- // early exit
- return;
- }
+ entry.address_ += adjopcode / DBG_LINE_RANGE;
+ entry.line_ += DBG_LINE_BASE + (adjopcode % DBG_LINE_RANGE);
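+ // E.g., with DBG_FIRST_SPECIAL = 0x0a, DBG_LINE_BASE = -4 and
+ // DBG_LINE_RANGE = 15, opcode 0x1e gives adjopcode 20, which advances
+ // the address by one code unit and the line by one.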
+ if (position_cb(context, entry)) {
+ return true; // early exit.
}
+ entry.prologue_end_ = false;
+ entry.epilogue_begin_ = false;
break;
}
}
}
}
-void DexFile::DecodeDebugInfo(const CodeItem* code_item, bool is_static, uint32_t method_idx,
- DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb,
- void* context) const {
- DCHECK(code_item != nullptr);
- const uint8_t* stream = GetDebugInfoStream(code_item);
- std::unique_ptr<LocalInfo[]> local_in_reg(local_cb != nullptr ?
- new LocalInfo[code_item->registers_size_] :
- nullptr);
- if (stream != nullptr) {
- DecodeDebugInfo0(code_item, is_static, method_idx, position_cb, local_cb, context, stream,
- &local_in_reg[0]);
- }
- for (int reg = 0; reg < code_item->registers_size_; reg++) {
- InvokeLocalCbIfLive(context, reg, code_item->insns_size_in_code_units_, &local_in_reg[0],
- local_cb);
- }
-}
-
-bool DexFile::LineNumForPcCb(void* raw_context, uint32_t address, uint32_t line_num) {
+bool DexFile::LineNumForPcCb(void* raw_context, const PositionInfo& entry) {
LineNumFromPcContext* context = reinterpret_cast<LineNumFromPcContext*>(raw_context);
// We know that this callback will be called in
// ascending address order, so keep going until we find
// a match or we've just gone past it.
- if (address > context->address_) {
+ if (entry.address_ > context->address_) {
// The line number from the previous positions callback
// wil be the final result.
return true;
} else {
- context->line_num_ = line_num;
- return address == context->address_;
+ context->line_num_ = entry.line_;
+ return entry.address_ == context->address_;
}
}
@@ -2210,22 +2248,47 @@
EncodedStaticFieldValueIterator::EncodedStaticFieldValueIterator(
const DexFile& dex_file,
const DexFile::ClassDef& class_def)
- : EncodedStaticFieldValueIterator(dex_file, nullptr, nullptr,
- nullptr, class_def) {
+ : EncodedStaticFieldValueIterator(dex_file,
+ nullptr,
+ nullptr,
+ nullptr,
+ class_def,
+ -1,
+ kByte) {
}
EncodedStaticFieldValueIterator::EncodedStaticFieldValueIterator(
- const DexFile& dex_file, Handle<mirror::DexCache>* dex_cache,
- Handle<mirror::ClassLoader>* class_loader, ClassLinker* linker,
+ const DexFile& dex_file,
+ Handle<mirror::DexCache>* dex_cache,
+ Handle<mirror::ClassLoader>* class_loader,
+ ClassLinker* linker,
const DexFile::ClassDef& class_def)
+ : EncodedStaticFieldValueIterator(dex_file,
+ dex_cache, class_loader,
+ linker,
+ class_def,
+ -1,
+ kByte) {
+ DCHECK(dex_cache_ != nullptr);
+ DCHECK(class_loader_ != nullptr);
+}
+
+EncodedStaticFieldValueIterator::EncodedStaticFieldValueIterator(
+ const DexFile& dex_file,
+ Handle<mirror::DexCache>* dex_cache,
+ Handle<mirror::ClassLoader>* class_loader,
+ ClassLinker* linker,
+ const DexFile::ClassDef& class_def,
+ size_t pos,
+ ValueType type)
: dex_file_(dex_file),
dex_cache_(dex_cache),
class_loader_(class_loader),
linker_(linker),
array_size_(),
- pos_(-1),
- type_(kByte) {
- ptr_ = dex_file_.GetEncodedStaticFieldValuesArray(class_def);
+ pos_(pos),
+ type_(type) {
+ ptr_ = dex_file.GetEncodedStaticFieldValuesArray(class_def);
if (ptr_ == nullptr) {
array_size_ = 0;
} else {
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 6b019f1..8a3db6c 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -819,20 +819,50 @@
}
}
+ struct PositionInfo {
+ PositionInfo()
+ : address_(0),
+ line_(0),
+ source_file_(nullptr),
+ prologue_end_(false),
+ epilogue_begin_(false) {
+ }
+
+ uint32_t address_; // In 16-bit code units.
+ uint32_t line_; // Source code line number starting at 1.
+ const char* source_file_; // nullptr if the file from ClassDef still applies.
+ bool prologue_end_;
+ bool epilogue_begin_;
+ };
+
// Callback for "new position table entry".
// Returning true causes the decoder to stop early.
- typedef bool (*DexDebugNewPositionCb)(void* context, uint32_t address, uint32_t line_num);
+ typedef bool (*DexDebugNewPositionCb)(void* context, const PositionInfo& entry);
- // Callback for "new locals table entry". "signature" is an empty string
- // if no signature is available for an entry.
- typedef void (*DexDebugNewLocalCb)(void* context, uint16_t reg,
- uint32_t start_address,
- uint32_t end_address,
- const char* name,
- const char* descriptor,
- const char* signature);
+ struct LocalInfo {
+ LocalInfo()
+ : name_(nullptr),
+ descriptor_(nullptr),
+ signature_(nullptr),
+ start_address_(0),
+ end_address_(0),
+ reg_(0),
+ is_live_(false) {
+ }
- static bool LineNumForPcCb(void* context, uint32_t address, uint32_t line_num);
+ const char* name_; // E.g., list. It can be nullptr if unknown.
+ const char* descriptor_; // E.g., Ljava/util/LinkedList;
+ const char* signature_; // E.g., java.util.LinkedList<java.lang.Integer>
+ uint32_t start_address_; // PC location where the local is first defined.
+ uint32_t end_address_; // PC location where the local is no longer defined.
+ uint16_t reg_; // Dex register which stores the value.
+ bool is_live_; // Is the local defined and live.
+ };
+
+ // Callback for "new locals table entry".
+ typedef void (*DexDebugNewLocalCb)(void* context, const LocalInfo& entry);
+
+ static bool LineNumForPcCb(void* context, const PositionInfo& entry);
const AnnotationsDirectoryItem* GetAnnotationsDirectory(const ClassDef& class_def) const {
if (class_def.annotations_off_ == 0) {
@@ -1044,21 +1074,6 @@
DBG_LINE_RANGE = 15,
};
- struct LocalInfo {
- LocalInfo()
- : name_(nullptr), descriptor_(nullptr), signature_(nullptr), start_address_(0),
- is_live_(false) {}
-
- const char* name_; // E.g., list
- const char* descriptor_; // E.g., Ljava/util/LinkedList;
- const char* signature_; // E.g., java.util.LinkedList<java.lang.Integer>
- uint16_t start_address_; // PC location where the local is first defined.
- bool is_live_; // Is the local defined and live.
-
- private:
- DISALLOW_COPY_AND_ASSIGN(LocalInfo);
- };
-
struct LineNumFromPcContext {
LineNumFromPcContext(uint32_t address, uint32_t line_num)
: address_(address), line_num_(line_num) {}
@@ -1068,15 +1083,6 @@
DISALLOW_COPY_AND_ASSIGN(LineNumFromPcContext);
};
- void InvokeLocalCbIfLive(void* context, int reg, uint32_t end_address,
- LocalInfo* local_in_reg, DexDebugNewLocalCb local_cb) const {
- if (local_cb != nullptr && local_in_reg[reg].is_live_) {
- local_cb(context, reg, local_in_reg[reg].start_address_, end_address,
- local_in_reg[reg].name_, local_in_reg[reg].descriptor_,
- local_in_reg[reg].signature_ != nullptr ? local_in_reg[reg].signature_ : "");
- }
- }
-
// Determine the source file line number based on the program counter.
// "pc" is an offset, in 16-bit units, from the start of the method's code.
//
@@ -1088,9 +1094,13 @@
int32_t GetLineNumFromPC(ArtMethod* method, uint32_t rel_pc) const
SHARED_REQUIRES(Locks::mutator_lock_);
- void DecodeDebugInfo(const CodeItem* code_item, bool is_static, uint32_t method_idx,
- DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb,
- void* context) const;
+ // Returns false if there is no debugging information or if it cannot be decoded.
+ bool DecodeDebugLocalInfo(const CodeItem* code_item, bool is_static, uint32_t method_idx,
+ DexDebugNewLocalCb local_cb, void* context) const;
+
+ // Returns false if there is no debugging information or if it cannot be decoded.
+ bool DecodeDebugPositionInfo(const CodeItem* code_item, DexDebugNewPositionCb position_cb,
+ void* context) const;
const char* GetSourceFile(const ClassDef& class_def) const {
if (class_def.source_file_idx_ == 0xffffffff) {
@@ -1200,10 +1210,6 @@
// Returns true if the header magic and version numbers are of the expected values.
bool CheckMagicAndVersion(std::string* error_msg) const;
- void DecodeDebugInfo0(const CodeItem* code_item, bool is_static, uint32_t method_idx,
- DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb,
- void* context, const uint8_t* stream, LocalInfo* local_in_reg) const;
-
// Check whether a location denotes a multidex dex file. This is a very simple check: returns
// whether the string contains the separator character.
static bool IsMultiDexLocation(const char* location);
@@ -1275,6 +1281,7 @@
}
}
bool HasNext() const { return pos_ < size_; }
+ size_t Size() const { return size_; }
void Next() { ++pos_; }
uint16_t GetTypeIdx() {
return type_list_->GetTypeItem(pos_).type_idx_;
@@ -1516,9 +1523,11 @@
const DexFile::ClassDef& class_def);
// A constructor meant to be called from runtime code.
- EncodedStaticFieldValueIterator(const DexFile& dex_file, Handle<mirror::DexCache>* dex_cache,
+ EncodedStaticFieldValueIterator(const DexFile& dex_file,
+ Handle<mirror::DexCache>* dex_cache,
Handle<mirror::ClassLoader>* class_loader,
- ClassLinker* linker, const DexFile::ClassDef& class_def)
+ ClassLinker* linker,
+ const DexFile::ClassDef& class_def)
SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive>
@@ -1551,6 +1560,14 @@
const jvalue& GetJavaValue() const { return jval_; }
private:
+ EncodedStaticFieldValueIterator(const DexFile& dex_file,
+ Handle<mirror::DexCache>* dex_cache,
+ Handle<mirror::ClassLoader>* class_loader,
+ ClassLinker* linker,
+ const DexFile::ClassDef& class_def,
+ size_t pos,
+ ValueType type);
+
static constexpr uint8_t kEncodedValueTypeMask = 0x1f; // 0b11111
static constexpr uint8_t kEncodedValueArgShift = 5;
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 27865e3..f5b68fa 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -79,11 +79,17 @@
// functions directly. For x86 and x86-64, compilers need a wrapper
// assembly function, to handle mismatch in ABI.
+// Mark the heap reference `obj`. This entry point is used by read
+// barrier fast path implementations generated by the compiler to mark
+// an object that is referenced by a field of a gray object.
+extern "C" mirror::Object* artReadBarrierMark(mirror::Object* obj)
+ SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR;
+
// Read barrier entrypoint for heap references.
-// This is the read barrier slow path for instance and static fields and reference-type arrays.
-// TODO: Currently the read barrier does not have a fast path for compilers to directly generate.
-// Ideally the slow path should only take one parameter "ref".
-extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref, mirror::Object* obj,
+// This is the read barrier slow path for instance and static fields
+// and reference type arrays.
+extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref,
+ mirror::Object* obj,
uint32_t offset)
SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR;
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index 3eea723..faa4747 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -163,6 +163,7 @@
V(NewStringFromStringBuilder, void) \
\
V(ReadBarrierJni, void, mirror::CompressedReference<mirror::Object>*, Thread*) \
+ V(ReadBarrierMark, mirror::Object*, mirror::Object*) \
V(ReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t) \
V(ReadBarrierForRootSlow, mirror::Object*, GcRoot<mirror::Object>*)
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 7ec5fc5..25c0bda 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -559,8 +559,11 @@
return -1; // failure
}
-// TODO: Currently the read barrier does not have a fast path. Ideally the slow path should only
-// take one parameter "ref", which is given by the fast path.
+extern "C" mirror::Object* artReadBarrierMark(mirror::Object* obj) {
+ DCHECK(kEmitCompilerReadBarrier);
+ return ReadBarrier::Mark(obj);
+}
+
extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref ATTRIBUTE_UNUSED,
mirror::Object* obj,
uint32_t offset) {
@@ -579,7 +582,6 @@
extern "C" mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root) {
DCHECK(kEmitCompilerReadBarrier);
- // TODO: Pass a GcRootSource object as second argument to GcRoot::Read?
return root->Read();
}
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 391eb72..dc9f14c 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -318,7 +318,8 @@
sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromStringBuilder, pReadBarrierJni,
sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pReadBarrierJni, pReadBarrierSlow, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pReadBarrierJni, pReadBarrierMark, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pReadBarrierMark, pReadBarrierSlow, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pReadBarrierSlow, pReadBarrierForRootSlow,
sizeof(void*));
diff --git a/runtime/jdwp/jdwp_expand_buf.cc b/runtime/jdwp/jdwp_expand_buf.cc
index e492d7e..961dd36 100644
--- a/runtime/jdwp/jdwp_expand_buf.cc
+++ b/runtime/jdwp/jdwp_expand_buf.cc
@@ -164,7 +164,7 @@
* have stored null bytes in a multi-byte encoding).
*/
void expandBufAddUtf8String(ExpandBuf* pBuf, const char* s) {
- int strLen = strlen(s);
+ int strLen = (s != nullptr ? strlen(s) : 0);
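+ // s may now be nullptr (e.g., a local variable's missing generic
+ // signature); it is encoded as an empty string.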
ensureSpace(pBuf, sizeof(uint32_t) + strLen);
SetUtf8String(pBuf->storage + pBuf->curLen, s, strLen);
pBuf->curLen += sizeof(uint32_t) + strLen;
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
index ff82772..4a1e6c2 100644
--- a/runtime/native/java_lang_Runtime.cc
+++ b/runtime/native/java_lang_Runtime.cc
@@ -79,7 +79,7 @@
// Starting with N nativeLoad uses classloader local
// linker namespace instead of global LD_LIBRARY_PATH
// (23 is Marshmallow)
- if (target_sdk_version == 0) {
+ if (target_sdk_version <= INT_MAX) {
SetLdLibraryPath(env, javaLdLibraryPath);
}
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 7de6c06..ea193d7 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -33,7 +33,7 @@
mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
if (with_read_barrier && kUseBakerReadBarrier) {
- // The higher bits of the rb ptr, rb_ptr_high_bits (must be zero)
+ // The higher bits of the rb_ptr, rb_ptr_high_bits (must be zero)
// are used to create an artificial data dependency from the is_gray
// load to the ref field (ptr) load to avoid needing a load-load
// barrier between the two.
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index e7ad731..600b7f9 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -82,7 +82,8 @@
static void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
SHARED_REQUIRES(Locks::mutator_lock_);
- static mirror::Object* Mark(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE static mirror::Object* Mark(mirror::Object* obj)
+ SHARED_REQUIRES(Locks::mutator_lock_);
static mirror::Object* WhitePtr() {
return reinterpret_cast<mirror::Object*>(white_ptr_);
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 8a8c02f..13e3774 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2548,6 +2548,7 @@
QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuffer)
QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuilder)
QUICK_ENTRY_POINT_INFO(pReadBarrierJni)
+ QUICK_ENTRY_POINT_INFO(pReadBarrierMark)
QUICK_ENTRY_POINT_INFO(pReadBarrierSlow)
QUICK_ENTRY_POINT_INFO(pReadBarrierForRootSlow)
#undef QUICK_ENTRY_POINT_INFO
diff --git a/runtime/thread.h b/runtime/thread.h
index c556c36..6cb895c 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -545,6 +545,13 @@
OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
}
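+ // Offset of tls32_.is_gc_marking; presumably so that generated code can
+ // cheaply test whether the concurrent copying GC is currently marking.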
+ template<size_t pointer_size>
+ static ThreadOffset<pointer_size> IsGcMarkingOffset() {
+ return ThreadOffset<pointer_size>(
+ OFFSETOF_MEMBER(Thread, tls32_) +
+ OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
+ }
+
private:
template<size_t pointer_size>
static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
diff --git a/test/554-jit-profile-file/expected.txt b/test/554-jit-profile-file/expected.txt
deleted file mode 100644
index cde211e..0000000
--- a/test/554-jit-profile-file/expected.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-JNI_OnLoad called
-ProfileInfo:
-:classes.dex
- java.lang.String Main.hotMethod()
- void Main.main(java.lang.String[])
-:classes2.dex
- java.lang.String OtherDex.hotMethod()
diff --git a/test/554-jit-profile-file/info.txt b/test/554-jit-profile-file/info.txt
deleted file mode 100644
index b1bfe81..0000000
--- a/test/554-jit-profile-file/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Check that saving and restoring profile files works correctly in a JIT environment.
diff --git a/test/554-jit-profile-file/offline_profile.cc b/test/554-jit-profile-file/offline_profile.cc
deleted file mode 100644
index 75e441f..0000000
--- a/test/554-jit-profile-file/offline_profile.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "dex_file.h"
-
-#include "jit/offline_profiling_info.h"
-#include "jni.h"
-#include "mirror/class-inl.h"
-#include "oat_file_assistant.h"
-#include "oat_file_manager.h"
-#include "scoped_thread_state_change.h"
-#include "thread.h"
-
-namespace art {
-namespace {
-
-extern "C" JNIEXPORT jstring JNICALL Java_Main_getProfileInfoDump(
- JNIEnv* env, jclass cls, jstring filename) {
- std::string dex_location;
- {
- ScopedObjectAccess soa(Thread::Current());
- dex_location = soa.Decode<mirror::Class*>(cls)->GetDexCache()->GetDexFile()->GetLocation();
- }
- const OatFile* oat_file = Runtime::Current()->GetOatFileManager().GetPrimaryOatFile();
- std::vector<std::unique_ptr<const DexFile>> dex_files =
- OatFileAssistant::LoadDexFiles(*oat_file, dex_location.c_str());
- const char* filename_chars = env->GetStringUTFChars(filename, nullptr);
-
- std::vector<const DexFile*> dex_files_raw;
- for (size_t i = 0; i < dex_files.size(); i++) {
- dex_files_raw.push_back(dex_files[i].get());
- }
-
- ProfileCompilationInfo info(filename_chars);
-
- std::string result = info.Load(dex_files_raw)
- ? info.DumpInfo(/*print_full_dex_location*/false)
- : "Could not load profile info";
-
- env->ReleaseStringUTFChars(filename, filename_chars);
- // Return the dump of the profile info. It will be compared against a golden value.
- return env->NewStringUTF(result.c_str());
-}
-
-} // namespace
-} // namespace art
diff --git a/test/554-jit-profile-file/run b/test/554-jit-profile-file/run
deleted file mode 100644
index 52908c7..0000000
--- a/test/554-jit-profile-file/run
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-exec ${RUN} \
- -Xcompiler-option --compiler-filter=interpret-only \
- --runtime-option -Xjitsaveprofilinginfo \
- --runtime-option -Xusejit:true \
- --runtime-option -Xjitwarmupthreshold:10 \
- --runtime-option -Xjitthreshold:100 \
- "${@}"
diff --git a/test/554-jit-profile-file/src-multidex/OtherDex.java b/test/554-jit-profile-file/src-multidex/OtherDex.java
deleted file mode 100644
index 51644db..0000000
--- a/test/554-jit-profile-file/src-multidex/OtherDex.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.util.HashMap;
-
-public class OtherDex {
- public void coldMethod() {
- hotMethod();
- }
-
- public String hotMethod() {
- HashMap<String, String> map = new HashMap<String, String>();
- for (int i = 0; i < 10; i++) {
- map.put("" + i, "" + i + 1);
- }
- return map.get("1");
- }
-}
diff --git a/test/554-jit-profile-file/src/Main.java b/test/554-jit-profile-file/src/Main.java
deleted file mode 100644
index 9228070..0000000
--- a/test/554-jit-profile-file/src/Main.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.File;
-import java.io.IOException;
-import java.lang.reflect.Method;
-import java.util.HashMap;
-
-public class Main {
-
- public void coldMethod() {
- hotMethod();
- }
-
- public String hotMethod() {
- HashMap<String, String> map = new HashMap<String, String>();
- for (int i = 0; i < 10; i++) {
- map.put("" + i, "" + i + 1);
- }
- return map.get("1");
- }
-
- private static final String PKG_NAME = "test.package";
- private static final String APP_DIR_PREFIX = "app_dir_";
- private static final String CODE_CACHE = "code_cache";
- private static final String PROFILE_FILE = PKG_NAME + ".prof";
- private static final String TEMP_FILE_NAME_PREFIX = "dummy";
- private static final String TEMP_FILE_NAME_SUFFIX = "-file";
- private static final int JIT_INVOCATION_COUNT = 101;
-
- /* needs to match Runtime:: kProfileBackground */
- private static final int PROFILE_BACKGROUND = 1;
-
- public static void main(String[] args) throws Exception {
- System.loadLibrary(args[0]);
-
- File file = null;
- File appDir = null;
- File profileDir = null;
- File profileFile = null;
- try {
- // We don't know where we have rights to create the code_cache. So create
- // a dummy temporary file and get its parent directory. That will serve as
- // the app directory.
- file = createTempFile();
- appDir = new File(file.getParent(), APP_DIR_PREFIX + file.getName());
- appDir.mkdir();
- profileDir = new File(appDir, CODE_CACHE);
- profileDir.mkdir();
-
- // Registering the app info will set the profile file name.
- VMRuntime.registerAppInfo(PKG_NAME, appDir.getPath());
-
- // Make sure the hot methods are jitted.
- Main m = new Main();
- OtherDex o = new OtherDex();
- for (int i = 0; i < JIT_INVOCATION_COUNT; i++) {
- m.hotMethod();
- o.hotMethod();
- }
-
- // Sleep for 1 second to make sure that the methods had a chance to get compiled.
- Thread.sleep(1000);
- // Updating the process state to BACKGROUND will trigger profile saving.
- VMRuntime.updateProcessState(PROFILE_BACKGROUND);
-
- // Check that the profile file exists.
- profileFile = new File(profileDir, PROFILE_FILE);
- if (!profileFile.exists()) {
- throw new RuntimeException("No profile file found");
- }
- // Dump the profile file.
- // We know which methods are hot and compare the dump against the golden `expected` output.
- System.out.println(getProfileInfoDump(profileFile.getPath()));
- } finally {
- if (file != null) {
- file.delete();
- }
- if (profileFile != null) {
- profileFile.delete();
- }
- if (profileDir != null) {
- profileDir.delete();
- }
- if (appDir != null) {
- appDir.delete();
- }
- }
- }
-
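- // dalvik.system.VMRuntime is not part of the public API, so it is accessed via reflection.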
- private static class VMRuntime {
- private static final Method registerAppInfoMethod;
- private static final Method updateProcessStateMethod;
- private static final Method getRuntimeMethod;
- static {
- try {
- Class c = Class.forName("dalvik.system.VMRuntime");
- registerAppInfoMethod = c.getDeclaredMethod("registerAppInfo",
- String.class, String.class, String.class);
- updateProcessStateMethod = c.getDeclaredMethod("updateProcessState", Integer.TYPE);
- getRuntimeMethod = c.getDeclaredMethod("getRuntime");
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- }
-
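- // The reflected registerAppInfo method takes three Strings; this test passes null for the last one.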
- public static void registerAppInfo(String pkgName, String appDir) throws Exception {
- registerAppInfoMethod.invoke(null, pkgName, appDir, null);
- }
- public static void updateProcessState(int state) throws Exception {
- Object runtime = getRuntimeMethod.invoke(null);
- updateProcessStateMethod.invoke(runtime, state);
- }
- }
-
- static native String getProfileInfoDump(String filename);
-
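- // Fall back to /data/local/tmp and then /sdcard if the default temp directory is not writable.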
- private static File createTempFile() throws Exception {
- try {
- return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
- } catch (IOException e) {
- System.setProperty("java.io.tmpdir", "/data/local/tmp");
- try {
- return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
- } catch (IOException e2) {
- System.setProperty("java.io.tmpdir", "/sdcard");
- return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
- }
- }
- }
-}
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index f84dfe6..f74a516 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -38,8 +38,7 @@
461-get-reference-vreg/get_reference_vreg_jni.cc \
466-get-live-vreg/get_live_vreg_jni.cc \
497-inlining-and-class-loader/clear_dex_cache.cc \
- 543-env-long-ref/env_long_ref.cc \
- 554-jit-profile-file/offline_profile.cc
+ 543-env-long-ref/env_long_ref.cc
ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttestd.so
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index cedba26..f0ee921 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -514,12 +514,15 @@
# Tests that should fail in the read barrier configuration.
# 055: Exceeds run time limits due to read barrier instrumentation.
# 137: Read barrier forces interpreter. Cannot run this with the interpreter.
+# 484: Baker's fast-path-based read barrier compiler instrumentation generates code containing
+# more parallel moves (at least on x86), so some Checker assertions may fail.
# 537: Expects an array copy to be intrinsified, but calling-on-slowpath intrinsics are not yet
# handled in the read barrier configuration.
-# 554: Cannot run in interpreter mode and this rule cover both: the compiler and the interpreter.
+# 554: Cannot run in interpreter mode, and this rule covers both the compiler and the interpreter.
TEST_ART_BROKEN_READ_BARRIER_RUN_TESTS := \
055-enum-performance \
137-cfi \
+ 484-checker-register-hints \
537-checker-arraycopy \
554-jit-profile-file