Merge "Only compile device codegen for device architectures"
diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h
index f8b7460..c754e55 100644
--- a/compiler/cfi_test.h
+++ b/compiler/cfi_test.h
@@ -22,11 +22,13 @@
#include <sstream>
#include "arch/instruction_set.h"
+#include "base/enums.h"
#include "debug/dwarf/dwarf_constants.h"
#include "debug/dwarf/dwarf_test.h"
#include "debug/dwarf/headers.h"
#include "disassembler/disassembler.h"
#include "gtest/gtest.h"
+#include "thread.h"
namespace art {
@@ -57,7 +59,13 @@
// Pretty-print assembly.
const uint8_t* asm_base = actual_asm.data();
const uint8_t* asm_end = asm_base + actual_asm.size();
- auto* opts = new DisassemblerOptions(false, asm_base, asm_end, true);
+ auto* opts = new DisassemblerOptions(false,
+ asm_base,
+ asm_end,
+ true,
+ is64bit
+ ? &Thread::DumpThreadOffset<PointerSize::k64>
+ : &Thread::DumpThreadOffset<PointerSize::k32>);
std::unique_ptr<Disassembler> disasm(Disassembler::Create(isa, opts));
std::stringstream stream;
const uint8_t* base = actual_asm.data() + (isa == kThumb2 ? 1 : 0);
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index efae4d0..bb45999 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -52,6 +52,7 @@
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
@@ -1418,6 +1419,9 @@
bin_offset = RoundUp(bin_offset, method_alignment);
break;
}
+ case kBinDexCacheArray:
+ bin_offset = RoundUp(bin_offset, DexCacheArraysLayout::Alignment());
+ break;
case kBinImTable:
case kBinIMTConflictTable: {
bin_offset = RoundUp(bin_offset, static_cast<size_t>(target_ptr_size_));
@@ -2034,7 +2038,7 @@
// 64-bit values here, clearing the top 32 bits for 32-bit targets. The zero-extension is
// done by casting to the unsigned type uintptr_t before casting to int64_t, i.e.
// static_cast<int64_t>(reinterpret_cast<uintptr_t>(image_begin_ + offset)).
- GcRoot<mirror::String>* orig_strings = orig_dex_cache->GetStrings();
+ mirror::StringDexCacheType* orig_strings = orig_dex_cache->GetStrings();
if (orig_strings != nullptr) {
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::StringsOffset(),
NativeLocationInImage(orig_strings),
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 8273b15..8a80982 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -1189,8 +1189,13 @@
}
mirror::String* GetTargetString(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
- mirror::DexCache* dex_cache = GetDexCache(patch.TargetStringDexFile());
- mirror::String* string = dex_cache->GetResolvedString(patch.TargetStringIndex());
+ ScopedObjectAccessUnchecked soa(Thread::Current());
+ StackHandleScope<1> hs(soa.Self());
+ ClassLinker* linker = Runtime::Current()->GetClassLinker();
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(GetDexCache(patch.TargetStringDexFile())));
+ mirror::String* string = linker->LookupString(*patch.TargetStringDexFile(),
+ patch.TargetStringIndex(),
+ dex_cache);
DCHECK(string != nullptr);
DCHECK(writer_->HasBootImage() ||
Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(string));
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 5152075..c532e72 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1228,7 +1228,8 @@
instruction->IsLoadString() ||
instruction->IsInstanceOf() ||
instruction->IsCheckCast() ||
- (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()))
+ (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()) ||
+ (instruction->IsInvokeStaticOrDirect() && instruction->GetLocations()->Intrinsified()))
<< "instruction->DebugName()=" << instruction->DebugName()
<< " slow_path->GetDescription()=" << slow_path->GetDescription();
}
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 4c4128c..404f044 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -429,7 +429,8 @@
instruction_->IsLoadString() ||
instruction_->IsInstanceOf() ||
instruction_->IsCheckCast() ||
- (instruction_->IsInvokeVirtual()) && instruction_->GetLocations()->Intrinsified())
+ (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()) ||
+ (instruction_->IsInvokeStaticOrDirect() && instruction_->GetLocations()->Intrinsified()))
<< "Unexpected instruction in read barrier marking slow path: "
<< instruction_->DebugName();
@@ -441,6 +442,9 @@
DCHECK_NE(reg, SP);
DCHECK_NE(reg, LR);
DCHECK_NE(reg, PC);
+ // IP is used internally by the ReadBarrierMarkRegX entry point
+ // as a temporary; it cannot be the entry point's input/output.
+ DCHECK_NE(reg, IP);
DCHECK(0 <= reg && reg < kNumberOfCoreRegisters) << reg;
// "Compact" slow path, saving two moves.
//
@@ -5585,55 +5589,15 @@
__ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address));
return; // No dex cache slow path.
}
- case HLoadString::LoadKind::kDexCacheAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
- // 16-bit LDR immediate has a 5-bit offset multiplied by the size and that gives
- // a 128B range. To try and reduce the number of literals if we load multiple strings,
- // simply split the dex cache address to a 128B aligned base loaded from a literal
- // and the remaining offset embedded in the load.
- static_assert(sizeof(GcRoot<mirror::String>) == 4u, "Expected GC root to be 4 bytes.");
- DCHECK_ALIGNED(load->GetAddress(), 4u);
- constexpr size_t offset_bits = /* encoded bits */ 5 + /* scale */ 2;
- uint32_t base_address = address & ~MaxInt<uint32_t>(offset_bits);
- uint32_t offset = address & MaxInt<uint32_t>(offset_bits);
- __ LoadLiteral(out, codegen_->DeduplicateDexCacheAddressLiteral(base_address));
- // /* GcRoot<mirror::String> */ out = *(base_address + offset)
- GenerateGcRootFieldLoad(load, out_loc, out, offset);
- break;
- }
- case HLoadString::LoadKind::kDexCachePcRelative: {
- Register base_reg = locations->InAt(0).AsRegister<Register>();
- HArmDexCacheArraysBase* base = load->InputAt(0)->AsArmDexCacheArraysBase();
- int32_t offset = load->GetDexCacheElementOffset() - base->GetElementOffset();
- // /* GcRoot<mirror::String> */ out = *(dex_cache_arrays_base + offset)
- GenerateGcRootFieldLoad(load, out_loc, base_reg, offset);
- break;
- }
- case HLoadString::LoadKind::kDexCacheViaMethod: {
- Register current_method = locations->InAt(0).AsRegister<Register>();
-
- // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
- GenerateGcRootFieldLoad(
- load, out_loc, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
- // /* GcRoot<mirror::String>[] */ out = out->dex_cache_strings_
- __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
- // /* GcRoot<mirror::String> */ out = out[string_index]
- GenerateGcRootFieldLoad(
- load, out_loc, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
- break;
- }
default:
- LOG(FATAL) << "Unexpected load kind: " << load->GetLoadKind();
- UNREACHABLE();
+ break;
}
- if (!load->IsInDexCache()) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
- codegen_->AddSlowPath(slow_path);
- __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
- __ Bind(slow_path->GetExitLabel());
- }
+ // TODO: Re-add the compiler code to do the string dex cache lookup.
+ SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
+ codegen_->AddSlowPath(slow_path);
+ __ b(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
}
static int32_t GetExceptionTlsOffset() {
@@ -6522,7 +6486,8 @@
// Introduce a dependency on the lock_word including the rb_state,
// which shall prevent load-load reordering without using
// a memory barrier (which would be more expensive).
- // obj is unchanged by this operation, but its value now depends on temp_reg.
+ // `obj` is unchanged by this operation, but its value now depends
+ // on `temp_reg`.
__ add(obj, obj, ShifterOperand(temp_reg, LSR, 32));
// The actual reference load.
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index d95e7df..122c174 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -595,7 +595,8 @@
instruction_->IsLoadString() ||
instruction_->IsInstanceOf() ||
instruction_->IsCheckCast() ||
- (instruction_->IsInvokeVirtual()) && instruction_->GetLocations()->Intrinsified())
+ (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()) ||
+ (instruction_->IsInvokeStaticOrDirect() && instruction_->GetLocations()->Intrinsified()))
<< "Unexpected instruction in read barrier marking slow path: "
<< instruction_->DebugName();
@@ -607,7 +608,8 @@
DCHECK_NE(obj_.reg(), LR);
DCHECK_NE(obj_.reg(), WSP);
DCHECK_NE(obj_.reg(), WZR);
- // WIP0 is used by the slow path as a temp, it can not be the object register.
+ // IP0 is used internally by the ReadBarrierMarkRegX entry point
+ // as a temporary; it cannot be the entry point's input/output.
DCHECK_NE(obj_.reg(), IP0);
DCHECK(0 <= obj_.reg() && obj_.reg() < kNumberOfWRegisters) << obj_.reg();
// "Compact" slow path, saving two moves.
@@ -4195,7 +4197,6 @@
}
void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
- Location out_loc = load->GetLocations()->Out();
Register out = OutputRegister(load);
switch (load->GetLoadKind()) {
@@ -4231,63 +4232,15 @@
__ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(load->GetAddress()));
return; // No dex cache slow path.
}
- case HLoadString::LoadKind::kDexCacheAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- // LDR immediate has a 12-bit offset multiplied by the size and for 32-bit loads
- // that gives a 16KiB range. To try and reduce the number of literals if we load
- // multiple strings, simply split the dex cache address to a 16KiB aligned base
- // loaded from a literal and the remaining offset embedded in the load.
- static_assert(sizeof(GcRoot<mirror::String>) == 4u, "Expected GC root to be 4 bytes.");
- DCHECK_ALIGNED(load->GetAddress(), 4u);
- constexpr size_t offset_bits = /* encoded bits */ 12 + /* scale */ 2;
- uint64_t base_address = load->GetAddress() & ~MaxInt<uint64_t>(offset_bits);
- uint32_t offset = load->GetAddress() & MaxInt<uint64_t>(offset_bits);
- __ Ldr(out.X(), codegen_->DeduplicateDexCacheAddressLiteral(base_address));
- // /* GcRoot<mirror::String> */ out = *(base_address + offset)
- GenerateGcRootFieldLoad(load, out_loc, out.X(), offset);
- break;
- }
- case HLoadString::LoadKind::kDexCachePcRelative: {
- // Add ADRP with its PC-relative DexCache access patch.
- const DexFile& dex_file = load->GetDexFile();
- uint32_t element_offset = load->GetDexCacheElementOffset();
- vixl::aarch64::Label* adrp_label =
- codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
- {
- SingleEmissionCheckScope guard(GetVIXLAssembler());
- __ Bind(adrp_label);
- __ adrp(out.X(), /* offset placeholder */ 0);
- }
- // Add LDR with its PC-relative DexCache access patch.
- vixl::aarch64::Label* ldr_label =
- codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset, adrp_label);
- // /* GcRoot<mirror::String> */ out = *(base_address + offset) /* PC-relative */
- GenerateGcRootFieldLoad(load, out_loc, out.X(), /* offset placeholder */ 0, ldr_label);
- break;
- }
- case HLoadString::LoadKind::kDexCacheViaMethod: {
- Register current_method = InputRegisterAt(load, 0);
- // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
- GenerateGcRootFieldLoad(
- load, out_loc, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
- // /* GcRoot<mirror::String>[] */ out = out->dex_cache_strings_
- __ Ldr(out.X(), HeapOperand(out, mirror::Class::DexCacheStringsOffset().Uint32Value()));
- // /* GcRoot<mirror::String> */ out = out[string_index]
- GenerateGcRootFieldLoad(
- load, out_loc, out.X(), CodeGenerator::GetCacheOffset(load->GetStringIndex()));
- break;
- }
default:
- LOG(FATAL) << "Unexpected load kind: " << load->GetLoadKind();
- UNREACHABLE();
+ break;
}
- if (!load->IsInDexCache()) {
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load);
- codegen_->AddSlowPath(slow_path);
- __ Cbz(out, slow_path->GetEntryLabel());
- __ Bind(slow_path->GetExitLabel());
- }
+ // TODO: Re-add the compiler code to do the string dex cache lookup.
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load);
+ codegen_->AddSlowPath(slow_path);
+ __ B(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
}
void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
@@ -5239,7 +5192,8 @@
// Introduce a dependency on the lock_word including rb_state,
// to prevent load-load reordering, and without using
// a memory barrier (which would be more expensive).
- // obj is unchanged by this operation, but its value now depends on temp.
+ // `obj` is unchanged by this operation, but its value now depends
+ // on `temp`.
__ Add(obj.X(), obj.X(), Operand(temp.X(), LSR, 32));
// The actual reference load.
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 58879bc..a7fbc84 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -4580,11 +4580,6 @@
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
base_or_current_method_reg = isR6 ? ZERO : locations->InAt(0).AsRegister<Register>();
break;
- // We need an extra register for PC-relative dex cache accesses.
- case HLoadString::LoadKind::kDexCachePcRelative:
- case HLoadString::LoadKind::kDexCacheViaMethod:
- base_or_current_method_reg = locations->InAt(0).AsRegister<Register>();
- break;
default:
base_or_current_method_reg = ZERO;
break;
@@ -4628,52 +4623,15 @@
codegen_->DeduplicateBootImageAddressLiteral(address));
return; // No dex cache slow path.
}
- case HLoadString::LoadKind::kDexCacheAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
- static_assert(sizeof(GcRoot<mirror::String>) == 4u, "Expected GC root to be 4 bytes.");
- DCHECK_ALIGNED(load->GetAddress(), 4u);
- int16_t offset = Low16Bits(address);
- uint32_t base_address = address - offset; // This accounts for offset sign extension.
- __ Lui(out, High16Bits(base_address));
- // /* GcRoot<mirror::String> */ out = *(base_address + offset)
- GenerateGcRootFieldLoad(load, out_loc, out, offset);
- break;
- }
- case HLoadString::LoadKind::kDexCachePcRelative: {
- HMipsDexCacheArraysBase* base = load->InputAt(0)->AsMipsDexCacheArraysBase();
- int32_t offset =
- load->GetDexCacheElementOffset() - base->GetElementOffset() - kDexCacheArrayLwOffset;
- // /* GcRoot<mirror::String> */ out = *(dex_cache_arrays_base + offset)
- GenerateGcRootFieldLoad(load, out_loc, base_or_current_method_reg, offset);
- break;
- }
- case HLoadString::LoadKind::kDexCacheViaMethod: {
- // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
- GenerateGcRootFieldLoad(load,
- out_loc,
- base_or_current_method_reg,
- ArtMethod::DeclaringClassOffset().Int32Value());
- // /* GcRoot<mirror::String>[] */ out = out->dex_cache_strings_
- __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
- // /* GcRoot<mirror::String> */ out = out[string_index]
- GenerateGcRootFieldLoad(load,
- out_loc,
- out,
- CodeGenerator::GetCacheOffset(load->GetStringIndex()));
- break;
- }
default:
- LOG(FATAL) << "Unexpected load kind: " << load->GetLoadKind();
- UNREACHABLE();
+ break;
}
- if (!load->IsInDexCache()) {
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load);
- codegen_->AddSlowPath(slow_path);
- __ Beqz(out, slow_path->GetEntryLabel());
- __ Bind(slow_path->GetExitLabel());
- }
+ // TODO: Re-add the compiler code to do the string dex cache lookup.
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load);
+ codegen_->AddSlowPath(slow_path);
+ __ B(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
}
void LocationsBuilderMIPS::VisitLongConstant(HLongConstant* constant) {
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 4e7a272..4a5755c 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -3261,22 +3261,11 @@
}
void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) {
- LocationSummary* locations = load->GetLocations();
- GpuRegister out = locations->Out().AsRegister<GpuRegister>();
- GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
- __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
- ArtMethod::DeclaringClassOffset().Int32Value());
- __ LoadFromOffset(kLoadDoubleword, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
- __ LoadFromOffset(
- kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
- // TODO: We will need a read barrier here.
-
- if (!load->IsInDexCache()) {
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load);
- codegen_->AddSlowPath(slow_path);
- __ Beqzc(out, slow_path->GetEntryLabel());
- __ Bind(slow_path->GetExitLabel());
- }
+ // TODO: Re-add the compiler code to do the string dex cache lookup.
+ SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load);
+ codegen_->AddSlowPath(slow_path);
+ __ Bc(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
}
void LocationsBuilderMIPS64::VisitLongConstant(HLongConstant* constant) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 7a561bb..7aca16f 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -464,7 +464,8 @@
instruction_->IsLoadString() ||
instruction_->IsInstanceOf() ||
instruction_->IsCheckCast() ||
- (instruction_->IsInvokeVirtual()) && instruction_->GetLocations()->Intrinsified())
+ (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()) ||
+ (instruction_->IsInvokeStaticOrDirect() && instruction_->GetLocations()->Intrinsified()))
<< "Unexpected instruction in read barrier marking slow path: "
<< instruction_->DebugName();
@@ -1578,15 +1579,15 @@
locations->SetOut(Location::SameAsFirstInput());
}
-void InstructionCodeGeneratorX86::GenerateIntCompare(Location lhs, Location rhs) {
+void CodeGeneratorX86::GenerateIntCompare(Location lhs, Location rhs) {
Register lhs_reg = lhs.AsRegister<Register>();
if (rhs.IsConstant()) {
int32_t value = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
- codegen_->Compare32BitValue(lhs_reg, value);
+ Compare32BitValue(lhs_reg, value);
} else if (rhs.IsStackSlot()) {
- __ cmpl(lhs_reg, Address(ESP, rhs.GetStackIndex()));
+ assembler_.cmpl(lhs_reg, Address(ESP, rhs.GetStackIndex()));
} else {
- __ cmpl(lhs_reg, rhs.AsRegister<Register>());
+ assembler_.cmpl(lhs_reg, rhs.AsRegister<Register>());
}
}
@@ -1619,7 +1620,7 @@
DCHECK_NE(condition->InputAt(0)->GetType(), Primitive::kPrimLong);
DCHECK(!Primitive::IsFloatingPointType(condition->InputAt(0)->GetType()));
LocationSummary* cond_locations = condition->GetLocations();
- GenerateIntCompare(cond_locations->InAt(0), cond_locations->InAt(1));
+ codegen_->GenerateIntCompare(cond_locations->InAt(0), cond_locations->InAt(1));
cond = X86Condition(condition->GetCondition());
}
} else {
@@ -1728,7 +1729,7 @@
// Clear output register: setb only sets the low byte.
__ xorl(reg, reg);
- GenerateIntCompare(lhs, rhs);
+ codegen_->GenerateIntCompare(lhs, rhs);
__ setb(X86Condition(cond->GetCondition()), reg);
return;
}
@@ -4210,7 +4211,7 @@
case Primitive::kPrimShort:
case Primitive::kPrimChar:
case Primitive::kPrimInt: {
- GenerateIntCompare(left, right);
+ codegen_->GenerateIntCompare(left, right);
break;
}
case Primitive::kPrimLong: {
@@ -6230,48 +6231,15 @@
codegen_->RecordSimplePatch();
return; // No dex cache slow path.
}
- case HLoadString::LoadKind::kDexCacheAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
- // /* GcRoot<mirror::String> */ out = *address
- GenerateGcRootFieldLoad(load, out_loc, Address::Absolute(address));
- break;
- }
- case HLoadString::LoadKind::kDexCachePcRelative: {
- Register base_reg = locations->InAt(0).AsRegister<Register>();
- uint32_t offset = load->GetDexCacheElementOffset();
- Label* fixup_label = codegen_->NewPcRelativeDexCacheArrayPatch(load->GetDexFile(), offset);
- // /* GcRoot<mirror::String> */ out = *(base + offset) /* PC-relative */
- GenerateGcRootFieldLoad(
- load, out_loc, Address(base_reg, CodeGeneratorX86::kDummy32BitOffset), fixup_label);
- break;
- }
- case HLoadString::LoadKind::kDexCacheViaMethod: {
- Register current_method = locations->InAt(0).AsRegister<Register>();
-
- // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
- GenerateGcRootFieldLoad(
- load, out_loc, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
-
- // /* GcRoot<mirror::String>[] */ out = out->dex_cache_strings_
- __ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value()));
- // /* GcRoot<mirror::String> */ out = out[string_index]
- GenerateGcRootFieldLoad(
- load, out_loc, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
- break;
- }
default:
- LOG(FATAL) << "Unexpected load kind: " << load->GetLoadKind();
- UNREACHABLE();
+ break;
}
- if (!load->IsInDexCache()) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86(load);
- codegen_->AddSlowPath(slow_path);
- __ testl(out, out);
- __ j(kEqual, slow_path->GetEntryLabel());
- __ Bind(slow_path->GetExitLabel());
- }
+ // TODO: Re-add the compiler code to do the string dex cache lookup.
+ SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86(load);
+ codegen_->AddSlowPath(slow_path);
+ __ jmp(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
}
static Address GetExceptionTlsAddress() {
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index f306b33..894f2e8 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -295,7 +295,6 @@
HBasicBlock* default_block);
void GenerateFPCompare(Location lhs, Location rhs, HInstruction* insn, bool is_double);
- void GenerateIntCompare(Location lhs, Location rhs);
X86Assembler* const assembler_;
CodeGeneratorX86* const codegen_;
@@ -431,6 +430,8 @@
Register value,
bool value_can_be_null);
+ void GenerateIntCompare(Location lhs, Location rhs);
+
void GenerateMemoryBarrier(MemBarrierKind kind);
Label* GetLabelOf(HBasicBlock* block) const {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index cf01a79..0c55ae4 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -485,7 +485,8 @@
instruction_->IsLoadString() ||
instruction_->IsInstanceOf() ||
instruction_->IsCheckCast() ||
- (instruction_->IsInvokeVirtual()) && instruction_->GetLocations()->Intrinsified())
+ (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()) ||
+ (instruction_->IsInvokeStaticOrDirect() && instruction_->GetLocations()->Intrinsified()))
<< "Unexpected instruction in read barrier marking slow path: "
<< instruction_->DebugName();
@@ -5635,53 +5636,15 @@
codegen_->RecordSimplePatch();
return; // No dex cache slow path.
}
- case HLoadString::LoadKind::kDexCacheAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- // /* GcRoot<mirror::String> */ out = *address
- if (IsUint<32>(load->GetAddress())) {
- Address address = Address::Absolute(load->GetAddress(), /* no_rip */ true);
- GenerateGcRootFieldLoad(load, out_loc, address);
- } else {
- // TODO: Consider using opcode A1, i.e. movl eax, moff32 (with 64-bit address).
- __ movq(out, Immediate(load->GetAddress()));
- GenerateGcRootFieldLoad(load, out_loc, Address(out, 0));
- }
- break;
- }
- case HLoadString::LoadKind::kDexCachePcRelative: {
- uint32_t offset = load->GetDexCacheElementOffset();
- Label* fixup_label = codegen_->NewPcRelativeDexCacheArrayPatch(load->GetDexFile(), offset);
- Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
- /* no_rip */ false);
- // /* GcRoot<mirror::String> */ out = *address /* PC-relative */
- GenerateGcRootFieldLoad(load, out_loc, address, fixup_label);
- break;
- }
- case HLoadString::LoadKind::kDexCacheViaMethod: {
- CpuRegister current_method = locations->InAt(0).AsRegister<CpuRegister>();
-
- // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
- GenerateGcRootFieldLoad(
- load, out_loc, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
- // /* GcRoot<mirror::String>[] */ out = out->dex_cache_strings_
- __ movq(out, Address(out, mirror::Class::DexCacheStringsOffset().Uint32Value()));
- // /* GcRoot<mirror::String> */ out = out[string_index]
- GenerateGcRootFieldLoad(
- load, out_loc, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
- break;
- }
default:
- LOG(FATAL) << "Unexpected load kind: " << load->GetLoadKind();
- UNREACHABLE();
+ break;
}
- if (!load->IsInDexCache()) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86_64(load);
- codegen_->AddSlowPath(slow_path);
- __ testl(out, out);
- __ j(kEqual, slow_path->GetEntryLabel());
- __ Bind(slow_path->GetExitLabel());
- }
+ // TODO: Re-add the compiler code to do the string dex cache lookup.
+ SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86_64(load);
+ codegen_->AddSlowPath(slow_path);
+ __ jmp(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
}
static Address GetExceptionTlsAddress() {
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 89d80cc..b3d5341 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -122,7 +122,10 @@
new DisassemblerOptions(/* absolute_addresses */ false,
base_address,
end_address,
- /* can_read_literals */ true)));
+ /* can_read_literals */ true,
+ Is64BitInstructionSet(instruction_set)
+ ? &Thread::DumpThreadOffset<PointerSize::k64>
+ : &Thread::DumpThreadOffset<PointerSize::k32>)));
}
~HGraphVisualizerDisassembler() {
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 27d9d48..0bbc0e5 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -41,6 +41,92 @@
using IntrinsicSlowPathARM = IntrinsicSlowPath<InvokeDexCallingConventionVisitorARM>;
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())-> // NOLINT
+
+// Slow path implementing the SystemArrayCopy intrinsic copy loop with read barriers.
+class ReadBarrierSystemArrayCopySlowPathARM : public SlowPathCode {
+ public:
+ explicit ReadBarrierSystemArrayCopySlowPathARM(HInstruction* instruction)
+ : SlowPathCode(instruction) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+ }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(locations->CanCall());
+ DCHECK(instruction_->IsInvokeStaticOrDirect())
+ << "Unexpected instruction in read barrier arraycopy slow path: "
+ << instruction_->DebugName();
+ DCHECK(instruction_->GetLocations()->Intrinsified());
+ DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kSystemArrayCopy);
+
+ int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot);
+ uint32_t element_size_shift = Primitive::ComponentSizeShift(Primitive::kPrimNot);
+ uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value();
+
+ Register dest = locations->InAt(2).AsRegister<Register>();
+ Location dest_pos = locations->InAt(3);
+ Register src_curr_addr = locations->GetTemp(0).AsRegister<Register>();
+ Register dst_curr_addr = locations->GetTemp(1).AsRegister<Register>();
+ Register src_stop_addr = locations->GetTemp(2).AsRegister<Register>();
+ Register tmp = locations->GetTemp(3).AsRegister<Register>();
+
+ __ Bind(GetEntryLabel());
+ // Compute the base destination address in `dst_curr_addr`.
+ if (dest_pos.IsConstant()) {
+ int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ AddConstant(dst_curr_addr, dest, element_size * constant + offset);
+ } else {
+ __ add(dst_curr_addr,
+ dest,
+ ShifterOperand(dest_pos.AsRegister<Register>(), LSL, element_size_shift));
+ __ AddConstant(dst_curr_addr, offset);
+ }
+
+ Label loop;
+ __ Bind(&loop);
+ __ ldr(tmp, Address(src_curr_addr, element_size, Address::PostIndex));
+ __ MaybeUnpoisonHeapReference(tmp);
+ // TODO: Inline the mark bit check before calling the runtime?
+ // tmp = ReadBarrier::Mark(tmp);
+ // No need to save live registers; it's taken care of by the
+ // entrypoint. Also, there is no need to update the stack mask,
+ // as this runtime call will not trigger a garbage collection.
+ // (See ReadBarrierMarkSlowPathARM::EmitNativeCode for more
+ // explanations.)
+ DCHECK_NE(tmp, SP);
+ DCHECK_NE(tmp, LR);
+ DCHECK_NE(tmp, PC);
+ // IP is used internally by the ReadBarrierMarkRegX entry point
+ // as a temporary (and not preserved). It thus cannot be used by
+ // any live register in this slow path.
+ DCHECK_NE(src_curr_addr, IP);
+ DCHECK_NE(dst_curr_addr, IP);
+ DCHECK_NE(src_stop_addr, IP);
+ DCHECK_NE(tmp, IP);
+ DCHECK(0 <= tmp && tmp < kNumberOfCoreRegisters) << tmp;
+ int32_t entry_point_offset =
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArmPointerSize>(tmp);
+ // This runtime call does not require a stack map.
+ arm_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
+ __ MaybePoisonHeapReference(tmp);
+ __ str(tmp, Address(dst_curr_addr, element_size, Address::PostIndex));
+ __ cmp(src_curr_addr, ShifterOperand(src_stop_addr));
+ __ b(&loop, NE);
+ __ b(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "ReadBarrierSystemArrayCopySlowPathARM"; }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ReadBarrierSystemArrayCopySlowPathARM);
+};
+
+#undef __
+
bool IntrinsicLocationsBuilderARM::TryDispatch(HInvoke* invoke) {
Dispatch(invoke);
LocationSummary* res = invoke->GetLocations();
@@ -1337,9 +1423,9 @@
}
void IntrinsicLocationsBuilderARM::VisitSystemArrayCopy(HInvoke* invoke) {
- // TODO(rpl): Implement read barriers in the SystemArrayCopy
- // intrinsic and re-enable it (b/29516905).
- if (kEmitCompilerReadBarrier) {
+ // The only read barrier implementation supporting the
+ // SystemArrayCopy intrinsic is the Baker-style read barrier.
+ if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
return;
}
@@ -1362,6 +1448,13 @@
if (length != nullptr && !assembler_->ShifterOperandCanAlwaysHold(length->GetValue())) {
locations->SetInAt(4, Location::RequiresRegister());
}
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // Temporary register IP cannot be used in
+ // ReadBarrierSystemArrayCopySlowPathARM (because that register
+ // is clobbered by ReadBarrierMarkRegX entry points). Get an extra
+ // temporary register from the register allocator.
+ locations->AddTemp(Location::RequiresRegister());
+ }
}
static void CheckPosition(ArmAssembler* assembler,
@@ -1427,9 +1520,9 @@
}
void IntrinsicCodeGeneratorARM::VisitSystemArrayCopy(HInvoke* invoke) {
- // TODO(rpl): Implement read barriers in the SystemArrayCopy
- // intrinsic and re-enable it (b/29516905).
- DCHECK(!kEmitCompilerReadBarrier);
+ // The only read barrier implementation supporting the
+ // SystemArrayCopy intrinsic is the Baker-style read barrier.
+ DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
ArmAssembler* assembler = GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -1438,18 +1531,22 @@
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+ uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
Register src = locations->InAt(0).AsRegister<Register>();
Location src_pos = locations->InAt(1);
Register dest = locations->InAt(2).AsRegister<Register>();
Location dest_pos = locations->InAt(3);
Location length = locations->InAt(4);
- Register temp1 = locations->GetTemp(0).AsRegister<Register>();
- Register temp2 = locations->GetTemp(1).AsRegister<Register>();
- Register temp3 = locations->GetTemp(2).AsRegister<Register>();
+ Location temp1_loc = locations->GetTemp(0);
+ Register temp1 = temp1_loc.AsRegister<Register>();
+ Location temp2_loc = locations->GetTemp(1);
+ Register temp2 = temp2_loc.AsRegister<Register>();
+ Location temp3_loc = locations->GetTemp(2);
+ Register temp3 = temp3_loc.AsRegister<Register>();
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathARM(invoke);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCode* intrinsic_slow_path = new (GetAllocator()) IntrinsicSlowPathARM(invoke);
+ codegen_->AddSlowPath(intrinsic_slow_path);
Label conditions_on_positions_validated;
SystemArrayCopyOptimizations optimizations(invoke);
@@ -1465,7 +1562,7 @@
DCHECK_GE(src_pos_constant, dest_pos_constant);
} else if (src_pos_constant < dest_pos_constant) {
__ cmp(src, ShifterOperand(dest));
- __ b(slow_path->GetEntryLabel(), EQ);
+ __ b(intrinsic_slow_path->GetEntryLabel(), EQ);
}
// Checked when building locations.
@@ -1477,7 +1574,7 @@
__ b(&conditions_on_positions_validated, NE);
}
__ cmp(dest_pos.AsRegister<Register>(), ShifterOperand(src_pos_constant));
- __ b(slow_path->GetEntryLabel(), GT);
+ __ b(intrinsic_slow_path->GetEntryLabel(), GT);
}
} else {
if (!optimizations.GetDestinationIsSource()) {
@@ -1490,19 +1587,19 @@
} else {
__ cmp(src_pos.AsRegister<Register>(), ShifterOperand(dest_pos.AsRegister<Register>()));
}
- __ b(slow_path->GetEntryLabel(), LT);
+ __ b(intrinsic_slow_path->GetEntryLabel(), LT);
}
__ Bind(&conditions_on_positions_validated);
if (!optimizations.GetSourceIsNotNull()) {
// Bail out if the source is null.
- __ CompareAndBranchIfZero(src, slow_path->GetEntryLabel());
+ __ CompareAndBranchIfZero(src, intrinsic_slow_path->GetEntryLabel());
}
if (!optimizations.GetDestinationIsNotNull() && !optimizations.GetDestinationIsSource()) {
// Bail out if the destination is null.
- __ CompareAndBranchIfZero(dest, slow_path->GetEntryLabel());
+ __ CompareAndBranchIfZero(dest, intrinsic_slow_path->GetEntryLabel());
}
// If the length is negative, bail out.
@@ -1511,7 +1608,7 @@
!optimizations.GetCountIsSourceLength() &&
!optimizations.GetCountIsDestinationLength()) {
__ cmp(length.AsRegister<Register>(), ShifterOperand(0));
- __ b(slow_path->GetEntryLabel(), LT);
+ __ b(intrinsic_slow_path->GetEntryLabel(), LT);
}
// Validity checks: source.
@@ -1519,7 +1616,7 @@
src_pos,
src,
length,
- slow_path,
+ intrinsic_slow_path,
temp1,
optimizations.GetCountIsSourceLength());
@@ -1528,7 +1625,7 @@
dest_pos,
dest,
length,
- slow_path,
+ intrinsic_slow_path,
temp1,
optimizations.GetCountIsDestinationLength());
@@ -1537,112 +1634,287 @@
// type of the destination array. We do two checks: the classes are the same,
// or the destination is Object[]. If none of these checks succeed, we go to the
// slow path.
- __ LoadFromOffset(kLoadWord, temp1, dest, class_offset);
- __ LoadFromOffset(kLoadWord, temp2, src, class_offset);
- bool did_unpoison = false;
- if (!optimizations.GetDestinationIsNonPrimitiveArray() ||
- !optimizations.GetSourceIsNonPrimitiveArray()) {
- // One or two of the references need to be unpoisoned. Unpoison them
- // both to make the identity check valid.
- __ MaybeUnpoisonHeapReference(temp1);
- __ MaybeUnpoisonHeapReference(temp2);
- did_unpoison = true;
- }
- if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
- // Bail out if the destination is not a non primitive array.
- // /* HeapReference<Class> */ temp3 = temp1->component_type_
- __ LoadFromOffset(kLoadWord, temp3, temp1, component_offset);
- __ CompareAndBranchIfZero(temp3, slow_path->GetEntryLabel());
- __ MaybeUnpoisonHeapReference(temp3);
- __ LoadFromOffset(kLoadUnsignedHalfword, temp3, temp3, primitive_offset);
- static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
- __ CompareAndBranchIfNonZero(temp3, slow_path->GetEntryLabel());
- }
-
- if (!optimizations.GetSourceIsNonPrimitiveArray()) {
- // Bail out if the source is not a non primitive array.
- // /* HeapReference<Class> */ temp3 = temp2->component_type_
- __ LoadFromOffset(kLoadWord, temp3, temp2, component_offset);
- __ CompareAndBranchIfZero(temp3, slow_path->GetEntryLabel());
- __ MaybeUnpoisonHeapReference(temp3);
- __ LoadFromOffset(kLoadUnsignedHalfword, temp3, temp3, primitive_offset);
- static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
- __ CompareAndBranchIfNonZero(temp3, slow_path->GetEntryLabel());
- }
-
- __ cmp(temp1, ShifterOperand(temp2));
-
- if (optimizations.GetDestinationIsTypedObjectArray()) {
- Label do_copy;
- __ b(&do_copy, EQ);
- if (!did_unpoison) {
- __ MaybeUnpoisonHeapReference(temp1);
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (!optimizations.GetSourceIsNonPrimitiveArray()) {
+ // /* HeapReference<Class> */ temp1 = src->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check */ false);
+ // Bail out if the source is not a non primitive array.
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+ __ CompareAndBranchIfZero(temp1, intrinsic_slow_path->GetEntryLabel());
+ // If heap poisoning is enabled, `temp1` has been unpoisoned
+ // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
+ // /* uint16_t */ temp1 = static_cast<uint16>(temp1->primitive_type_);
+ __ LoadFromOffset(kLoadUnsignedHalfword, temp1, temp1, primitive_offset);
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ CompareAndBranchIfNonZero(temp1, intrinsic_slow_path->GetEntryLabel());
}
- // /* HeapReference<Class> */ temp1 = temp1->component_type_
- __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
- __ MaybeUnpoisonHeapReference(temp1);
- // /* HeapReference<Class> */ temp1 = temp1->super_class_
- __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
- // No need to unpoison the result, we're comparing against null.
- __ CompareAndBranchIfNonZero(temp1, slow_path->GetEntryLabel());
- __ Bind(&do_copy);
+
+ // /* HeapReference<Class> */ temp1 = dest->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, temp1_loc, dest, class_offset, temp2_loc, /* needs_null_check */ false);
+
+ if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
+ // Bail out if the destination is not a non primitive array.
+ //
+ // Register `temp1` is not trashed by the read barrier emitted
+ // by GenerateFieldLoadWithBakerReadBarrier below, as that
+ // method produces a call to a ReadBarrierMarkRegX entry point,
+ // which saves all potentially live registers, including
+ // temporaries such as `temp1`.
+ // /* HeapReference<Class> */ temp2 = temp1->component_type_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, temp2_loc, temp1, component_offset, temp3_loc, /* needs_null_check */ false);
+ __ CompareAndBranchIfZero(temp2, intrinsic_slow_path->GetEntryLabel());
+ // If heap poisoning is enabled, `temp2` has been unpoisoned
+ // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
+ // /* uint16_t */ temp2 = static_cast<uint16>(temp2->primitive_type_);
+ __ LoadFromOffset(kLoadUnsignedHalfword, temp2, temp2, primitive_offset);
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ CompareAndBranchIfNonZero(temp2, intrinsic_slow_path->GetEntryLabel());
+ }
+
+ // For the same reason given earlier, `temp1` is not trashed by the
+ // read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
+ // /* HeapReference<Class> */ temp2 = src->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, temp2_loc, src, class_offset, temp3_loc, /* needs_null_check */ false);
+ // Note: if heap poisoning is on, we are comparing two unpoisoned references here.
+ __ cmp(temp1, ShifterOperand(temp2));
+
+ if (optimizations.GetDestinationIsTypedObjectArray()) {
+ Label do_copy;
+ __ b(&do_copy, EQ);
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+ // /* HeapReference<Class> */ temp1 = temp1->super_class_
+ // We do not need to emit a read barrier for the following
+ // heap reference load, as `temp1` is only used in a
+ // comparison with null below, and this reference is not
+ // kept afterwards.
+ __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
+ __ CompareAndBranchIfNonZero(temp1, intrinsic_slow_path->GetEntryLabel());
+ __ Bind(&do_copy);
+ } else {
+ __ b(intrinsic_slow_path->GetEntryLabel(), NE);
+ }
} else {
- __ b(slow_path->GetEntryLabel(), NE);
+ // Non read barrier code.
+
+ // /* HeapReference<Class> */ temp1 = dest->klass_
+ __ LoadFromOffset(kLoadWord, temp1, dest, class_offset);
+ // /* HeapReference<Class> */ temp2 = src->klass_
+ __ LoadFromOffset(kLoadWord, temp2, src, class_offset);
+ bool did_unpoison = false;
+ if (!optimizations.GetDestinationIsNonPrimitiveArray() ||
+ !optimizations.GetSourceIsNonPrimitiveArray()) {
+ // One or two of the references need to be unpoisoned. Unpoison them
+ // both to make the identity check valid.
+ __ MaybeUnpoisonHeapReference(temp1);
+ __ MaybeUnpoisonHeapReference(temp2);
+ did_unpoison = true;
+ }
+
+ if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
+ // Bail out if the destination is not a non primitive array.
+ // /* HeapReference<Class> */ temp3 = temp1->component_type_
+ __ LoadFromOffset(kLoadWord, temp3, temp1, component_offset);
+ __ CompareAndBranchIfZero(temp3, intrinsic_slow_path->GetEntryLabel());
+ __ MaybeUnpoisonHeapReference(temp3);
+ // /* uint16_t */ temp3 = static_cast<uint16>(temp3->primitive_type_);
+ __ LoadFromOffset(kLoadUnsignedHalfword, temp3, temp3, primitive_offset);
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ CompareAndBranchIfNonZero(temp3, intrinsic_slow_path->GetEntryLabel());
+ }
+
+ if (!optimizations.GetSourceIsNonPrimitiveArray()) {
+ // Bail out if the source is not a non primitive array.
+ // /* HeapReference<Class> */ temp3 = temp2->component_type_
+ __ LoadFromOffset(kLoadWord, temp3, temp2, component_offset);
+ __ CompareAndBranchIfZero(temp3, intrinsic_slow_path->GetEntryLabel());
+ __ MaybeUnpoisonHeapReference(temp3);
+ // /* uint16_t */ temp3 = static_cast<uint16>(temp3->primitive_type_);
+ __ LoadFromOffset(kLoadUnsignedHalfword, temp3, temp3, primitive_offset);
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ CompareAndBranchIfNonZero(temp3, intrinsic_slow_path->GetEntryLabel());
+ }
+
+ __ cmp(temp1, ShifterOperand(temp2));
+
+ if (optimizations.GetDestinationIsTypedObjectArray()) {
+ Label do_copy;
+ __ b(&do_copy, EQ);
+ if (!did_unpoison) {
+ __ MaybeUnpoisonHeapReference(temp1);
+ }
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
+ __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
+ __ MaybeUnpoisonHeapReference(temp1);
+ // /* HeapReference<Class> */ temp1 = temp1->super_class_
+ __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
+ // No need to unpoison the result, we're comparing against null.
+ __ CompareAndBranchIfNonZero(temp1, intrinsic_slow_path->GetEntryLabel());
+ __ Bind(&do_copy);
+ } else {
+ __ b(intrinsic_slow_path->GetEntryLabel(), NE);
+ }
}
} else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
// Bail out if the source is not a non primitive array.
- // /* HeapReference<Class> */ temp1 = src->klass_
- __ LoadFromOffset(kLoadWord, temp1, src, class_offset);
- __ MaybeUnpoisonHeapReference(temp1);
- // /* HeapReference<Class> */ temp3 = temp1->component_type_
- __ LoadFromOffset(kLoadWord, temp3, temp1, component_offset);
- __ CompareAndBranchIfZero(temp3, slow_path->GetEntryLabel());
- __ MaybeUnpoisonHeapReference(temp3);
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // /* HeapReference<Class> */ temp1 = src->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check */ false);
+ // /* HeapReference<Class> */ temp3 = temp1->component_type_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, temp3_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+ __ CompareAndBranchIfZero(temp3, intrinsic_slow_path->GetEntryLabel());
+ // If heap poisoning is enabled, `temp3` has been unpoisoned
+ // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
+ } else {
+ // /* HeapReference<Class> */ temp1 = src->klass_
+ __ LoadFromOffset(kLoadWord, temp1, src, class_offset);
+ __ MaybeUnpoisonHeapReference(temp1);
+ // /* HeapReference<Class> */ temp3 = temp1->component_type_
+ __ LoadFromOffset(kLoadWord, temp3, temp1, component_offset);
+ __ CompareAndBranchIfZero(temp3, intrinsic_slow_path->GetEntryLabel());
+ __ MaybeUnpoisonHeapReference(temp3);
+ }
+ // /* uint16_t */ temp3 = static_cast<uint16>(temp3->primitive_type_);
__ LoadFromOffset(kLoadUnsignedHalfword, temp3, temp3, primitive_offset);
static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
- __ CompareAndBranchIfNonZero(temp3, slow_path->GetEntryLabel());
+ __ CompareAndBranchIfNonZero(temp3, intrinsic_slow_path->GetEntryLabel());
}
- // Compute base source address, base destination address, and end source address.
-
int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot);
+ uint32_t element_size_shift = Primitive::ComponentSizeShift(Primitive::kPrimNot);
uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value();
+
+ // Compute the base source address in `temp1`.
if (src_pos.IsConstant()) {
int32_t constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
__ AddConstant(temp1, src, element_size * constant + offset);
} else {
- __ add(temp1, src, ShifterOperand(src_pos.AsRegister<Register>(), LSL, 2));
+ __ add(temp1, src, ShifterOperand(src_pos.AsRegister<Register>(), LSL, element_size_shift));
__ AddConstant(temp1, offset);
}
- if (dest_pos.IsConstant()) {
- int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
- __ AddConstant(temp2, dest, element_size * constant + offset);
- } else {
- __ add(temp2, dest, ShifterOperand(dest_pos.AsRegister<Register>(), LSL, 2));
- __ AddConstant(temp2, offset);
- }
-
+ // Compute the end source address in `temp3`.
if (length.IsConstant()) {
int32_t constant = length.GetConstant()->AsIntConstant()->GetValue();
__ AddConstant(temp3, temp1, element_size * constant);
} else {
- __ add(temp3, temp1, ShifterOperand(length.AsRegister<Register>(), LSL, 2));
+ __ add(temp3, temp1, ShifterOperand(length.AsRegister<Register>(), LSL, element_size_shift));
}
- // Iterate over the arrays and do a raw copy of the objects. We don't need to
- // poison/unpoison.
- Label loop, done;
- __ cmp(temp1, ShifterOperand(temp3));
- __ b(&done, EQ);
- __ Bind(&loop);
- __ ldr(IP, Address(temp1, element_size, Address::PostIndex));
- __ str(IP, Address(temp2, element_size, Address::PostIndex));
- __ cmp(temp1, ShifterOperand(temp3));
- __ b(&loop, NE);
- __ Bind(&done);
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // The base destination address is computed later, as `temp2` is
+ // used for intermediate computations.
+
+ // SystemArrayCopy implementation for Baker read barriers (see
+ // also CodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier):
+ //
+ // if (src_ptr != end_ptr) {
+ // uint32_t rb_state = LockWord(src->monitor_).ReadBarrierState();
+ // lfence; // Load fence or artificial data dependency to prevent load-load reordering
+ // bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+ // if (is_gray) {
+ // // Slow-path copy.
+ // do {
+ // *dest_ptr++ = MaybePoison(ReadBarrier::Mark(MaybeUnpoison(*src_ptr++)));
+ // } while (src_ptr != end_ptr)
+ // } else {
+ // // Fast-path copy.
+ // do {
+ // *dest_ptr++ = *src_ptr++;
+ // } while (src_ptr != end_ptr)
+ // }
+ // }
+
+ Label loop, done;
+
+ // Don't enter copy loop if `length == 0`.
+ __ cmp(temp1, ShifterOperand(temp3));
+ __ b(&done, EQ);
+
+ // /* int32_t */ monitor = src->monitor_
+ __ LoadFromOffset(kLoadWord, temp2, src, monitor_offset);
+ // /* LockWord */ lock_word = LockWord(monitor)
+ static_assert(sizeof(LockWord) == sizeof(int32_t),
+ "art::LockWord and int32_t have different sizes.");
+
+ // Introduce a dependency on the lock_word including the rb_state,
+ // which shall prevent load-load reordering without using
+ // a memory barrier (which would be more expensive).
+ // `src` is unchanged by this operation, but its value now depends
+ // on `temp2`.
+ __ add(src, src, ShifterOperand(temp2, LSR, 32));
+
+ // Slow path used to copy array when `src` is gray.
+ SlowPathCode* read_barrier_slow_path =
+ new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARM(invoke);
+ codegen_->AddSlowPath(read_barrier_slow_path);
+
+ // Given the numeric representation, it's enough to check the low bit of the
+ // rb_state. We do that by shifting the bit out of the lock word with LSRS
+ // which can be a 16-bit instruction unlike the TST immediate.
+ static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
+ static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+ __ Lsrs(temp2, temp2, LockWord::kReadBarrierStateShift + 1);
+ // Carry flag is the last bit shifted out by LSRS.
+ __ b(read_barrier_slow_path->GetEntryLabel(), CS);
+
+ // Fast-path copy.
+
+ // Compute the base destination address in `temp2`.
+ if (dest_pos.IsConstant()) {
+ int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ AddConstant(temp2, dest, element_size * constant + offset);
+ } else {
+ __ add(temp2, dest, ShifterOperand(dest_pos.AsRegister<Register>(), LSL, element_size_shift));
+ __ AddConstant(temp2, offset);
+ }
+
+ // Iterate over the arrays and do a raw copy of the objects. We don't need to
+ // poison/unpoison.
+ __ Bind(&loop);
+ __ ldr(IP, Address(temp1, element_size, Address::PostIndex));
+ __ str(IP, Address(temp2, element_size, Address::PostIndex));
+ __ cmp(temp1, ShifterOperand(temp3));
+ __ b(&loop, NE);
+
+ __ Bind(read_barrier_slow_path->GetExitLabel());
+ __ Bind(&done);
+ } else {
+ // Non read barrier code.
+
+ // Compute the base destination address in `temp2`.
+ if (dest_pos.IsConstant()) {
+ int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ AddConstant(temp2, dest, element_size * constant + offset);
+ } else {
+ __ add(temp2, dest, ShifterOperand(dest_pos.AsRegister<Register>(), LSL, element_size_shift));
+ __ AddConstant(temp2, offset);
+ }
+
+ // Iterate over the arrays and do a raw copy of the objects. We don't need to
+ // poison/unpoison.
+ Label loop, done;
+ __ cmp(temp1, ShifterOperand(temp3));
+ __ b(&done, EQ);
+ __ Bind(&loop);
+ __ ldr(IP, Address(temp1, element_size, Address::PostIndex));
+ __ str(IP, Address(temp2, element_size, Address::PostIndex));
+ __ cmp(temp1, ShifterOperand(temp3));
+ __ b(&loop, NE);
+ __ Bind(&done);
+ }
// We only need one card marking on the destination array.
codegen_->MarkGCCard(temp1,
@@ -1651,7 +1923,7 @@
Register(kNoRegister),
/* value_can_be_null */ false);
- __ Bind(slow_path->GetExitLabel());
+ __ Bind(intrinsic_slow_path->GetExitLabel());
}
static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 9cfe3ce..91374b3 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -144,6 +144,73 @@
DISALLOW_COPY_AND_ASSIGN(IntrinsicSlowPathARM64);
};
+// Slow path implementing the SystemArrayCopy intrinsic copy loop with read barriers.
+class ReadBarrierSystemArrayCopySlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ ReadBarrierSystemArrayCopySlowPathARM64(HInstruction* instruction, Location tmp)
+ : SlowPathCodeARM64(instruction), tmp_(tmp) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+ }
+
+ void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+ CodeGeneratorARM64* codegen = down_cast<CodeGeneratorARM64*>(codegen_in);
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(locations->CanCall());
+ DCHECK(instruction_->IsInvokeStaticOrDirect())
+ << "Unexpected instruction in read barrier arraycopy slow path: "
+ << instruction_->DebugName();
+ DCHECK(instruction_->GetLocations()->Intrinsified());
+ DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kSystemArrayCopy);
+
+ const int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot);
+
+ Register src_curr_addr = XRegisterFrom(locations->GetTemp(0));
+ Register dst_curr_addr = XRegisterFrom(locations->GetTemp(1));
+ Register src_stop_addr = XRegisterFrom(locations->GetTemp(2));
+ Register tmp_reg = WRegisterFrom(tmp_);
+
+ __ Bind(GetEntryLabel());
+ vixl::aarch64::Label slow_copy_loop;
+ __ Bind(&slow_copy_loop);
+ __ Ldr(tmp_reg, MemOperand(src_curr_addr, element_size, PostIndex));
+ codegen->GetAssembler()->MaybeUnpoisonHeapReference(tmp_reg);
+ // TODO: Inline the mark bit check before calling the runtime?
+ // tmp_reg = ReadBarrier::Mark(tmp_reg);
+ // No need to save live registers; it's taken care of by the
+ // entrypoint. Also, there is no need to update the stack mask,
+ // as this runtime call will not trigger a garbage collection.
+ // (See ReadBarrierMarkSlowPathARM64::EmitNativeCode for more
+ // explanations.)
+ DCHECK_NE(tmp_.reg(), LR);
+ DCHECK_NE(tmp_.reg(), WSP);
+ DCHECK_NE(tmp_.reg(), WZR);
+ // IP0 is used internally by the ReadBarrierMarkRegX entry point
+ // as a temporary (and not preserved). It thus cannot be used by
+ // any live register in this slow path.
+ DCHECK_NE(LocationFrom(src_curr_addr).reg(), IP0);
+ DCHECK_NE(LocationFrom(dst_curr_addr).reg(), IP0);
+ DCHECK_NE(LocationFrom(src_stop_addr).reg(), IP0);
+ DCHECK_NE(tmp_.reg(), IP0);
+ DCHECK(0 <= tmp_.reg() && tmp_.reg() < kNumberOfWRegisters) << tmp_.reg();
+ int32_t entry_point_offset =
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(tmp_.reg());
+ // This runtime call does not require a stack map.
+ codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
+ codegen->GetAssembler()->MaybePoisonHeapReference(tmp_reg);
+ __ Str(tmp_reg, MemOperand(dst_curr_addr, element_size, PostIndex));
+ __ Cmp(src_curr_addr, src_stop_addr);
+ __ B(&slow_copy_loop, ne);
+ __ B(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "ReadBarrierSystemArrayCopySlowPathARM64"; }
+
+ private:
+ Location tmp_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReadBarrierSystemArrayCopySlowPathARM64);
+};
#undef __
bool IntrinsicLocationsBuilderARM64::TryDispatch(HInvoke* invoke) {
@@ -2035,9 +2102,9 @@
// We want to use two temporary registers in order to reduce the register pressure in arm64.
// So we don't use the CodeGenerator::CreateSystemArrayCopyLocationSummary.
void IntrinsicLocationsBuilderARM64::VisitSystemArrayCopy(HInvoke* invoke) {
- // TODO(rpl): Implement read barriers in the SystemArrayCopy
- // intrinsic and re-enable it (b/29516905).
- if (kEmitCompilerReadBarrier) {
+ // The only read barrier implementation supporting the
+ // SystemArrayCopy intrinsic is the Baker-style read barrier.
+ if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
return;
}
@@ -2090,12 +2157,20 @@
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // Temporary register IP0, obtained from the VIXL scratch register
+ // pool, cannot be used in ReadBarrierSystemArrayCopySlowPathARM64
+ // (because that register is clobbered by ReadBarrierMarkRegX
+ // entry points). Get an extra temporary register from the
+ // register allocator.
+ locations->AddTemp(Location::RequiresRegister());
+ }
}
void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
- // TODO(rpl): Implement read barriers in the SystemArrayCopy
- // intrinsic and re-enable it (b/29516905).
- DCHECK(!kEmitCompilerReadBarrier);
+ // The only read barrier implementation supporting the
+ // SystemArrayCopy intrinsic is the Baker-style read barriers.
+ DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
MacroAssembler* masm = GetVIXLAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -2104,6 +2179,7 @@
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+ uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
Register src = XRegisterFrom(locations->InAt(0));
Location src_pos = locations->InAt(1);
@@ -2111,10 +2187,12 @@
Location dest_pos = locations->InAt(3);
Location length = locations->InAt(4);
Register temp1 = WRegisterFrom(locations->GetTemp(0));
+ Location temp1_loc = LocationFrom(temp1);
Register temp2 = WRegisterFrom(locations->GetTemp(1));
+ Location temp2_loc = LocationFrom(temp2);
- SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCodeARM64* intrinsic_slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+ codegen_->AddSlowPath(intrinsic_slow_path);
vixl::aarch64::Label conditions_on_positions_validated;
SystemArrayCopyOptimizations optimizations(invoke);
@@ -2130,7 +2208,7 @@
DCHECK_GE(src_pos_constant, dest_pos_constant);
} else if (src_pos_constant < dest_pos_constant) {
__ Cmp(src, dest);
- __ B(slow_path->GetEntryLabel(), eq);
+ __ B(intrinsic_slow_path->GetEntryLabel(), eq);
}
// Checked when building locations.
DCHECK(!optimizations.GetDestinationIsSource()
@@ -2141,7 +2219,7 @@
__ B(&conditions_on_positions_validated, ne);
}
__ Cmp(WRegisterFrom(dest_pos), src_pos_constant);
- __ B(slow_path->GetEntryLabel(), gt);
+ __ B(intrinsic_slow_path->GetEntryLabel(), gt);
}
} else {
if (!optimizations.GetDestinationIsSource()) {
@@ -2150,19 +2228,19 @@
}
__ Cmp(RegisterFrom(src_pos, invoke->InputAt(1)->GetType()),
OperandFrom(dest_pos, invoke->InputAt(3)->GetType()));
- __ B(slow_path->GetEntryLabel(), lt);
+ __ B(intrinsic_slow_path->GetEntryLabel(), lt);
}
__ Bind(&conditions_on_positions_validated);
if (!optimizations.GetSourceIsNotNull()) {
// Bail out if the source is null.
- __ Cbz(src, slow_path->GetEntryLabel());
+ __ Cbz(src, intrinsic_slow_path->GetEntryLabel());
}
if (!optimizations.GetDestinationIsNotNull() && !optimizations.GetDestinationIsSource()) {
// Bail out if the destination is null.
- __ Cbz(dest, slow_path->GetEntryLabel());
+ __ Cbz(dest, intrinsic_slow_path->GetEntryLabel());
}
// We have already checked in the LocationsBuilder for the constant case.
@@ -2170,17 +2248,17 @@
!optimizations.GetCountIsSourceLength() &&
!optimizations.GetCountIsDestinationLength()) {
// If the length is negative, bail out.
- __ Tbnz(WRegisterFrom(length), kWRegSize - 1, slow_path->GetEntryLabel());
+ __ Tbnz(WRegisterFrom(length), kWRegSize - 1, intrinsic_slow_path->GetEntryLabel());
// If the length >= 128 then (currently) prefer native implementation.
__ Cmp(WRegisterFrom(length), kSystemArrayCopyThreshold);
- __ B(slow_path->GetEntryLabel(), ge);
+ __ B(intrinsic_slow_path->GetEntryLabel(), ge);
}
// Validity checks: source.
CheckSystemArrayCopyPosition(masm,
src_pos,
src,
length,
- slow_path,
+ intrinsic_slow_path,
temp1,
optimizations.GetCountIsSourceLength());
@@ -2189,90 +2267,236 @@
dest_pos,
dest,
length,
- slow_path,
+ intrinsic_slow_path,
temp1,
optimizations.GetCountIsDestinationLength());
{
// We use a block to end the scratch scope before the write barrier, thus
// freeing the temporary registers so they can be used in `MarkGCCard`.
UseScratchRegisterScope temps(masm);
+ // Note: Because it is acquired from VIXL's scratch register pool,
+ // `temp3` might be IP0, and thus cannot be used as `ref` argument
+ // of CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier
+ // calls below (see ReadBarrierMarkSlowPathARM64 for more details).
Register temp3 = temps.AcquireW();
+
if (!optimizations.GetDoesNotNeedTypeCheck()) {
// Check whether all elements of the source array are assignable to the component
// type of the destination array. We do two checks: the classes are the same,
// or the destination is Object[]. If none of these checks succeed, we go to the
// slow path.
- __ Ldr(temp1, MemOperand(dest, class_offset));
- __ Ldr(temp2, MemOperand(src, class_offset));
- bool did_unpoison = false;
- if (!optimizations.GetDestinationIsNonPrimitiveArray() ||
- !optimizations.GetSourceIsNonPrimitiveArray()) {
- // One or two of the references need to be unpoisoned. Unpoison them
- // both to make the identity check valid.
- codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp1);
- codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp2);
- did_unpoison = true;
- }
- if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
- // Bail out if the destination is not a non primitive array.
- // /* HeapReference<Class> */ temp3 = temp1->component_type_
- __ Ldr(temp3, HeapOperand(temp1, component_offset));
- __ Cbz(temp3, slow_path->GetEntryLabel());
- codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp3);
- __ Ldrh(temp3, HeapOperand(temp3, primitive_offset));
- static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
- __ Cbnz(temp3, slow_path->GetEntryLabel());
- }
-
- if (!optimizations.GetSourceIsNonPrimitiveArray()) {
- // Bail out if the source is not a non primitive array.
- // /* HeapReference<Class> */ temp3 = temp2->component_type_
- __ Ldr(temp3, HeapOperand(temp2, component_offset));
- __ Cbz(temp3, slow_path->GetEntryLabel());
- codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp3);
- __ Ldrh(temp3, HeapOperand(temp3, primitive_offset));
- static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
- __ Cbnz(temp3, slow_path->GetEntryLabel());
- }
-
- __ Cmp(temp1, temp2);
-
- if (optimizations.GetDestinationIsTypedObjectArray()) {
- vixl::aarch64::Label do_copy;
- __ B(&do_copy, eq);
- if (!did_unpoison) {
- codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp1);
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (!optimizations.GetSourceIsNonPrimitiveArray()) {
+ // /* HeapReference<Class> */ temp1 = src->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
+ temp1_loc,
+ src.W(),
+ class_offset,
+ temp2,
+ /* needs_null_check */ false,
+ /* use_load_acquire */ false);
+ // Bail out if the source is not a non primitive array.
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
+ temp1_loc,
+ temp1,
+ component_offset,
+ temp2,
+ /* needs_null_check */ false,
+ /* use_load_acquire */ false);
+ __ Cbz(temp1, intrinsic_slow_path->GetEntryLabel());
+ // If heap poisoning is enabled, `temp1` has been unpoisoned
+        // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
+        // /* uint16_t */ temp1 = static_cast<uint16_t>(temp1->primitive_type_);
+ __ Ldrh(temp1, HeapOperand(temp1, primitive_offset));
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ Cbnz(temp1, intrinsic_slow_path->GetEntryLabel());
}
- // /* HeapReference<Class> */ temp1 = temp1->component_type_
- __ Ldr(temp1, HeapOperand(temp1, component_offset));
- codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp1);
- // /* HeapReference<Class> */ temp1 = temp1->super_class_
- __ Ldr(temp1, HeapOperand(temp1, super_offset));
- // No need to unpoison the result, we're comparing against null.
- __ Cbnz(temp1, slow_path->GetEntryLabel());
- __ Bind(&do_copy);
+
+ // /* HeapReference<Class> */ temp1 = dest->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
+ temp1_loc,
+ dest.W(),
+ class_offset,
+ temp2,
+ /* needs_null_check */ false,
+ /* use_load_acquire */ false);
+
+ if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
+ // Bail out if the destination is not a non primitive array.
+ //
+ // Register `temp1` is not trashed by the read barrier emitted
+ // by GenerateFieldLoadWithBakerReadBarrier below, as that
+ // method produces a call to a ReadBarrierMarkRegX entry point,
+ // which saves all potentially live registers, including
+        // temporaries such as `temp1`.
+ // /* HeapReference<Class> */ temp2 = temp1->component_type_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
+ temp2_loc,
+ temp1,
+ component_offset,
+ temp3,
+ /* needs_null_check */ false,
+ /* use_load_acquire */ false);
+ __ Cbz(temp2, intrinsic_slow_path->GetEntryLabel());
+ // If heap poisoning is enabled, `temp2` has been unpoisoned
+        // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
+        // /* uint16_t */ temp2 = static_cast<uint16_t>(temp2->primitive_type_);
+ __ Ldrh(temp2, HeapOperand(temp2, primitive_offset));
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ Cbnz(temp2, intrinsic_slow_path->GetEntryLabel());
+ }
+
+ // For the same reason given earlier, `temp1` is not trashed by the
+ // read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
+ // /* HeapReference<Class> */ temp2 = src->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
+ temp2_loc,
+ src.W(),
+ class_offset,
+ temp3,
+ /* needs_null_check */ false,
+ /* use_load_acquire */ false);
+ // Note: if heap poisoning is on, we are comparing two unpoisoned references here.
+ __ Cmp(temp1, temp2);
+
+ if (optimizations.GetDestinationIsTypedObjectArray()) {
+ vixl::aarch64::Label do_copy;
+ __ B(&do_copy, eq);
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
+ temp1_loc,
+ temp1,
+ component_offset,
+ temp2,
+ /* needs_null_check */ false,
+ /* use_load_acquire */ false);
+ // /* HeapReference<Class> */ temp1 = temp1->super_class_
+ // We do not need to emit a read barrier for the following
+ // heap reference load, as `temp1` is only used in a
+ // comparison with null below, and this reference is not
+ // kept afterwards.
+ __ Ldr(temp1, HeapOperand(temp1, super_offset));
+ __ Cbnz(temp1, intrinsic_slow_path->GetEntryLabel());
+ __ Bind(&do_copy);
+ } else {
+ __ B(intrinsic_slow_path->GetEntryLabel(), ne);
+ }
} else {
- __ B(slow_path->GetEntryLabel(), ne);
+ // Non read barrier code.
+
+ // /* HeapReference<Class> */ temp1 = dest->klass_
+ __ Ldr(temp1, MemOperand(dest, class_offset));
+ // /* HeapReference<Class> */ temp2 = src->klass_
+ __ Ldr(temp2, MemOperand(src, class_offset));
+ bool did_unpoison = false;
+ if (!optimizations.GetDestinationIsNonPrimitiveArray() ||
+ !optimizations.GetSourceIsNonPrimitiveArray()) {
+ // One or two of the references need to be unpoisoned. Unpoison them
+ // both to make the identity check valid.
+ codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp1);
+ codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp2);
+ did_unpoison = true;
+ }
+
+ if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
+ // Bail out if the destination is not a non primitive array.
+ // /* HeapReference<Class> */ temp3 = temp1->component_type_
+ __ Ldr(temp3, HeapOperand(temp1, component_offset));
+ __ Cbz(temp3, intrinsic_slow_path->GetEntryLabel());
+ codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp3);
+        // /* uint16_t */ temp3 = static_cast<uint16_t>(temp3->primitive_type_);
+ __ Ldrh(temp3, HeapOperand(temp3, primitive_offset));
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ Cbnz(temp3, intrinsic_slow_path->GetEntryLabel());
+ }
+
+ if (!optimizations.GetSourceIsNonPrimitiveArray()) {
+ // Bail out if the source is not a non primitive array.
+ // /* HeapReference<Class> */ temp3 = temp2->component_type_
+ __ Ldr(temp3, HeapOperand(temp2, component_offset));
+ __ Cbz(temp3, intrinsic_slow_path->GetEntryLabel());
+ codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp3);
+        // /* uint16_t */ temp3 = static_cast<uint16_t>(temp3->primitive_type_);
+ __ Ldrh(temp3, HeapOperand(temp3, primitive_offset));
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ Cbnz(temp3, intrinsic_slow_path->GetEntryLabel());
+ }
+
+ __ Cmp(temp1, temp2);
+
+ if (optimizations.GetDestinationIsTypedObjectArray()) {
+ vixl::aarch64::Label do_copy;
+ __ B(&do_copy, eq);
+ if (!did_unpoison) {
+ codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp1);
+ }
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
+ __ Ldr(temp1, HeapOperand(temp1, component_offset));
+ codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp1);
+ // /* HeapReference<Class> */ temp1 = temp1->super_class_
+ __ Ldr(temp1, HeapOperand(temp1, super_offset));
+ // No need to unpoison the result, we're comparing against null.
+ __ Cbnz(temp1, intrinsic_slow_path->GetEntryLabel());
+ __ Bind(&do_copy);
+ } else {
+ __ B(intrinsic_slow_path->GetEntryLabel(), ne);
+ }
}
} else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
// Bail out if the source is not a non primitive array.
- // /* HeapReference<Class> */ temp1 = src->klass_
- __ Ldr(temp1, HeapOperand(src.W(), class_offset));
- codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp1);
- // /* HeapReference<Class> */ temp3 = temp1->component_type_
- __ Ldr(temp3, HeapOperand(temp1, component_offset));
- __ Cbz(temp3, slow_path->GetEntryLabel());
- codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp3);
- __ Ldrh(temp3, HeapOperand(temp3, primitive_offset));
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // /* HeapReference<Class> */ temp1 = src->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
+ temp1_loc,
+ src.W(),
+ class_offset,
+ temp2,
+ /* needs_null_check */ false,
+ /* use_load_acquire */ false);
+ // /* HeapReference<Class> */ temp2 = temp1->component_type_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
+ temp2_loc,
+ temp1,
+ component_offset,
+ temp3,
+ /* needs_null_check */ false,
+ /* use_load_acquire */ false);
+ __ Cbz(temp2, intrinsic_slow_path->GetEntryLabel());
+ // If heap poisoning is enabled, `temp2` has been unpoisoned
+      // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
+ } else {
+ // /* HeapReference<Class> */ temp1 = src->klass_
+ __ Ldr(temp1, HeapOperand(src.W(), class_offset));
+ codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp1);
+ // /* HeapReference<Class> */ temp2 = temp1->component_type_
+ __ Ldr(temp2, HeapOperand(temp1, component_offset));
+ __ Cbz(temp2, intrinsic_slow_path->GetEntryLabel());
+ codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp2);
+ }
+    // /* uint16_t */ temp2 = static_cast<uint16_t>(temp2->primitive_type_);
+ __ Ldrh(temp2, HeapOperand(temp2, primitive_offset));
static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
- __ Cbnz(temp3, slow_path->GetEntryLabel());
+ __ Cbnz(temp2, intrinsic_slow_path->GetEntryLabel());
}
Register src_curr_addr = temp1.X();
Register dst_curr_addr = temp2.X();
- Register src_stop_addr = temp3.X();
+ Register src_stop_addr;
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // Temporary register IP0, obtained from the VIXL scratch
+ // register pool as `temp3`, cannot be used in
+ // ReadBarrierSystemArrayCopySlowPathARM64 (because that
+ // register is clobbered by ReadBarrierMarkRegX entry points).
+    // So another temporary register, allocated by the register
+    // allocator, is used instead.
+ DCHECK_EQ(LocationFrom(temp3).reg(), IP0);
+ src_stop_addr = XRegisterFrom(locations->GetTemp(2));
+ } else {
+ src_stop_addr = temp3.X();
+ }
GenSystemArrayCopyAddresses(masm,
Primitive::kPrimNot,
@@ -2285,25 +2509,98 @@
dst_curr_addr,
src_stop_addr);
- // Iterate over the arrays and do a raw copy of the objects. We don't need to
- // poison/unpoison.
- vixl::aarch64::Label loop, done;
const int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot);
- __ Bind(&loop);
- __ Cmp(src_curr_addr, src_stop_addr);
- __ B(&done, eq);
- {
+
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // SystemArrayCopy implementation for Baker read barriers (see
+ // also CodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier):
+ //
+ // if (src_ptr != end_ptr) {
+    //   uint32_t rb_state = LockWord(src->monitor_).ReadBarrierState();
+ // lfence; // Load fence or artificial data dependency to prevent load-load reordering
+ // bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+ // if (is_gray) {
+ // // Slow-path copy.
+ // do {
+ // *dest_ptr++ = MaybePoison(ReadBarrier::Mark(MaybeUnpoison(*src_ptr++)));
+ // } while (src_ptr != end_ptr)
+ // } else {
+ // // Fast-path copy.
+ // do {
+ // *dest_ptr++ = *src_ptr++;
+ // } while (src_ptr != end_ptr)
+ // }
+ // }
+
+ vixl::aarch64::Label loop, done;
+
+ // Don't enter copy loop if `length == 0`.
+ __ Cmp(src_curr_addr, src_stop_addr);
+ __ B(&done, eq);
+
Register tmp = temps.AcquireW();
+ // Make sure `tmp` is not IP0, as it is clobbered by
+ // ReadBarrierMarkRegX entry points in
+ // ReadBarrierSystemArrayCopySlowPathARM64.
+ DCHECK_NE(LocationFrom(tmp).reg(), IP0);
+
+ // /* int32_t */ monitor = src->monitor_
+ __ Ldr(tmp, HeapOperand(src.W(), monitor_offset));
+ // /* LockWord */ lock_word = LockWord(monitor)
+ static_assert(sizeof(LockWord) == sizeof(int32_t),
+ "art::LockWord and int32_t have different sizes.");
+
+ // Introduce a dependency on the lock_word including rb_state,
+ // to prevent load-load reordering, and without using
+ // a memory barrier (which would be more expensive).
+ // `src` is unchanged by this operation, but its value now depends
+ // on `tmp`.
+ __ Add(src.X(), src.X(), Operand(tmp.X(), LSR, 32));
+
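+    // (The preceding `Ldr` into the W view of `tmp` zero-extends, so
+    // `tmp.X() LSR #32` is always 0: the ADD leaves the value of `src`
+    // unchanged and only creates the register dependency.)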
+ // Slow path used to copy array when `src` is gray.
+ SlowPathCodeARM64* read_barrier_slow_path =
+ new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARM64(invoke, LocationFrom(tmp));
+ codegen_->AddSlowPath(read_barrier_slow_path);
+
+ // Given the numeric representation, it's enough to check the low bit of the rb_state.
+ static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
+ static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+ __ Tbnz(tmp, LockWord::kReadBarrierStateShift, read_barrier_slow_path->GetEntryLabel());
+
+ // Fast-path copy.
+ // Iterate over the arrays and do a raw copy of the objects. We don't need to
+ // poison/unpoison.
+ __ Bind(&loop);
__ Ldr(tmp, MemOperand(src_curr_addr, element_size, PostIndex));
__ Str(tmp, MemOperand(dst_curr_addr, element_size, PostIndex));
+ __ Cmp(src_curr_addr, src_stop_addr);
+ __ B(&loop, ne);
+
+ __ Bind(read_barrier_slow_path->GetExitLabel());
+ __ Bind(&done);
+ } else {
+ // Non read barrier code.
+
+ // Iterate over the arrays and do a raw copy of the objects. We don't need to
+ // poison/unpoison.
+ vixl::aarch64::Label loop, done;
+ __ Bind(&loop);
+ __ Cmp(src_curr_addr, src_stop_addr);
+ __ B(&done, eq);
+ {
+ Register tmp = temps.AcquireW();
+ __ Ldr(tmp, MemOperand(src_curr_addr, element_size, PostIndex));
+ __ Str(tmp, MemOperand(dst_curr_addr, element_size, PostIndex));
+ }
+ __ B(&loop);
+ __ Bind(&done);
}
- __ B(&loop);
- __ Bind(&done);
}
// We only need one card marking on the destination array.
codegen_->MarkGCCard(dest.W(), Register(), /* value_can_be_null */ false);
- __ Bind(slow_path->GetExitLabel());
+ __ Bind(intrinsic_slow_path->GetExitLabel());
}
static void GenIsInfinite(LocationSummary* locations,
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 22f4181..49d6c19 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -70,6 +70,105 @@
using IntrinsicSlowPathX86 = IntrinsicSlowPath<InvokeDexCallingConventionVisitorX86>;
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ down_cast<X86Assembler*>(codegen->GetAssembler())-> // NOLINT
+
+// Slow path implementing the SystemArrayCopy intrinsic copy loop with read barriers.
+class ReadBarrierSystemArrayCopySlowPathX86 : public SlowPathCode {
+ public:
+ explicit ReadBarrierSystemArrayCopySlowPathX86(HInstruction* instruction)
+ : SlowPathCode(instruction) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+ }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(locations->CanCall());
+ DCHECK(instruction_->IsInvokeStaticOrDirect())
+ << "Unexpected instruction in read barrier arraycopy slow path: "
+ << instruction_->DebugName();
+ DCHECK(instruction_->GetLocations()->Intrinsified());
+ DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kSystemArrayCopy);
+
+ int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot);
+ uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value();
+
+ Register src = locations->InAt(0).AsRegister<Register>();
+ Location src_pos = locations->InAt(1);
+ Register dest = locations->InAt(2).AsRegister<Register>();
+ Location dest_pos = locations->InAt(3);
+ Location length = locations->InAt(4);
+ Location temp1_loc = locations->GetTemp(0);
+ Register temp1 = temp1_loc.AsRegister<Register>();
+ Register temp2 = locations->GetTemp(1).AsRegister<Register>();
+ Register temp3 = locations->GetTemp(2).AsRegister<Register>();
+
+ __ Bind(GetEntryLabel());
+    // In this code path, registers `temp1`, `temp2`, and `temp3` are not
+    // used for the base source address, the base destination address, and
+    // the end source address, as they are in the other SystemArrayCopy
+    // intrinsic code paths. Instead they are used, respectively, for:
+ // - the loop index (`i`);
+ // - the source index (`src_index`) and the loaded (source)
+ // reference (`value`); and
+ // - the destination index (`dest_index`).
+
+ // i = 0
+ __ xorl(temp1, temp1);
+ NearLabel loop;
+ __ Bind(&loop);
+ // value = src_array[i + src_pos]
+ if (src_pos.IsConstant()) {
+ int32_t constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
+ int32_t adjusted_offset = offset + constant * element_size;
+ __ movl(temp2, Address(src, temp1, ScaleFactor::TIMES_4, adjusted_offset));
+ } else {
+ __ leal(temp2, Address(src_pos.AsRegister<Register>(), temp1, ScaleFactor::TIMES_1, 0));
+ __ movl(temp2, Address(src, temp2, ScaleFactor::TIMES_4, offset));
+ }
+ __ MaybeUnpoisonHeapReference(temp2);
+ // TODO: Inline the mark bit check before calling the runtime?
+ // value = ReadBarrier::Mark(value)
+ // No need to save live registers; it's taken care of by the
+ // entrypoint. Also, there is no need to update the stack mask,
+ // as this runtime call will not trigger a garbage collection.
+ // (See ReadBarrierMarkSlowPathX86::EmitNativeCode for more
+ // explanations.)
+ DCHECK_NE(temp2, ESP);
+ DCHECK(0 <= temp2 && temp2 < kNumberOfCpuRegisters) << temp2;
+ int32_t entry_point_offset =
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86PointerSize>(temp2);
+ // This runtime call does not require a stack map.
+ x86_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
+ __ MaybePoisonHeapReference(temp2);
+ // dest_array[i + dest_pos] = value
+ if (dest_pos.IsConstant()) {
+ int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
+ int32_t adjusted_offset = offset + constant * element_size;
+ __ movl(Address(dest, temp1, ScaleFactor::TIMES_4, adjusted_offset), temp2);
+ } else {
+ __ leal(temp3, Address(dest_pos.AsRegister<Register>(), temp1, ScaleFactor::TIMES_1, 0));
+ __ movl(Address(dest, temp3, ScaleFactor::TIMES_4, offset), temp2);
+ }
+ // ++i
+ __ addl(temp1, Immediate(1));
+ // if (i != length) goto loop
+ x86_codegen->GenerateIntCompare(temp1_loc, length);
+ __ j(kNotEqual, &loop);
+ __ jmp(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "ReadBarrierSystemArrayCopySlowPathX86"; }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ReadBarrierSystemArrayCopySlowPathX86);
+};
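The register roles listed above correspond to the rough C++ model below, a sketch under the same assumptions as the ARM64 sketch earlier; the trivial helpers are illustrative stand-ins, not ART APIs.

  #include <cstdint>

  using HeapRef = uint32_t;  // assumed compressed heap reference, as in the earlier sketch
  // Trivial stand-ins (no poisoning, identity mark) keep this sketch self-contained;
  // in the generated code these are the poison/unpoison steps and ReadBarrierMarkRegX.
  inline HeapRef MaybeUnpoison(HeapRef ref) { return ref; }
  inline HeapRef MaybePoison(HeapRef ref) { return ref; }
  inline HeapRef MarkReference(HeapRef ref) { return ref; }

  // Indexed form of the slow-path copy: `i` counts copied elements, and the
  // source/destination element addresses are recomputed from (pos + i) on each
  // iteration, matching the leal/movl sequences emitted above.
  void SlowPathCopyIndexed(HeapRef* src_array, int32_t src_pos,
                           HeapRef* dest_array, int32_t dest_pos,
                           int32_t length) {
    int32_t i = 0;
    do {
      HeapRef value = MaybeUnpoison(src_array[src_pos + i]);
      value = MarkReference(value);
      dest_array[dest_pos + i] = MaybePoison(value);
      ++i;
    } while (i != length);
  }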
+
+#undef __
+
#define __ assembler->
static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke, bool is64bit) {
@@ -2678,9 +2777,9 @@
}
void IntrinsicLocationsBuilderX86::VisitSystemArrayCopy(HInvoke* invoke) {
- // TODO(rpl): Implement read barriers in the SystemArrayCopy
- // intrinsic and re-enable it (b/29516905).
- if (kEmitCompilerReadBarrier) {
+ // The only read barrier implementation supporting the
+ // SystemArrayCopy intrinsic is the Baker-style read barriers.
+ if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
return;
}
@@ -2710,9 +2809,9 @@
}
void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
- // TODO(rpl): Implement read barriers in the SystemArrayCopy
- // intrinsic and re-enable it (b/29516905).
- DCHECK(!kEmitCompilerReadBarrier);
+ // The only read barrier implementation supporting the
+ // SystemArrayCopy intrinsic is the Baker-style read barriers.
+ DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
X86Assembler* assembler = GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -2721,17 +2820,21 @@
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+ uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
Register src = locations->InAt(0).AsRegister<Register>();
Location src_pos = locations->InAt(1);
Register dest = locations->InAt(2).AsRegister<Register>();
Location dest_pos = locations->InAt(3);
- Location length = locations->InAt(4);
- Register temp1 = locations->GetTemp(0).AsRegister<Register>();
- Register temp2 = locations->GetTemp(1).AsRegister<Register>();
+ Location length_arg = locations->InAt(4);
+ Location length = length_arg;
+ Location temp1_loc = locations->GetTemp(0);
+ Register temp1 = temp1_loc.AsRegister<Register>();
+ Location temp2_loc = locations->GetTemp(1);
+ Register temp2 = temp2_loc.AsRegister<Register>();
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCode* intrinsic_slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
+ codegen_->AddSlowPath(intrinsic_slow_path);
NearLabel conditions_on_positions_validated;
SystemArrayCopyOptimizations optimizations(invoke);
@@ -2747,7 +2850,7 @@
DCHECK_GE(src_pos_constant, dest_pos_constant);
} else if (src_pos_constant < dest_pos_constant) {
__ cmpl(src, dest);
- __ j(kEqual, slow_path->GetEntryLabel());
+ __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
}
} else {
if (!optimizations.GetDestinationIsSource()) {
@@ -2755,7 +2858,7 @@
__ j(kNotEqual, &conditions_on_positions_validated);
}
__ cmpl(dest_pos.AsRegister<Register>(), Immediate(src_pos_constant));
- __ j(kGreater, slow_path->GetEntryLabel());
+ __ j(kGreater, intrinsic_slow_path->GetEntryLabel());
}
} else {
if (!optimizations.GetDestinationIsSource()) {
@@ -2765,10 +2868,10 @@
if (dest_pos.IsConstant()) {
int32_t dest_pos_constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
__ cmpl(src_pos.AsRegister<Register>(), Immediate(dest_pos_constant));
- __ j(kLess, slow_path->GetEntryLabel());
+ __ j(kLess, intrinsic_slow_path->GetEntryLabel());
} else {
__ cmpl(src_pos.AsRegister<Register>(), dest_pos.AsRegister<Register>());
- __ j(kLess, slow_path->GetEntryLabel());
+ __ j(kLess, intrinsic_slow_path->GetEntryLabel());
}
}
@@ -2777,16 +2880,17 @@
if (!optimizations.GetSourceIsNotNull()) {
// Bail out if the source is null.
__ testl(src, src);
- __ j(kEqual, slow_path->GetEntryLabel());
+ __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
}
if (!optimizations.GetDestinationIsNotNull() && !optimizations.GetDestinationIsSource()) {
// Bail out if the destination is null.
__ testl(dest, dest);
- __ j(kEqual, slow_path->GetEntryLabel());
+ __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
}
- Register temp3 = locations->GetTemp(2).AsRegister<Register>();
+ Location temp3_loc = locations->GetTemp(2);
+ Register temp3 = temp3_loc.AsRegister<Register>();
if (length.IsStackSlot()) {
__ movl(temp3, Address(ESP, length.GetStackIndex()));
length = Location::RegisterLocation(temp3);
@@ -2798,7 +2902,7 @@
!optimizations.GetCountIsSourceLength() &&
!optimizations.GetCountIsDestinationLength()) {
__ testl(length.AsRegister<Register>(), length.AsRegister<Register>());
- __ j(kLess, slow_path->GetEntryLabel());
+ __ j(kLess, intrinsic_slow_path->GetEntryLabel());
}
// Validity checks: source.
@@ -2806,7 +2910,7 @@
src_pos,
src,
length,
- slow_path,
+ intrinsic_slow_path,
temp1,
optimizations.GetCountIsSourceLength());
@@ -2815,7 +2919,7 @@
dest_pos,
dest,
length,
- slow_path,
+ intrinsic_slow_path,
temp1,
optimizations.GetCountIsDestinationLength());
@@ -2824,72 +2928,159 @@
// type of the destination array. We do two checks: the classes are the same,
// or the destination is Object[]. If none of these checks succeed, we go to the
// slow path.
+
if (!optimizations.GetSourceIsNonPrimitiveArray()) {
- // /* HeapReference<Class> */ temp1 = temp1->klass_
- __ movl(temp1, Address(src, class_offset));
- __ MaybeUnpoisonHeapReference(temp1);
- // Bail out if the source is not a non primitive array.
- // /* HeapReference<Class> */ temp1 = temp1->component_type_
- __ movl(temp1, Address(temp1, component_offset));
- __ testl(temp1, temp1);
- __ j(kEqual, slow_path->GetEntryLabel());
- __ MaybeUnpoisonHeapReference(temp1);
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // /* HeapReference<Class> */ temp1 = src->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check */ false);
+ // Bail out if the source is not a non primitive array.
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+ __ testl(temp1, temp1);
+ __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
+ // If heap poisoning is enabled, `temp1` has been unpoisoned
+        // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
+ } else {
+ // /* HeapReference<Class> */ temp1 = src->klass_
+ __ movl(temp1, Address(src, class_offset));
+ __ MaybeUnpoisonHeapReference(temp1);
+ // Bail out if the source is not a non primitive array.
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
+ __ movl(temp1, Address(temp1, component_offset));
+ __ testl(temp1, temp1);
+ __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
+ __ MaybeUnpoisonHeapReference(temp1);
+ }
__ cmpw(Address(temp1, primitive_offset), Immediate(Primitive::kPrimNot));
- __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ j(kNotEqual, intrinsic_slow_path->GetEntryLabel());
}
- if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
- // /* HeapReference<Class> */ temp1 = temp1->klass_
- __ movl(temp1, Address(dest, class_offset));
- __ MaybeUnpoisonHeapReference(temp1);
- // Bail out if the destination is not a non primitive array.
- // /* HeapReference<Class> */ temp2 = temp1->component_type_
- __ movl(temp2, Address(temp1, component_offset));
- __ testl(temp2, temp2);
- __ j(kEqual, slow_path->GetEntryLabel());
- __ MaybeUnpoisonHeapReference(temp2);
- __ cmpw(Address(temp2, primitive_offset), Immediate(Primitive::kPrimNot));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- // Re-poison the heap reference to make the compare instruction below
- // compare two poisoned references.
- __ PoisonHeapReference(temp1);
- } else {
- // /* HeapReference<Class> */ temp1 = temp1->klass_
- __ movl(temp1, Address(dest, class_offset));
- }
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (length.Equals(Location::RegisterLocation(temp3))) {
+ // When Baker read barriers are enabled, register `temp3`,
+ // which in the present case contains the `length` parameter,
+ // will be overwritten below. Make the `length` location
+ // reference the original stack location; it will be moved
+ // back to `temp3` later if necessary.
+ DCHECK(length_arg.IsStackSlot());
+ length = length_arg;
+ }
- // Note: if poisoning is on, we are here comparing two poisoned references.
- __ cmpl(temp1, Address(src, class_offset));
+ // /* HeapReference<Class> */ temp1 = dest->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, temp1_loc, dest, class_offset, temp2_loc, /* needs_null_check */ false);
- if (optimizations.GetDestinationIsTypedObjectArray()) {
- NearLabel do_copy;
- __ j(kEqual, &do_copy);
- __ MaybeUnpoisonHeapReference(temp1);
- // /* HeapReference<Class> */ temp1 = temp1->component_type_
- __ movl(temp1, Address(temp1, component_offset));
- __ MaybeUnpoisonHeapReference(temp1);
- __ cmpl(Address(temp1, super_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- __ Bind(&do_copy);
+ if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
+ // Bail out if the destination is not a non primitive array.
+ //
+ // Register `temp1` is not trashed by the read barrier emitted
+ // by GenerateFieldLoadWithBakerReadBarrier below, as that
+ // method produces a call to a ReadBarrierMarkRegX entry point,
+ // which saves all potentially live registers, including
+        // temporaries such as `temp1`.
+ // /* HeapReference<Class> */ temp2 = temp1->component_type_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, temp2_loc, temp1, component_offset, temp3_loc, /* needs_null_check */ false);
+ __ testl(temp2, temp2);
+ __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
+ // If heap poisoning is enabled, `temp2` has been unpoisoned
+        // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
+ __ cmpw(Address(temp2, primitive_offset), Immediate(Primitive::kPrimNot));
+ __ j(kNotEqual, intrinsic_slow_path->GetEntryLabel());
+ }
+
+ // For the same reason given earlier, `temp1` is not trashed by the
+ // read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
+ // /* HeapReference<Class> */ temp2 = src->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, temp2_loc, src, class_offset, temp3_loc, /* needs_null_check */ false);
+ // Note: if heap poisoning is on, we are comparing two unpoisoned references here.
+ __ cmpl(temp1, temp2);
+
+ if (optimizations.GetDestinationIsTypedObjectArray()) {
+ NearLabel do_copy;
+ __ j(kEqual, &do_copy);
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+ // We do not need to emit a read barrier for the following
+ // heap reference load, as `temp1` is only used in a
+ // comparison with null below, and this reference is not
+ // kept afterwards.
+ __ cmpl(Address(temp1, super_offset), Immediate(0));
+ __ j(kNotEqual, intrinsic_slow_path->GetEntryLabel());
+ __ Bind(&do_copy);
+ } else {
+ __ j(kNotEqual, intrinsic_slow_path->GetEntryLabel());
+ }
} else {
- __ j(kNotEqual, slow_path->GetEntryLabel());
+ // Non read barrier code.
+
+ // /* HeapReference<Class> */ temp1 = dest->klass_
+ __ movl(temp1, Address(dest, class_offset));
+ if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
+ __ MaybeUnpoisonHeapReference(temp1);
+ // Bail out if the destination is not a non primitive array.
+ // /* HeapReference<Class> */ temp2 = temp1->component_type_
+ __ movl(temp2, Address(temp1, component_offset));
+ __ testl(temp2, temp2);
+ __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
+ __ MaybeUnpoisonHeapReference(temp2);
+ __ cmpw(Address(temp2, primitive_offset), Immediate(Primitive::kPrimNot));
+ __ j(kNotEqual, intrinsic_slow_path->GetEntryLabel());
+ // Re-poison the heap reference to make the compare instruction below
+ // compare two poisoned references.
+ __ PoisonHeapReference(temp1);
+ }
+
+ // Note: if heap poisoning is on, we are comparing two poisoned references here.
+ __ cmpl(temp1, Address(src, class_offset));
+
+ if (optimizations.GetDestinationIsTypedObjectArray()) {
+ NearLabel do_copy;
+ __ j(kEqual, &do_copy);
+ __ MaybeUnpoisonHeapReference(temp1);
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
+ __ movl(temp1, Address(temp1, component_offset));
+ __ MaybeUnpoisonHeapReference(temp1);
+ __ cmpl(Address(temp1, super_offset), Immediate(0));
+ __ j(kNotEqual, intrinsic_slow_path->GetEntryLabel());
+ __ Bind(&do_copy);
+ } else {
+ __ j(kNotEqual, intrinsic_slow_path->GetEntryLabel());
+ }
}
} else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
// Bail out if the source is not a non primitive array.
- // /* HeapReference<Class> */ temp1 = src->klass_
- __ movl(temp1, Address(src, class_offset));
- __ MaybeUnpoisonHeapReference(temp1);
- // /* HeapReference<Class> */ temp1 = temp1->component_type_
- __ movl(temp1, Address(temp1, component_offset));
- __ testl(temp1, temp1);
- __ j(kEqual, slow_path->GetEntryLabel());
- __ MaybeUnpoisonHeapReference(temp1);
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // /* HeapReference<Class> */ temp1 = src->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check */ false);
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+ __ testl(temp1, temp1);
+ __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
+ // If heap poisoning is enabled, `temp1` has been unpoisoned
+      // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
+ } else {
+ // /* HeapReference<Class> */ temp1 = src->klass_
+ __ movl(temp1, Address(src, class_offset));
+ __ MaybeUnpoisonHeapReference(temp1);
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
+ __ movl(temp1, Address(temp1, component_offset));
+ __ testl(temp1, temp1);
+ __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
+ __ MaybeUnpoisonHeapReference(temp1);
+ }
__ cmpw(Address(temp1, primitive_offset), Immediate(Primitive::kPrimNot));
- __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ j(kNotEqual, intrinsic_slow_path->GetEntryLabel());
}
- // Compute base source address, base destination address, and end source address.
+ // Compute the base source address in `temp1`.
int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot);
DCHECK_EQ(element_size, 4);
uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value();
@@ -2900,35 +3091,136 @@
__ leal(temp1, Address(src, src_pos.AsRegister<Register>(), ScaleFactor::TIMES_4, offset));
}
- if (dest_pos.IsConstant()) {
- int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
- __ leal(temp2, Address(dest, element_size * constant + offset));
- } else {
- __ leal(temp2, Address(dest, dest_pos.AsRegister<Register>(), ScaleFactor::TIMES_4, offset));
- }
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // If it is needed (in the case of the fast-path loop), the base
+ // destination address is computed later, as `temp2` is used for
+ // intermediate computations.
- if (length.IsConstant()) {
- int32_t constant = length.GetConstant()->AsIntConstant()->GetValue();
- __ leal(temp3, Address(temp1, element_size * constant));
- } else {
- __ leal(temp3, Address(temp1, length.AsRegister<Register>(), ScaleFactor::TIMES_4, 0));
- }
+ // Compute the end source address in `temp3`.
+ if (length.IsConstant()) {
+ int32_t constant = length.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(temp3, Address(temp1, element_size * constant));
+ } else {
+ if (length.IsStackSlot()) {
+ // Location `length` is again pointing at a stack slot, as
+          // register `temp3` (which contained the length parameter
+          // earlier) has been overwritten; restore it now.
+ DCHECK(length.Equals(length_arg));
+ __ movl(temp3, Address(ESP, length.GetStackIndex()));
+ length = Location::RegisterLocation(temp3);
+ }
+ __ leal(temp3, Address(temp1, length.AsRegister<Register>(), ScaleFactor::TIMES_4, 0));
+ }
- // Iterate over the arrays and do a raw copy of the objects. We don't need to
- // poison/unpoison.
- NearLabel loop, done;
- __ cmpl(temp1, temp3);
- __ j(kEqual, &done);
- __ Bind(&loop);
- __ pushl(Address(temp1, 0));
- __ cfi().AdjustCFAOffset(4);
- __ popl(Address(temp2, 0));
- __ cfi().AdjustCFAOffset(-4);
- __ addl(temp1, Immediate(element_size));
- __ addl(temp2, Immediate(element_size));
- __ cmpl(temp1, temp3);
- __ j(kNotEqual, &loop);
- __ Bind(&done);
+ // SystemArrayCopy implementation for Baker read barriers (see
+ // also CodeGeneratorX86::GenerateReferenceLoadWithBakerReadBarrier):
+ //
+ // if (src_ptr != end_ptr) {
+      //   uint32_t rb_state = LockWord(src->monitor_).ReadBarrierState();
+ // lfence; // Load fence or artificial data dependency to prevent load-load reordering
+ // bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+ // if (is_gray) {
+ // // Slow-path copy.
+ // for (size_t i = 0; i != length; ++i) {
+ // dest_array[dest_pos + i] =
+ // MaybePoison(ReadBarrier::Mark(MaybeUnpoison(src_array[src_pos + i])));
+ // }
+ // } else {
+ // // Fast-path copy.
+ // do {
+ // *dest_ptr++ = *src_ptr++;
+ // } while (src_ptr != end_ptr)
+ // }
+ // }
+
+ NearLabel loop, done;
+
+ // Don't enter copy loop if `length == 0`.
+ __ cmpl(temp1, temp3);
+ __ j(kEqual, &done);
+
+ // /* int32_t */ monitor = src->monitor_
+ __ movl(temp2, Address(src, monitor_offset));
+ // /* LockWord */ lock_word = LockWord(monitor)
+ static_assert(sizeof(LockWord) == sizeof(int32_t),
+ "art::LockWord and int32_t have different sizes.");
+
+ // Load fence to prevent load-load reordering.
+ // Note that this is a no-op, thanks to the x86 memory model.
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+
+ // Slow path used to copy array when `src` is gray.
+ SlowPathCode* read_barrier_slow_path =
+ new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathX86(invoke);
+ codegen_->AddSlowPath(read_barrier_slow_path);
+
+ // Given the numeric representation, it's enough to check the low bit of the
+ // rb_state. We do that by shifting the bit out of the lock word with SHR.
+ static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
+ static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+ __ shrl(temp2, Immediate(LockWord::kReadBarrierStateShift + 1));
+ __ j(kCarrySet, read_barrier_slow_path->GetEntryLabel());
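+      // (The SHR by kReadBarrierStateShift + 1 shifts the low rb_state bit out
+      // last, so it ends up in the carry flag: kCarrySet is taken exactly when
+      // that bit is 1, i.e. when the reference is gray.)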
+
+ // Fast-path copy.
+
+ // Set the base destination address in `temp2`.
+ if (dest_pos.IsConstant()) {
+ int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(temp2, Address(dest, element_size * constant + offset));
+ } else {
+ __ leal(temp2, Address(dest, dest_pos.AsRegister<Register>(), ScaleFactor::TIMES_4, offset));
+ }
+
+ // Iterate over the arrays and do a raw copy of the objects. We don't need to
+ // poison/unpoison.
+ __ Bind(&loop);
+ __ pushl(Address(temp1, 0));
+ __ cfi().AdjustCFAOffset(4);
+ __ popl(Address(temp2, 0));
+ __ cfi().AdjustCFAOffset(-4);
+ __ addl(temp1, Immediate(element_size));
+ __ addl(temp2, Immediate(element_size));
+ __ cmpl(temp1, temp3);
+ __ j(kNotEqual, &loop);
+
+ __ Bind(read_barrier_slow_path->GetExitLabel());
+ __ Bind(&done);
+ } else {
+ // Non read barrier code.
+
+ // Compute the base destination address in `temp2`.
+ if (dest_pos.IsConstant()) {
+ int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(temp2, Address(dest, element_size * constant + offset));
+ } else {
+ __ leal(temp2, Address(dest, dest_pos.AsRegister<Register>(), ScaleFactor::TIMES_4, offset));
+ }
+
+ // Compute the end source address in `temp3`.
+ if (length.IsConstant()) {
+ int32_t constant = length.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(temp3, Address(temp1, element_size * constant));
+ } else {
+ __ leal(temp3, Address(temp1, length.AsRegister<Register>(), ScaleFactor::TIMES_4, 0));
+ }
+
+ // Iterate over the arrays and do a raw copy of the objects. We don't need to
+ // poison/unpoison.
+ NearLabel loop, done;
+ __ cmpl(temp1, temp3);
+ __ j(kEqual, &done);
+ __ Bind(&loop);
+ __ pushl(Address(temp1, 0));
+ __ cfi().AdjustCFAOffset(4);
+ __ popl(Address(temp2, 0));
+ __ cfi().AdjustCFAOffset(-4);
+ __ addl(temp1, Immediate(element_size));
+ __ addl(temp2, Immediate(element_size));
+ __ cmpl(temp1, temp3);
+ __ j(kNotEqual, &loop);
+ __ Bind(&done);
+ }
// We only need one card marking on the destination array.
codegen_->MarkGCCard(temp1,
@@ -2937,7 +3229,7 @@
Register(kNoRegister),
/* value_can_be_null */ false);
- __ Bind(slow_path->GetExitLabel());
+ __ Bind(intrinsic_slow_path->GetExitLabel());
}
UNIMPLEMENTED_INTRINSIC(X86, MathRoundDouble)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index ab8b05c..311e1cd 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -64,6 +64,65 @@
using IntrinsicSlowPathX86_64 = IntrinsicSlowPath<InvokeDexCallingConventionVisitorX86_64>;
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ down_cast<X86_64Assembler*>(codegen->GetAssembler())-> // NOLINT
+
+// Slow path implementing the SystemArrayCopy intrinsic copy loop with read barriers.
+class ReadBarrierSystemArrayCopySlowPathX86_64 : public SlowPathCode {
+ public:
+ explicit ReadBarrierSystemArrayCopySlowPathX86_64(HInstruction* instruction)
+ : SlowPathCode(instruction) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+ }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(locations->CanCall());
+ DCHECK(instruction_->IsInvokeStaticOrDirect())
+ << "Unexpected instruction in read barrier arraycopy slow path: "
+ << instruction_->DebugName();
+ DCHECK(instruction_->GetLocations()->Intrinsified());
+ DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kSystemArrayCopy);
+
+ int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot);
+
+ CpuRegister src_curr_addr = locations->GetTemp(0).AsRegister<CpuRegister>();
+ CpuRegister dst_curr_addr = locations->GetTemp(1).AsRegister<CpuRegister>();
+ CpuRegister src_stop_addr = locations->GetTemp(2).AsRegister<CpuRegister>();
+
+ __ Bind(GetEntryLabel());
+ NearLabel loop;
+ __ Bind(&loop);
+ __ movl(CpuRegister(TMP), Address(src_curr_addr, 0));
+ __ MaybeUnpoisonHeapReference(CpuRegister(TMP));
+ // TODO: Inline the mark bit check before calling the runtime?
+ // TMP = ReadBarrier::Mark(TMP);
+ // No need to save live registers; it's taken care of by the
+ // entrypoint. Also, there is no need to update the stack mask,
+ // as this runtime call will not trigger a garbage collection.
+ int32_t entry_point_offset =
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86_64PointerSize>(TMP);
+ // This runtime call does not require a stack map.
+ x86_64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
+ __ MaybePoisonHeapReference(CpuRegister(TMP));
+ __ movl(Address(dst_curr_addr, 0), CpuRegister(TMP));
+ __ addl(src_curr_addr, Immediate(element_size));
+ __ addl(dst_curr_addr, Immediate(element_size));
+ __ cmpl(src_curr_addr, src_stop_addr);
+ __ j(kNotEqual, &loop);
+ __ jmp(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "ReadBarrierSystemArrayCopySlowPathX86_64"; }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ReadBarrierSystemArrayCopySlowPathX86_64);
+};
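Across the three back ends, the generated code follows the control flow given in the pseudo-code comments above; a compact stand-alone C++ model is sketched below. The lock-word layout (`kReadBarrierStateShift`) and the `Mark` helper are assumptions used for illustration, not the actual ART definitions.

  #include <atomic>
  #include <cstdint>

  using HeapRef = uint32_t;
  constexpr int kReadBarrierStateShift = 28;  // assumption: position of the rb_state bits
  HeapRef Mark(HeapRef ref) { return ref; }   // stand-in for ReadBarrier::Mark / ReadBarrierMarkRegX

  void SystemArrayCopyWithBakerReadBarrier(const HeapRef* src_ptr,
                                           HeapRef* dest_ptr,
                                           const HeapRef* end_ptr,
                                           uint32_t src_lock_word) {
    if (src_ptr == end_ptr) {
      return;  // length == 0: nothing to copy, no need to look at the lock word
    }
    // The low bit of the read barrier state distinguishes gray (1) from white (0).
    bool is_gray = ((src_lock_word >> kReadBarrierStateShift) & 1u) != 0u;
    // Models the load fence (x86) or artificial address dependency (ARM64) that
    // keeps the element loads from being reordered before the lock-word load.
    std::atomic_thread_fence(std::memory_order_acquire);
    if (is_gray) {
      do {  // Slow-path copy: every reference goes through the mark entry point.
        *dest_ptr++ = Mark(*src_ptr++);
      } while (src_ptr != end_ptr);
    } else {
      do {  // Fast-path copy: raw element copy, no per-element work.
        *dest_ptr++ = *src_ptr++;
      } while (src_ptr != end_ptr);
    }
  }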
+
+#undef __
+
#define __ assembler->
static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
@@ -1053,9 +1112,9 @@
void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
- // TODO(rpl): Implement read barriers in the SystemArrayCopy
- // intrinsic and re-enable it (b/29516905).
- if (kEmitCompilerReadBarrier) {
+ // The only read barrier implementation supporting the
+ // SystemArrayCopy intrinsic is the Baker-style read barriers.
+ if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
return;
}
@@ -1063,9 +1122,9 @@
}
void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
- // TODO(rpl): Implement read barriers in the SystemArrayCopy
- // intrinsic and re-enable it (b/29516905).
- DCHECK(!kEmitCompilerReadBarrier);
+ // The only read barrier implementation supporting the
+ // SystemArrayCopy intrinsic is the Baker-style read barriers.
+ DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
X86_64Assembler* assembler = GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -1074,18 +1133,23 @@
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+ uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
CpuRegister src = locations->InAt(0).AsRegister<CpuRegister>();
Location src_pos = locations->InAt(1);
CpuRegister dest = locations->InAt(2).AsRegister<CpuRegister>();
Location dest_pos = locations->InAt(3);
Location length = locations->InAt(4);
- CpuRegister temp1 = locations->GetTemp(0).AsRegister<CpuRegister>();
- CpuRegister temp2 = locations->GetTemp(1).AsRegister<CpuRegister>();
- CpuRegister temp3 = locations->GetTemp(2).AsRegister<CpuRegister>();
+ Location temp1_loc = locations->GetTemp(0);
+ CpuRegister temp1 = temp1_loc.AsRegister<CpuRegister>();
+ Location temp2_loc = locations->GetTemp(1);
+ CpuRegister temp2 = temp2_loc.AsRegister<CpuRegister>();
+ Location temp3_loc = locations->GetTemp(2);
+ CpuRegister temp3 = temp3_loc.AsRegister<CpuRegister>();
+ Location TMP_loc = Location::RegisterLocation(TMP);
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCode* intrinsic_slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ codegen_->AddSlowPath(intrinsic_slow_path);
NearLabel conditions_on_positions_validated;
SystemArrayCopyOptimizations optimizations(invoke);
@@ -1101,7 +1165,7 @@
DCHECK_GE(src_pos_constant, dest_pos_constant);
} else if (src_pos_constant < dest_pos_constant) {
__ cmpl(src, dest);
- __ j(kEqual, slow_path->GetEntryLabel());
+ __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
}
} else {
if (!optimizations.GetDestinationIsSource()) {
@@ -1109,7 +1173,7 @@
__ j(kNotEqual, &conditions_on_positions_validated);
}
__ cmpl(dest_pos.AsRegister<CpuRegister>(), Immediate(src_pos_constant));
- __ j(kGreater, slow_path->GetEntryLabel());
+ __ j(kGreater, intrinsic_slow_path->GetEntryLabel());
}
} else {
if (!optimizations.GetDestinationIsSource()) {
@@ -1119,10 +1183,10 @@
if (dest_pos.IsConstant()) {
int32_t dest_pos_constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
__ cmpl(src_pos.AsRegister<CpuRegister>(), Immediate(dest_pos_constant));
- __ j(kLess, slow_path->GetEntryLabel());
+ __ j(kLess, intrinsic_slow_path->GetEntryLabel());
} else {
__ cmpl(src_pos.AsRegister<CpuRegister>(), dest_pos.AsRegister<CpuRegister>());
- __ j(kLess, slow_path->GetEntryLabel());
+ __ j(kLess, intrinsic_slow_path->GetEntryLabel());
}
}
@@ -1131,13 +1195,13 @@
if (!optimizations.GetSourceIsNotNull()) {
// Bail out if the source is null.
__ testl(src, src);
- __ j(kEqual, slow_path->GetEntryLabel());
+ __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
}
if (!optimizations.GetDestinationIsNotNull() && !optimizations.GetDestinationIsSource()) {
// Bail out if the destination is null.
__ testl(dest, dest);
- __ j(kEqual, slow_path->GetEntryLabel());
+ __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
}
// If the length is negative, bail out.
@@ -1146,7 +1210,7 @@
!optimizations.GetCountIsSourceLength() &&
!optimizations.GetCountIsDestinationLength()) {
__ testl(length.AsRegister<CpuRegister>(), length.AsRegister<CpuRegister>());
- __ j(kLess, slow_path->GetEntryLabel());
+ __ j(kLess, intrinsic_slow_path->GetEntryLabel());
}
// Validity checks: source.
@@ -1154,7 +1218,7 @@
src_pos,
src,
length,
- slow_path,
+ intrinsic_slow_path,
temp1,
optimizations.GetCountIsSourceLength());
@@ -1163,7 +1227,7 @@
dest_pos,
dest,
length,
- slow_path,
+ intrinsic_slow_path,
temp1,
optimizations.GetCountIsDestinationLength());
@@ -1172,38 +1236,80 @@
// type of the destination array. We do two checks: the classes are the same,
// or the destination is Object[]. If none of these checks succeed, we go to the
// slow path.
- __ movl(temp1, Address(dest, class_offset));
- __ movl(temp2, Address(src, class_offset));
+
bool did_unpoison = false;
- if (!optimizations.GetDestinationIsNonPrimitiveArray() ||
- !optimizations.GetSourceIsNonPrimitiveArray()) {
- // One or two of the references need to be unpoisoned. Unpoison them
- // both to make the identity check valid.
- __ MaybeUnpoisonHeapReference(temp1);
- __ MaybeUnpoisonHeapReference(temp2);
- did_unpoison = true;
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // /* HeapReference<Class> */ temp1 = dest->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, temp1_loc, dest, class_offset, temp3_loc, /* needs_null_check */ false);
+ // Register `temp1` is not trashed by the read barrier emitted
+ // by GenerateFieldLoadWithBakerReadBarrier below, as that
+ // method produces a call to a ReadBarrierMarkRegX entry point,
+ // which saves all potentially live registers, including
+      // temporaries such as `temp1`.
+ // /* HeapReference<Class> */ temp2 = src->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, temp2_loc, src, class_offset, temp3_loc, /* needs_null_check */ false);
+ // If heap poisoning is enabled, `temp1` and `temp2` have been
+      // unpoisoned by the previous calls to
+ // GenerateFieldLoadWithBakerReadBarrier.
+ } else {
+ // /* HeapReference<Class> */ temp1 = dest->klass_
+ __ movl(temp1, Address(dest, class_offset));
+ // /* HeapReference<Class> */ temp2 = src->klass_
+ __ movl(temp2, Address(src, class_offset));
+ if (!optimizations.GetDestinationIsNonPrimitiveArray() ||
+ !optimizations.GetSourceIsNonPrimitiveArray()) {
+ // One or two of the references need to be unpoisoned. Unpoison them
+ // both to make the identity check valid.
+ __ MaybeUnpoisonHeapReference(temp1);
+ __ MaybeUnpoisonHeapReference(temp2);
+ did_unpoison = true;
+ }
}
if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
// Bail out if the destination is not a non primitive array.
- // /* HeapReference<Class> */ TMP = temp1->component_type_
- __ movl(CpuRegister(TMP), Address(temp1, component_offset));
- __ testl(CpuRegister(TMP), CpuRegister(TMP));
- __ j(kEqual, slow_path->GetEntryLabel());
- __ MaybeUnpoisonHeapReference(CpuRegister(TMP));
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // /* HeapReference<Class> */ TMP = temp1->component_type_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, TMP_loc, temp1, component_offset, temp3_loc, /* needs_null_check */ false);
+ __ testl(CpuRegister(TMP), CpuRegister(TMP));
+ __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
+ // If heap poisoning is enabled, `TMP` has been unpoisoned by
+        // the previous call to GenerateFieldLoadWithBakerReadBarrier.
+ } else {
+ // /* HeapReference<Class> */ TMP = temp1->component_type_
+ __ movl(CpuRegister(TMP), Address(temp1, component_offset));
+ __ testl(CpuRegister(TMP), CpuRegister(TMP));
+ __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
+ __ MaybeUnpoisonHeapReference(CpuRegister(TMP));
+ }
__ cmpw(Address(CpuRegister(TMP), primitive_offset), Immediate(Primitive::kPrimNot));
- __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ j(kNotEqual, intrinsic_slow_path->GetEntryLabel());
}
if (!optimizations.GetSourceIsNonPrimitiveArray()) {
// Bail out if the source is not a non primitive array.
- // /* HeapReference<Class> */ TMP = temp2->component_type_
- __ movl(CpuRegister(TMP), Address(temp2, component_offset));
- __ testl(CpuRegister(TMP), CpuRegister(TMP));
- __ j(kEqual, slow_path->GetEntryLabel());
- __ MaybeUnpoisonHeapReference(CpuRegister(TMP));
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // For the same reason given earlier, `temp1` is not trashed by the
+ // read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
+ // /* HeapReference<Class> */ TMP = temp2->component_type_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, TMP_loc, temp2, component_offset, temp3_loc, /* needs_null_check */ false);
+ __ testl(CpuRegister(TMP), CpuRegister(TMP));
+ __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
+ // If heap poisoning is enabled, `TMP` has been unpoisoned by
+ // the previous call to GenerateFieldLoadWithBakerReadBarrier.
+ } else {
+ // /* HeapReference<Class> */ TMP = temp2->component_type_
+ __ movl(CpuRegister(TMP), Address(temp2, component_offset));
+ __ testl(CpuRegister(TMP), CpuRegister(TMP));
+ __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
+ __ MaybeUnpoisonHeapReference(CpuRegister(TMP));
+ }
__ cmpw(Address(CpuRegister(TMP), primitive_offset), Immediate(Primitive::kPrimNot));
- __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ j(kNotEqual, intrinsic_slow_path->GetEntryLabel());
}
__ cmpl(temp1, temp2);
@@ -1211,34 +1317,56 @@
if (optimizations.GetDestinationIsTypedObjectArray()) {
NearLabel do_copy;
__ j(kEqual, &do_copy);
- if (!did_unpoison) {
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, temp1_loc, temp1, component_offset, temp3_loc, /* needs_null_check */ false);
+ // We do not need to emit a read barrier for the following
+ // heap reference load, as `temp1` is only used in a
+ // comparison with null below, and this reference is not
+ // kept afterwards.
+ __ cmpl(Address(temp1, super_offset), Immediate(0));
+ } else {
+ if (!did_unpoison) {
+ __ MaybeUnpoisonHeapReference(temp1);
+ }
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
+ __ movl(temp1, Address(temp1, component_offset));
__ MaybeUnpoisonHeapReference(temp1);
+ // No need to unpoison the following heap reference load, as
+ // we're comparing against null.
+ __ cmpl(Address(temp1, super_offset), Immediate(0));
}
- // /* HeapReference<Class> */ temp1 = temp1->component_type_
- __ movl(temp1, Address(temp1, component_offset));
- __ MaybeUnpoisonHeapReference(temp1);
- // /* HeapReference<Class> */ temp1 = temp1->super_class_
- __ movl(temp1, Address(temp1, super_offset));
- // No need to unpoison the result, we're comparing against null.
- __ testl(temp1, temp1);
- __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ j(kNotEqual, intrinsic_slow_path->GetEntryLabel());
__ Bind(&do_copy);
} else {
- __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ j(kNotEqual, intrinsic_slow_path->GetEntryLabel());
}
} else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
// Bail out if the source is not a non primitive array.
- // /* HeapReference<Class> */ temp1 = src->klass_
- __ movl(temp1, Address(src, class_offset));
- __ MaybeUnpoisonHeapReference(temp1);
- // /* HeapReference<Class> */ TMP = temp1->component_type_
- __ movl(CpuRegister(TMP), Address(temp1, component_offset));
- __ testl(CpuRegister(TMP), CpuRegister(TMP));
- __ j(kEqual, slow_path->GetEntryLabel());
- __ MaybeUnpoisonHeapReference(CpuRegister(TMP));
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // /* HeapReference<Class> */ temp1 = src->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, temp1_loc, src, class_offset, temp3_loc, /* needs_null_check */ false);
+ // /* HeapReference<Class> */ TMP = temp1->component_type_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, TMP_loc, temp1, component_offset, temp3_loc, /* needs_null_check */ false);
+ __ testl(CpuRegister(TMP), CpuRegister(TMP));
+ __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
+ } else {
+ // /* HeapReference<Class> */ temp1 = src->klass_
+ __ movl(temp1, Address(src, class_offset));
+ __ MaybeUnpoisonHeapReference(temp1);
+ // /* HeapReference<Class> */ TMP = temp1->component_type_
+ __ movl(CpuRegister(TMP), Address(temp1, component_offset));
+ // No need to unpoison `TMP` now, as we're comparing against null.
+ __ testl(CpuRegister(TMP), CpuRegister(TMP));
+ __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
+ __ MaybeUnpoisonHeapReference(CpuRegister(TMP));
+ }
__ cmpw(Address(CpuRegister(TMP), primitive_offset), Immediate(Primitive::kPrimNot));
- __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ j(kNotEqual, intrinsic_slow_path->GetEntryLabel());
}
// Compute base source address, base destination address, and end source address.
@@ -1266,19 +1394,86 @@
__ leal(temp3, Address(temp1, length.AsRegister<CpuRegister>(), ScaleFactor::TIMES_4, 0));
}
- // Iterate over the arrays and do a raw copy of the objects. We don't need to
- // poison/unpoison.
- NearLabel loop, done;
- __ cmpl(temp1, temp3);
- __ j(kEqual, &done);
- __ Bind(&loop);
- __ movl(CpuRegister(TMP), Address(temp1, 0));
- __ movl(Address(temp2, 0), CpuRegister(TMP));
- __ addl(temp1, Immediate(element_size));
- __ addl(temp2, Immediate(element_size));
- __ cmpl(temp1, temp3);
- __ j(kNotEqual, &loop);
- __ Bind(&done);
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // SystemArrayCopy implementation for Baker read barriers (see
+ // also CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier):
+ //
+ // if (src_ptr != end_ptr) {
+ // uint32_t rb_state = LockWord(src->monitor_).ReadBarrierState();
+ // lfence; // Load fence or artificial data dependency to prevent load-load reordering
+ // bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+ // if (is_gray) {
+ // // Slow-path copy.
+ // do {
+ // *dest_ptr++ = MaybePoison(ReadBarrier::Mark(MaybeUnpoison(*src_ptr++)));
+ // } while (src_ptr != end_ptr)
+ // } else {
+ // // Fast-path copy.
+ // do {
+ // *dest_ptr++ = *src_ptr++;
+ // } while (src_ptr != end_ptr)
+ // }
+ // }
+
+ NearLabel loop, done;
+
+ // Don't enter copy loop if `length == 0`.
+ __ cmpl(temp1, temp3);
+ __ j(kEqual, &done);
+
+ // /* int32_t */ monitor = src->monitor_
+ __ movl(CpuRegister(TMP), Address(src, monitor_offset));
+ // /* LockWord */ lock_word = LockWord(monitor)
+ static_assert(sizeof(LockWord) == sizeof(int32_t),
+ "art::LockWord and int32_t have different sizes.");
+
+ // Load fence to prevent load-load reordering.
+ // Note that this is a no-op, thanks to the x86-64 memory model.
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+
+ // Slow path used to copy array when `src` is gray.
+ SlowPathCode* read_barrier_slow_path =
+ new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathX86_64(invoke);
+ codegen_->AddSlowPath(read_barrier_slow_path);
+
+ // Given the numeric representation, it's enough to check the low bit of the
+ // rb_state. We do that by shifting the bit out of the lock word with SHR.
+ static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
+ static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+ __ shrl(CpuRegister(TMP), Immediate(LockWord::kReadBarrierStateShift + 1));
+ __ j(kCarrySet, read_barrier_slow_path->GetEntryLabel());
+
+ // Fast-path copy.
+ // Iterate over the arrays and do a raw copy of the objects. We don't need to
+ // poison/unpoison.
+ __ Bind(&loop);
+ __ movl(CpuRegister(TMP), Address(temp1, 0));
+ __ movl(Address(temp2, 0), CpuRegister(TMP));
+ __ addl(temp1, Immediate(element_size));
+ __ addl(temp2, Immediate(element_size));
+ __ cmpl(temp1, temp3);
+ __ j(kNotEqual, &loop);
+
+ __ Bind(read_barrier_slow_path->GetExitLabel());
+ __ Bind(&done);
+ } else {
+ // Non read barrier code.
+
+ // Iterate over the arrays and do a raw copy of the objects. We don't need to
+ // poison/unpoison.
+ NearLabel loop, done;
+ __ cmpl(temp1, temp3);
+ __ j(kEqual, &done);
+ __ Bind(&loop);
+ __ movl(CpuRegister(TMP), Address(temp1, 0));
+ __ movl(Address(temp2, 0), CpuRegister(TMP));
+ __ addl(temp1, Immediate(element_size));
+ __ addl(temp2, Immediate(element_size));
+ __ cmpl(temp1, temp3);
+ __ j(kNotEqual, &loop);
+ __ Bind(&done);
+ }
// We only need one card marking on the destination array.
codegen_->MarkGCCard(temp1,
@@ -1287,7 +1482,7 @@
CpuRegister(kNoRegister),
/* value_can_be_null */ false);
- __ Bind(slow_path->GetExitLabel());
+ __ Bind(intrinsic_slow_path->GetExitLabel());
}
void IntrinsicLocationsBuilderX86_64::VisitStringCompareTo(HInvoke* invoke) {
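
Note: the Baker read barrier fast path added above hinges on the SHR trick spelled out in the comments: shifting the lock word right by LockWord::kReadBarrierStateShift + 1 drops the low bit of the rb_state into the carry flag, which is set exactly when the source object is gray. A minimal standalone sketch of that check in plain C++ rather than generated x86-64 (the shift value below is an assumption for illustration only):

    #include <cstdint>

    // Assumed bit position of the read barrier state inside the lock word.
    constexpr uint32_t kReadBarrierStateShift = 28;

    // Equivalent of `shrl $(shift + 1), %tmp; jc slow_path`: gray_ptr_ == 1, so the
    // low rb_state bit alone distinguishes gray from white (0) and black (2).
    inline bool IsGray(uint32_t lock_word) {
      return ((lock_word >> kReadBarrierStateShift) & 1u) != 0u;
    }
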
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 231a3b0..a1da20b 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -180,6 +180,7 @@
private:
void StartPass(const char* pass_name) {
+ VLOG(compiler) << "Starting pass: " << pass_name;
// Dump graph first, then start timer.
if (visualizer_enabled_) {
visualizer_.DumpGraph(pass_name, /* is_after_pass */ false, graph_in_bad_state_);
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index cfdb41a..a21595f 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -227,7 +227,8 @@
out_degree_(interval->HasRegister() ? std::numeric_limits<size_t>::max() : 0),
alias_(this),
spill_weight_(ComputeSpillWeight(interval, liveness)),
- requires_color_(interval->RequiresRegister()) {
+ requires_color_(interval->RequiresRegister()),
+ needs_spill_slot_(false) {
DCHECK(!interval->IsHighInterval()) << "Pair nodes should be represented by the low interval";
}
@@ -342,6 +343,14 @@
return (IsPair() || other->IsPair()) ? 2 : 1;
}
+ bool NeedsSpillSlot() const {
+ return needs_spill_slot_;
+ }
+
+ void SetNeedsSpillSlot() {
+ needs_spill_slot_ = true;
+ }
+
// The current stage of this node, indicating which worklist it belongs to.
NodeStage stage;
@@ -376,6 +385,8 @@
const bool requires_color_;
+ bool needs_spill_slot_;
+
DISALLOW_COPY_AND_ASSIGN(InterferenceNode);
};
@@ -549,10 +560,10 @@
safepoints_(allocator->Adapter(kArenaAllocRegisterAllocator)),
physical_core_nodes_(allocator->Adapter(kArenaAllocRegisterAllocator)),
physical_fp_nodes_(allocator->Adapter(kArenaAllocRegisterAllocator)),
- int_spill_slot_counter_(0),
- double_spill_slot_counter_(0),
- float_spill_slot_counter_(0),
- long_spill_slot_counter_(0),
+ num_int_spill_slots_(0),
+ num_double_spill_slots_(0),
+ num_float_spill_slots_(0),
+ num_long_spill_slots_(0),
catch_phi_spill_slot_counter_(0),
reserved_art_method_slots_(ComputeReservedArtMethodSlots(*codegen)),
reserved_out_slots_(codegen->GetGraph()->GetMaximumNumberOfOutVRegs()),
@@ -653,6 +664,9 @@
}
if (successful) {
+ // Assign spill slots.
+ AllocateSpillSlots(iteration.GetPrunableNodes());
+
// Compute the maximum number of live registers across safepoints.
// Notice that we do not count globally blocked registers, such as the stack pointer.
if (safepoints.size() > 0) {
@@ -700,10 +714,10 @@
.Resolve(max_safepoint_live_core_regs_,
max_safepoint_live_fp_regs_,
reserved_art_method_slots_ + reserved_out_slots_,
- int_spill_slot_counter_,
- long_spill_slot_counter_,
- float_spill_slot_counter_,
- double_spill_slot_counter_,
+ num_int_spill_slots_,
+ num_long_spill_slots_,
+ num_float_spill_slots_,
+ num_double_spill_slots_,
catch_phi_spill_slot_counter_,
temp_intervals_);
@@ -743,10 +757,10 @@
}
}
- size_t spill_slots = int_spill_slot_counter_
- + long_spill_slot_counter_
- + float_spill_slot_counter_
- + double_spill_slot_counter_
+ size_t spill_slots = num_int_spill_slots_
+ + num_long_spill_slots_
+ + num_float_spill_slots_
+ + num_double_spill_slots_
+ catch_phi_spill_slot_counter_;
bool ok = ValidateIntervals(intervals,
spill_slots,
@@ -1910,7 +1924,7 @@
// be colored, and that we should split.
} else {
// Spill.
- register_allocator_->AllocateSpillSlotFor(interval);
+ node->SetNeedsSpillSlot();
}
}
@@ -1936,52 +1950,156 @@
return max_safepoint_live_regs;
}
-void RegisterAllocatorGraphColor::AllocateSpillSlotFor(LiveInterval* interval) {
- LiveInterval* parent = interval->GetParent();
- HInstruction* defined_by = parent->GetDefinedBy();
- if (parent->HasSpillSlot()) {
- // We already have a spill slot for this value that we can reuse.
- } else if (defined_by->IsParameterValue()) {
- // Parameters already have a stack slot.
- parent->SetSpillSlot(codegen_->GetStackSlotOfParameter(defined_by->AsParameterValue()));
- } else if (defined_by->IsCurrentMethod()) {
- // The current method is always at spill slot 0.
- parent->SetSpillSlot(0);
- } else if (defined_by->IsConstant()) {
- // Constants don't need a spill slot.
- } else {
- // Allocate a spill slot based on type.
- size_t* spill_slot_counter;
- switch (interval->GetType()) {
- case Primitive::kPrimDouble:
- spill_slot_counter = &double_spill_slot_counter_;
- break;
- case Primitive::kPrimLong:
- spill_slot_counter = &long_spill_slot_counter_;
- break;
- case Primitive::kPrimFloat:
- spill_slot_counter = &float_spill_slot_counter_;
- break;
- case Primitive::kPrimNot:
- case Primitive::kPrimInt:
- case Primitive::kPrimChar:
- case Primitive::kPrimByte:
- case Primitive::kPrimBoolean:
- case Primitive::kPrimShort:
- spill_slot_counter = &int_spill_slot_counter_;
- break;
- case Primitive::kPrimVoid:
- LOG(FATAL) << "Unexpected type for interval " << interval->GetType();
- UNREACHABLE();
+void RegisterAllocatorGraphColor::AllocateSpillSlots(const ArenaVector<InterferenceNode*>& nodes) {
+ // The register allocation resolver will organize the stack based on value type,
+ // so we assign stack slots for each value type separately.
+ ArenaVector<LiveInterval*> double_intervals(allocator_->Adapter(kArenaAllocRegisterAllocator));
+ ArenaVector<LiveInterval*> long_intervals(allocator_->Adapter(kArenaAllocRegisterAllocator));
+ ArenaVector<LiveInterval*> float_intervals(allocator_->Adapter(kArenaAllocRegisterAllocator));
+ ArenaVector<LiveInterval*> int_intervals(allocator_->Adapter(kArenaAllocRegisterAllocator));
+
+ // The set of parent intervals already handled.
+ ArenaSet<LiveInterval*> seen(allocator_->Adapter(kArenaAllocRegisterAllocator));
+
+ // Find nodes that need spill slots.
+ for (InterferenceNode* node : nodes) {
+ if (!node->NeedsSpillSlot()) {
+ continue;
}
- parent->SetSpillSlot(*spill_slot_counter);
- *spill_slot_counter += parent->NeedsTwoSpillSlots() ? 2 : 1;
- // TODO: Could color stack slots if we wanted to, even if
- // it's just a trivial coloring. See the linear scan implementation,
- // which simply reuses spill slots for values whose live intervals
- // have already ended.
+ LiveInterval* parent = node->GetInterval()->GetParent();
+ if (seen.find(parent) != seen.end()) {
+ // We've already handled this interval.
+ // This can happen if multiple siblings of the same interval request a stack slot.
+ continue;
+ }
+ seen.insert(parent);
+
+ HInstruction* defined_by = parent->GetDefinedBy();
+ if (parent->HasSpillSlot()) {
+ // We already have a spill slot for this value that we can reuse.
+ } else if (defined_by->IsParameterValue()) {
+ // Parameters already have a stack slot.
+ parent->SetSpillSlot(codegen_->GetStackSlotOfParameter(defined_by->AsParameterValue()));
+ } else if (defined_by->IsCurrentMethod()) {
+ // The current method is always at stack slot 0.
+ parent->SetSpillSlot(0);
+ } else if (defined_by->IsConstant()) {
+ // Constants don't need a spill slot.
+ } else {
+ // We need to find a spill slot for this interval. Place it in the correct
+ // worklist to be processed later.
+ switch (node->GetInterval()->GetType()) {
+ case Primitive::kPrimDouble:
+ double_intervals.push_back(parent);
+ break;
+ case Primitive::kPrimLong:
+ long_intervals.push_back(parent);
+ break;
+ case Primitive::kPrimFloat:
+ float_intervals.push_back(parent);
+ break;
+ case Primitive::kPrimNot:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimShort:
+ int_intervals.push_back(parent);
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected type for interval " << node->GetInterval()->GetType();
+ UNREACHABLE();
+ }
+ }
}
+
+ // Color spill slots for each value type.
+ ColorSpillSlots(&double_intervals, &num_double_spill_slots_);
+ ColorSpillSlots(&long_intervals, &num_long_spill_slots_);
+ ColorSpillSlots(&float_intervals, &num_float_spill_slots_);
+ ColorSpillSlots(&int_intervals, &num_int_spill_slots_);
+}
+
+void RegisterAllocatorGraphColor::ColorSpillSlots(ArenaVector<LiveInterval*>* intervals,
+ size_t* num_stack_slots_used) {
+ // We cannot use the original interference graph here because spill slots are assigned to
+ // all of the siblings of an interval, whereas an interference node represents only a single
+ // sibling. So, we assign spill slots linear-scan-style by sorting all the interval endpoints
+ // by position, and assigning the lowest spill slot available when we encounter an interval
+ // beginning. We ignore lifetime holes for simplicity.
+ ArenaVector<std::tuple<size_t, bool, LiveInterval*>> interval_endpoints(
+ allocator_->Adapter(kArenaAllocRegisterAllocator));
+
+ for (auto it = intervals->begin(), e = intervals->end(); it != e; ++it) {
+ LiveInterval* parent_interval = *it;
+ DCHECK(parent_interval->IsParent());
+ DCHECK(!parent_interval->HasSpillSlot());
+ size_t start = parent_interval->GetStart();
+ size_t end = parent_interval->GetLastSibling()->GetEnd();
+ DCHECK_LT(start, end);
+ interval_endpoints.push_back(std::make_tuple(start, true, parent_interval));
+ interval_endpoints.push_back(std::make_tuple(end, false, parent_interval));
+ }
+
+ // Sort by position.
+ // We explicitly ignore the third entry of each tuple (the interval pointer) in order
+ // to maintain determinism.
+ std::sort(interval_endpoints.begin(), interval_endpoints.end(),
+ [] (const std::tuple<size_t, bool, LiveInterval*>& lhs,
+ const std::tuple<size_t, bool, LiveInterval*>& rhs) {
+ return std::tie(std::get<0>(lhs), std::get<1>(lhs))
+ < std::tie(std::get<0>(rhs), std::get<1>(rhs));
+ });
+
+ ArenaBitVector taken(allocator_, 0, true);
+ for (auto it = interval_endpoints.begin(), end = interval_endpoints.end(); it != end; ++it) {
+ // Extract information from the current tuple.
+ LiveInterval* parent_interval;
+ bool is_interval_beginning;
+ size_t position;
+ std::tie(position, is_interval_beginning, parent_interval) = *it;
+
+ bool needs_two_slots = parent_interval->NeedsTwoSpillSlots();
+
+ if (is_interval_beginning) {
+ DCHECK(!parent_interval->HasSpillSlot());
+ DCHECK_EQ(position, parent_interval->GetStart());
+
+ // Find a free stack slot.
+ size_t slot = 0;
+ for (; taken.IsBitSet(slot) || (needs_two_slots && taken.IsBitSet(slot + 1)); ++slot) {
+ // Skip taken slots.
+ }
+ parent_interval->SetSpillSlot(slot);
+
+ *num_stack_slots_used = std::max(*num_stack_slots_used,
+ needs_two_slots ? slot + 2 : slot + 1);
+ if (needs_two_slots && *num_stack_slots_used % 2 != 0) {
+ // The parallel move resolver requires that there be an even number of spill slots
+ // allocated for pair value types.
+ ++(*num_stack_slots_used);
+ }
+
+ taken.SetBit(slot);
+ if (needs_two_slots) {
+ taken.SetBit(slot + 1);
+ }
+ } else {
+ DCHECK_EQ(position, parent_interval->GetLastSibling()->GetEnd());
+ DCHECK(parent_interval->HasSpillSlot());
+
+ // Free up the stack slot used by this interval.
+ size_t slot = parent_interval->GetSpillSlot();
+ DCHECK(taken.IsBitSet(slot));
+ DCHECK(!needs_two_slots || taken.IsBitSet(slot + 1));
+ taken.ClearBit(slot);
+ if (needs_two_slots) {
+ taken.ClearBit(slot + 1);
+ }
+ }
+ }
+ DCHECK_EQ(taken.NumSetBits(), 0u);
}
} // namespace art
diff --git a/compiler/optimizing/register_allocator_graph_color.h b/compiler/optimizing/register_allocator_graph_color.h
index 9dddcea..ed12561 100644
--- a/compiler/optimizing/register_allocator_graph_color.h
+++ b/compiler/optimizing/register_allocator_graph_color.h
@@ -144,9 +144,13 @@
// based on the outgoing interference edges of safepoint nodes.
size_t ComputeMaxSafepointLiveRegisters(const ArenaVector<InterferenceNode*>& safepoints);
- // If necessary, add the given interval to the list of spilled intervals,
- // and make sure it's ready to be spilled to the stack.
- void AllocateSpillSlotFor(LiveInterval* interval);
+ // Assigns stack slots to a list of intervals, ensuring that interfering intervals are not
+ // assigned the same stack slot.
+ void ColorSpillSlots(ArenaVector<LiveInterval*>* nodes,
+ size_t* num_stack_slots_used);
+
+ // Provide stack slots to nodes that need them.
+ void AllocateSpillSlots(const ArenaVector<InterferenceNode*>& nodes);
// Whether iterative move coalescing should be performed. Iterative move coalescing
// improves code quality, but increases compile time.
@@ -170,10 +174,10 @@
ArenaVector<InterferenceNode*> physical_fp_nodes_;
// Allocated stack slot counters.
- size_t int_spill_slot_counter_;
- size_t double_spill_slot_counter_;
- size_t float_spill_slot_counter_;
- size_t long_spill_slot_counter_;
+ size_t num_int_spill_slots_;
+ size_t num_double_spill_slots_;
+ size_t num_float_spill_slots_;
+ size_t num_long_spill_slots_;
size_t catch_phi_spill_slot_counter_;
// Number of stack slots needed for the pointer to the current method.
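
Note: the allocator hunks above defer spill-slot assignment to an endpoint sweep: interval start/end positions are sorted, the lowest free slot is grabbed at each start and released at each end. A self-contained sketch of that sweep, ignoring pair slots and ART's arena containers (all names below are illustrative, not ART API):

    #include <algorithm>
    #include <cstddef>
    #include <tuple>
    #include <vector>

    struct Interval {
      size_t start;
      size_t end;   // exclusive
      size_t slot;  // filled in by AssignSlots
    };

    // Returns the number of stack slots used.
    size_t AssignSlots(std::vector<Interval>& intervals) {
      std::vector<std::tuple<size_t, bool, Interval*>> points;
      for (Interval& i : intervals) {
        points.emplace_back(i.start, true, &i);
        points.emplace_back(i.end, false, &i);
      }
      // Sort by position; at equal positions an end (false) sorts before a start (true),
      // so a slot released at P is immediately reusable by an interval starting at P.
      std::sort(points.begin(), points.end(),
                [](const std::tuple<size_t, bool, Interval*>& lhs,
                   const std::tuple<size_t, bool, Interval*>& rhs) {
                  return std::tie(std::get<0>(lhs), std::get<1>(lhs)) <
                         std::tie(std::get<0>(rhs), std::get<1>(rhs));
                });
      std::vector<bool> taken;
      size_t used = 0;
      for (const auto& point : points) {
        Interval* interval = std::get<2>(point);
        if (std::get<1>(point)) {
          // Interval begins: take the lowest free slot.
          size_t slot = 0;
          while (slot < taken.size() && taken[slot]) {
            ++slot;
          }
          if (slot == taken.size()) {
            taken.push_back(false);
          }
          taken[slot] = true;
          interval->slot = slot;
          used = std::max(used, slot + 1);
        } else {
          // Interval ends: release its slot.
          taken[interval->slot] = false;
        }
      }
      return used;
    }
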
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index b73f738..6effc30 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -279,8 +279,7 @@
const DexFile& dex_file = load_string->GetDexFile();
uint32_t string_index = load_string->GetStringIndex();
- bool is_in_dex_cache = false;
- HLoadString::LoadKind desired_load_kind;
+ HLoadString::LoadKind desired_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
uint64_t address = 0u; // String or dex cache element address.
{
Runtime* runtime = Runtime::Current();
@@ -296,33 +295,14 @@
DCHECK(!runtime->UseJitCompilation());
mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache);
CHECK(string != nullptr);
- if (!compiler_driver_->GetSupportBootImageFixup()) {
- // MIPS/MIPS64 or compiler_driver_test. Do not sharpen.
- desired_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
- } else {
- DCHECK(ContainsElement(compiler_driver_->GetDexFilesForOatFile(), &dex_file));
- is_in_dex_cache = true;
- desired_load_kind = codegen_->GetCompilerOptions().GetCompilePic()
- ? HLoadString::LoadKind::kBootImageLinkTimePcRelative
- : HLoadString::LoadKind::kBootImageLinkTimeAddress;
- }
+ // TODO: In follow up CL, add PcRelative and Address back in.
} else if (runtime->UseJitCompilation()) {
// TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
// DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
mirror::String* string = dex_cache->GetResolvedString(string_index);
- is_in_dex_cache = (string != nullptr);
if (string != nullptr && runtime->GetHeap()->ObjectIsInBootImageSpace(string)) {
- // TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
desired_load_kind = HLoadString::LoadKind::kBootImageAddress;
address = reinterpret_cast64<uint64_t>(string);
- } else {
- // Note: If the string is not in the dex cache, the instruction needs environment
- // and will not be inlined across dex files. Within a dex file, the slow-path helper
- // loads the correct string and inlined frames are used correctly for OOM stack trace.
- // TODO: Write a test for this. Bug: 29416588
- desired_load_kind = HLoadString::LoadKind::kDexCacheAddress;
- void* dex_cache_element_address = &dex_cache->GetStrings()[string_index];
- address = reinterpret_cast64<uint64_t>(dex_cache_element_address);
}
} else {
// AOT app compilation. Try to lookup the string without allocating if not found.
@@ -332,19 +312,9 @@
!codegen_->GetCompilerOptions().GetCompilePic()) {
desired_load_kind = HLoadString::LoadKind::kBootImageAddress;
address = reinterpret_cast64<uint64_t>(string);
- } else {
- // Not JIT and either the string is not in boot image or we are compiling in PIC mode.
- // Use PC-relative load from the dex cache if the dex file belongs
- // to the oat file that we're currently compiling.
- desired_load_kind = ContainsElement(compiler_driver_->GetDexFilesForOatFile(), &dex_file)
- ? HLoadString::LoadKind::kDexCachePcRelative
- : HLoadString::LoadKind::kDexCacheViaMethod;
}
}
}
- if (is_in_dex_cache) {
- load_string->MarkInDexCache();
- }
HLoadString::LoadKind load_kind = codegen_->GetSupportedLoadStringKind(desired_load_kind);
switch (load_kind) {
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 86548e1..c52a5a9 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -908,6 +908,12 @@
// reg = -reg.
rsb(reg, reg, ShifterOperand(0));
}
+ // Poison a heap reference contained in `reg` if heap poisoning is enabled.
+ void MaybePoisonHeapReference(Register reg) {
+ if (kPoisonHeapReferences) {
+ PoisonHeapReference(reg);
+ }
+ }
// Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
void MaybeUnpoisonHeapReference(Register reg) {
if (kPoisonHeapReferences) {
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 19450b3..f91bcfa 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -146,6 +146,12 @@
___ Neg(reg, Operand(reg));
}
+void Arm64Assembler::MaybePoisonHeapReference(Register reg) {
+ if (kPoisonHeapReferences) {
+ PoisonHeapReference(reg);
+ }
+}
+
void Arm64Assembler::MaybeUnpoisonHeapReference(Register reg) {
if (kPoisonHeapReferences) {
UnpoisonHeapReference(reg);
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 2847cb8..66a7fed 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -93,6 +93,8 @@
void PoisonHeapReference(vixl::aarch64::Register reg);
// Unpoison a heap reference contained in `reg`.
void UnpoisonHeapReference(vixl::aarch64::Register reg);
+ // Poison a heap reference contained in `reg` if heap poisoning is enabled.
+ void MaybePoisonHeapReference(vixl::aarch64::Register reg);
// Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
void MaybeUnpoisonHeapReference(vixl::aarch64::Register reg);
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 92a92a5..63aa4a4 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -639,6 +639,12 @@
void PoisonHeapReference(Register reg) { negl(reg); }
// Unpoison a heap reference contained in `reg`.
void UnpoisonHeapReference(Register reg) { negl(reg); }
+ // Poison a heap reference contained in `reg` if heap poisoning is enabled.
+ void MaybePoisonHeapReference(Register reg) {
+ if (kPoisonHeapReferences) {
+ PoisonHeapReference(reg);
+ }
+ }
// Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
void MaybeUnpoisonHeapReference(Register reg) {
if (kPoisonHeapReferences) {
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 370f49c..a4166f9 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -741,6 +741,12 @@
void PoisonHeapReference(CpuRegister reg) { negl(reg); }
// Unpoison a heap reference contained in `reg`.
void UnpoisonHeapReference(CpuRegister reg) { negl(reg); }
+ // Poison a heap reference contained in `reg` if heap poisoning is enabled.
+ void MaybePoisonHeapReference(CpuRegister reg) {
+ if (kPoisonHeapReferences) {
+ PoisonHeapReference(reg);
+ }
+ }
// Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
void MaybeUnpoisonHeapReference(CpuRegister reg) {
if (kPoisonHeapReferences) {
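
Note: the four assembler hunks above add the same MaybePoisonHeapReference helper to ARM, ARM64, x86 and x86-64. PoisonHeapReference and UnpoisonHeapReference are both a 32-bit negation, so poisoning is its own inverse and the Maybe variants fold away when the flag is off. A scalar illustration of that value-level behaviour (the constant below stands in for ART's build-time flag and is an assumption here):

    #include <cstdint>

    constexpr bool kPoisonHeapReferences = true;  // assumed enabled for the example

    inline uint32_t MaybePoison(uint32_t ref) {
      // negl on a 32-bit register computes the two's complement, i.e. 0 - ref.
      return kPoisonHeapReferences ? (0u - ref) : ref;
    }

    inline uint32_t MaybeUnpoison(uint32_t ref) {
      return MaybePoison(ref);  // negation round-trips, so unpoison == poison
    }
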
diff --git a/disassembler/disassembler.cc b/disassembler/disassembler.cc
index e604c1f..bcd0d16 100644
--- a/disassembler/disassembler.cc
+++ b/disassembler/disassembler.cc
@@ -32,10 +32,8 @@
return new arm::DisassemblerArm(options);
} else if (instruction_set == kArm64) {
return new arm64::DisassemblerArm64(options);
- } else if (instruction_set == kMips) {
- return new mips::DisassemblerMips(options, false);
- } else if (instruction_set == kMips64) {
- return new mips::DisassemblerMips(options, true);
+ } else if (instruction_set == kMips || instruction_set == kMips64) {
+ return new mips::DisassemblerMips(options);
} else if (instruction_set == kX86) {
return new x86::DisassemblerX86(options, false);
} else if (instruction_set == kX86_64) {
diff --git a/disassembler/disassembler.h b/disassembler/disassembler.h
index b080315..86793cc 100644
--- a/disassembler/disassembler.h
+++ b/disassembler/disassembler.h
@@ -28,8 +28,9 @@
class DisassemblerOptions {
public:
- // Should the disassembler print absolute or relative addresses.
- const bool absolute_addresses_;
+ using ThreadOffsetNameFunction = void (*)(std::ostream& os, uint32_t offset);
+
+ ThreadOffsetNameFunction thread_offset_name_function_;
// Base address for calculating relative code offsets when absolute_addresses_ is false.
const uint8_t* const base_address_;
@@ -37,6 +38,9 @@
// End address (exclusive);
const uint8_t* const end_address_;
+ // Should the disassembler print absolute or relative addresses.
+ const bool absolute_addresses_;
+
// If set, the disassembler is allowed to look at load targets in literal
// pools.
const bool can_read_literals_;
@@ -44,10 +48,12 @@
DisassemblerOptions(bool absolute_addresses,
const uint8_t* base_address,
const uint8_t* end_address,
- bool can_read_literals)
- : absolute_addresses_(absolute_addresses),
+ bool can_read_literals,
+ ThreadOffsetNameFunction fn)
+ : thread_offset_name_function_(fn),
base_address_(base_address),
end_address_(end_address),
+ absolute_addresses_(absolute_addresses),
can_read_literals_(can_read_literals) {}
private:
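
Note: with the ThreadOffsetNameFunction member above, callers inject how thread offsets are pretty-printed instead of the disassembler reaching into art::Thread directly. A hedged usage sketch with an illustrative callback (a tool without runtime access might just print the raw offset):

    #include <cstdint>
    #include <ostream>

    static void PrintRawThreadOffset(std::ostream& os, uint32_t offset) {
      os << "THREAD_OFFSET[0x" << std::hex << offset << std::dec << "]";
    }

    // DisassemblerOptions* opts = new DisassemblerOptions(/* absolute_addresses */ false,
    //                                                     code_begin,
    //                                                     code_end,
    //                                                     /* can_read_literals */ true,
    //                                                     &PrintRawThreadOffset);
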
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 4f0e144..a47b6ad 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -25,7 +25,6 @@
#include "base/bit_utils.h"
#include "base/logging.h"
#include "base/stringprintf.h"
-#include "thread.h"
namespace art {
namespace arm {
@@ -329,7 +328,7 @@
}
if (rn.r == 9) {
args << " ; ";
- Thread::DumpThreadOffset<kArmPointerSize>(args, offset);
+ GetDisassemblerOptions()->thread_offset_name_function_(args, offset);
}
}
}
@@ -1401,7 +1400,7 @@
args << Rt << ", [" << Rn << ", #" << (U != 0u ? "" : "-") << imm12 << "]";
if (Rn.r == TR && is_load) {
args << " ; ";
- Thread::DumpThreadOffset<kArmPointerSize>(args, imm12);
+ GetDisassemblerOptions()->thread_offset_name_function_(args, imm12);
} else if (Rn.r == PC) {
T2LitType lit_type[] = {
kT2LitUByte, kT2LitUHalf, kT2LitHexWord, kT2LitInvalid,
diff --git a/disassembler/disassembler_arm64.cc b/disassembler/disassembler_arm64.cc
index 0ef9025..80bacb2 100644
--- a/disassembler/disassembler_arm64.cc
+++ b/disassembler/disassembler_arm64.cc
@@ -22,7 +22,6 @@
#include "base/logging.h"
#include "base/stringprintf.h"
-#include "thread.h"
using namespace vixl::aarch64; // NOLINT(build/namespaces)
@@ -102,7 +101,7 @@
if (instr->GetRn() == TR) {
int64_t offset = instr->GetImmLSUnsigned() << instr->GetSizeLS();
std::ostringstream tmp_stream;
- Thread::DumpThreadOffset<kArm64PointerSize>(tmp_stream, static_cast<uint32_t>(offset));
+ options_->thread_offset_name_function_(tmp_stream, static_cast<uint32_t>(offset));
AppendToOutput(" ; %s", tmp_stream.str().c_str());
}
}
diff --git a/disassembler/disassembler_arm64.h b/disassembler/disassembler_arm64.h
index 7c64792..19e4dfb 100644
--- a/disassembler/disassembler_arm64.h
+++ b/disassembler/disassembler_arm64.h
@@ -35,7 +35,8 @@
: vixl::aarch64::Disassembler(),
read_literals_(options->can_read_literals_),
base_address_(options->base_address_),
- end_address_(options->end_address_) {
+ end_address_(options->end_address_),
+ options_(options) {
if (!options->absolute_addresses_) {
MapCodeAddress(0,
reinterpret_cast<const vixl::aarch64::Instruction*>(options->base_address_));
@@ -64,6 +65,8 @@
// Valid address range: [base_address_, end_address_)
const void* const base_address_;
const void* const end_address_;
+
+ DisassemblerOptions* options_;
};
class DisassemblerArm64 FINAL : public Disassembler {
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index 3448878..02c6d71 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -21,7 +21,6 @@
#include "base/logging.h"
#include "base/stringprintf.h"
-#include "thread.h"
namespace art {
namespace mips {
@@ -503,11 +502,7 @@
args << StringPrintf("%+d(r%d)", offset, rs);
if (rs == 17) {
args << " ; ";
- if (is64bit_) {
- Thread::DumpThreadOffset<kMips64PointerSize>(args, offset);
- } else {
- Thread::DumpThreadOffset<kMipsPointerSize>(args, offset);
- }
+ GetDisassemblerOptions()->thread_offset_name_function_(args, offset);
}
}
break;
diff --git a/disassembler/disassembler_mips.h b/disassembler/disassembler_mips.h
index b0e49b3..6342f22 100644
--- a/disassembler/disassembler_mips.h
+++ b/disassembler/disassembler_mips.h
@@ -26,9 +26,8 @@
class DisassemblerMips FINAL : public Disassembler {
public:
- DisassemblerMips(DisassemblerOptions* options, bool is64bit)
+ explicit DisassemblerMips(DisassemblerOptions* options)
: Disassembler(options),
- is64bit_(is64bit),
last_ptr_(nullptr),
last_instr_(0) {}
@@ -36,8 +35,6 @@
void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
private:
- const bool is64bit_;
-
// Address and encoding of the last disassembled instruction.
// Needed to produce more readable disassembly of certain 2-instruction sequences.
const uint8_t* last_ptr_;
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index 147e0b1..2ca84e5 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -23,7 +23,6 @@
#include "base/logging.h"
#include "base/stringprintf.h"
-#include "thread.h"
namespace art {
namespace x86 {
@@ -1409,11 +1408,11 @@
}
if (prefix[1] == kFs && !supports_rex_) {
args << " ; ";
- Thread::DumpThreadOffset<kX86PointerSize>(args, address_bits);
+ GetDisassemblerOptions()->thread_offset_name_function_(args, address_bits);
}
if (prefix[1] == kGs && supports_rex_) {
args << " ; ";
- Thread::DumpThreadOffset<kX86_64PointerSize>(args, address_bits);
+ GetDisassemblerOptions()->thread_offset_name_function_(args, address_bits);
}
const char* prefix_str;
switch (prefix[0]) {
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 77730b9..96c8e94 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -335,10 +335,14 @@
resolved_addr2instr_(0),
instruction_set_(oat_file_.GetOatHeader().GetInstructionSet()),
disassembler_(Disassembler::Create(instruction_set_,
- new DisassemblerOptions(options_.absolute_addresses_,
- oat_file.Begin(),
- oat_file.End(),
- true /* can_read_literals_ */))) {
+ new DisassemblerOptions(
+ options_.absolute_addresses_,
+ oat_file.Begin(),
+ oat_file.End(),
+ true /* can_read_literals_ */,
+ Is64BitInstructionSet(instruction_set_)
+ ? &Thread::DumpThreadOffset<PointerSize::k64>
+ : &Thread::DumpThreadOffset<PointerSize::k32>))) {
CHECK(options_.class_loader_ != nullptr);
CHECK(options_.class_filter_ != nullptr);
CHECK(options_.method_filter_ != nullptr);
@@ -1402,7 +1406,7 @@
const std::vector<const OatFile::OatDexFile*> oat_dex_files_;
const OatDumperOptions& options_;
uint32_t resolved_addr2instr_;
- InstructionSet instruction_set_;
+ const InstructionSet instruction_set_;
std::set<uintptr_t> offsets_;
Disassembler* disassembler_;
};
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 9432384..3f6531b 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -37,6 +37,7 @@
#include "gc/space/image_space.h"
#include "image-inl.h"
#include "mirror/abstract_method.h"
+#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/method.h"
#include "mirror/reference.h"
@@ -592,8 +593,8 @@
// 64-bit values here, clearing the top 32 bits for 32-bit targets. The zero-extension is
// done by casting to the unsigned type uintptr_t before casting to int64_t, i.e.
// static_cast<int64_t>(reinterpret_cast<uintptr_t>(image_begin_ + offset))).
- GcRoot<mirror::String>* orig_strings = orig_dex_cache->GetStrings();
- GcRoot<mirror::String>* relocated_strings = RelocatedAddressOfPointer(orig_strings);
+ mirror::StringDexCacheType* orig_strings = orig_dex_cache->GetStrings();
+ mirror::StringDexCacheType* relocated_strings = RelocatedAddressOfPointer(orig_strings);
copy_dex_cache->SetField64<false>(
mirror::DexCache::StringsOffset(),
static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_strings)));
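
Note: the comment kept in both image_writer and patchoat describes the same trick: launder the pointer through uintptr_t before widening, so 32-bit pointers are zero-extended, not sign-extended, into the 64-bit field. Spelled out as a helper (the function name is hypothetical; the cast chain is the one from the comment):

    #include <cstdint>

    template <typename T>
    int64_t PointerToField64(T* ptr) {
      // uintptr_t first: unsigned, so the widening to int64_t cannot sign-extend.
      return static_cast<int64_t>(reinterpret_cast<uintptr_t>(ptr));
    }
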
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index c4ec726..11357b5 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -191,7 +191,7 @@
.cfi_rel_offset r11, 44
.cfi_rel_offset ip, 48
.cfi_rel_offset lr, 52
- vpush {d0-d15} @ 32 words of float args.
+ vpush {d0-d15} @ 32 words, 2 for each of the 16 saved doubles.
.cfi_adjust_cfa_offset 128
sub sp, #8 @ 2 words of space, alignment padding and Method*
.cfi_adjust_cfa_offset 8
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 4289cab..3e6fbaf 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -331,6 +331,7 @@
#endif
// Save FP registers.
+ // For better performance, store d0 and d31 separately, so that all STPs are 16-byte aligned.
str d0, [sp, #8]
stp d1, d2, [sp, #16]
stp d3, d4, [sp, #32]
@@ -431,6 +432,7 @@
.macro RESTORE_SAVE_EVERYTHING_FRAME
// Restore FP registers.
+ // For better performance, load d0 and d31 separately, so that all LDPs are 16-byte aligned.
ldr d0, [sp, #8]
ldp d1, d2, [sp, #16]
ldp d3, d4, [sp, #32]
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 32768b0..3266d86 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1330,7 +1330,38 @@
ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeInitializedRegionTLAB
END_FUNCTION art_quick_alloc_object_initialized_region_tlab
-ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+DEFINE_FUNCTION art_quick_resolve_string
+ movq 8(%rsp), %rcx // get referrer
+ movl ART_METHOD_DECLARING_CLASS_OFFSET(%rcx), %ecx // get declaring class
+ movq DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET(%ecx), %rcx // get string dex cache
+ movq LITERAL(STRING_DEX_CACHE_SIZE_MINUS_ONE), %rdx
+ andq %rdi, %rdx
+ shlq LITERAL(STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT), %rdx
+ addq %rcx, %rdx
+ movq %rax, %rcx
+ movq (%rdx), %rdx
+ movq %rdx, %rax
+ movl %eax, %eax
+ shrq LITERAL(32), %rdx
+ cmp %rdx, %rdi
+ jne .Lart_quick_resolve_string_slow_path
+#ifdef USE_READ_BARRIER
+ testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%rax)
+ jz .Lart_quick_resolve_string_slow_path
+#endif
+ ret
+
+.Lart_quick_resolve_string_slow_path:
+ SETUP_SAVE_REFS_ONLY_FRAME
+ movq %rcx, %rax
+ // Outgoing argument set up
+ movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
+
+ call artResolveStringFromCode // artResolveStringFromCode(arg0, referrer, Thread*)
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END_FUNCTION art_quick_resolve_string
+
ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
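
Note: taken together with the generated constants further down (a 1024-entry cache of 8-byte elements), the new art_quick_resolve_string fast path indexes a power-of-two cache of packed <reference, string index> pairs and falls back to artResolveStringFromCode on a mismatch. A rough, hedged C++ rendering of that fast path; the pair layout (low 32 bits = compressed String reference, high 32 bits = dex string index) is inferred from the shrq/cmp sequence and is an assumption here:

    #include <atomic>
    #include <cstdint>

    constexpr uint32_t kStringDexCacheSizeMinusOne = 1023;  // STRING_DEX_CACHE_SIZE_MINUS_ONE

    inline uint32_t ResolveStringFast(const std::atomic<uint64_t>* dex_cache_strings,
                                      uint32_t string_idx) {
      uint64_t packed =
          dex_cache_strings[string_idx & kStringDexCacheSizeMinusOne].load(std::memory_order_relaxed);
      uint32_t cached_ref = static_cast<uint32_t>(packed);        // movl %eax, %eax
      uint32_t cached_idx = static_cast<uint32_t>(packed >> 32);  // shrq $32, %rdx
      if (cached_idx != string_idx) {
        return 0u;  // miss: the stub takes the slow path into artResolveStringFromCode
      }
      // The real stub additionally checks the object's lock-word mark bit under
      // USE_READ_BARRIER before trusting the cached reference.
      return cached_ref;
    }
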
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index d812590..f9bc249 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -477,7 +477,7 @@
DCHECK(method_header->Contains(pc))
<< PrettyMethod(this)
- << std::hex << pc << " " << oat_entry_point
+ << " " << std::hex << pc << " " << oat_entry_point
<< " " << (uintptr_t)(method_header->code_ + method_header->code_size_);
return method_header;
}
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 848f8e5..102b993 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -19,12 +19,15 @@
#if defined(__cplusplus)
#include "art_method.h"
+#include "base/bit_utils.h"
#include "gc/allocator/rosalloc.h"
#include "gc/heap.h"
#include "jit/jit.h"
#include "lock_word.h"
#include "mirror/class.h"
+#include "mirror/dex_cache.h"
#include "mirror/string.h"
+#include "utils/dex_cache_arrays_layout.h"
#include "runtime.h"
#include "thread.h"
#endif
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index b84e29f..aeb990c 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -163,6 +163,7 @@
MallocArena::MallocArena(size_t size) {
memory_ = reinterpret_cast<uint8_t*>(calloc(1, size));
CHECK(memory_ != nullptr); // Abort on OOM.
+ DCHECK_ALIGNED(memory_, ArenaAllocator::kAlignment);
size_ = size;
}
@@ -370,6 +371,7 @@
arena_head_ = new_arena;
// Update our internal data structures.
begin_ = new_arena->Begin();
+ DCHECK_ALIGNED(begin_, kAlignment);
ptr_ = begin_ + bytes;
end_ = new_arena->End();
}
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 6c1a898..3fad96b 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -310,6 +310,7 @@
return AllocFromNewArena(bytes);
}
uint8_t* ret = ptr_;
+ DCHECK_ALIGNED(ret, kAlignment);
ptr_ += bytes;
return ret;
}
@@ -319,20 +320,24 @@
ArenaAllocKind kind = kArenaAllocMisc) ALWAYS_INLINE {
DCHECK_GE(new_size, ptr_size);
DCHECK_EQ(ptr == nullptr, ptr_size == 0u);
- auto* end = reinterpret_cast<uint8_t*>(ptr) + ptr_size;
+ // We always allocate aligned.
+ const size_t aligned_ptr_size = RoundUp(ptr_size, kAlignment);
+ auto* end = reinterpret_cast<uint8_t*>(ptr) + aligned_ptr_size;
// If we haven't allocated anything else, we can safely extend.
if (end == ptr_) {
DCHECK(!IsRunningOnMemoryTool()); // Red zone prevents end == ptr_.
- const size_t size_delta = new_size - ptr_size;
+ const size_t aligned_new_size = RoundUp(new_size, kAlignment);
+ const size_t size_delta = aligned_new_size - aligned_ptr_size;
// Check remain space.
const size_t remain = end_ - ptr_;
if (remain >= size_delta) {
ptr_ += size_delta;
ArenaAllocatorStats::RecordAlloc(size_delta, kind);
+ DCHECK_ALIGNED(ptr_, kAlignment);
return ptr;
}
}
- auto* new_ptr = Alloc(new_size, kind);
+ auto* new_ptr = Alloc(new_size, kind); // Note: Alloc will take care of aligning new_size.
memcpy(new_ptr, ptr, ptr_size);
// TODO: Call free on ptr if linear alloc supports free.
return new_ptr;
@@ -362,11 +367,12 @@
bool Contains(const void* ptr) const;
+ static constexpr size_t kAlignment = 8;
+
private:
void* AllocWithMemoryTool(size_t bytes, ArenaAllocKind kind);
uint8_t* AllocFromNewArena(size_t bytes);
- static constexpr size_t kAlignment = 8;
void UpdateBytesAllocated();
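
Note: the Realloc change above rounds both the old and new sizes up to kAlignment before deciding whether the most recent allocation can simply be grown in place. A toy bump-pointer version of that decision (not ART's ArenaAllocator; all names are illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    constexpr size_t kAlignment = 8;

    inline size_t RoundUpTo(size_t x, size_t n) {  // n must be a power of two
      return (x + n - 1) & ~(n - 1);
    }

    struct BumpArena {
      uint8_t* ptr;  // next free byte, kept kAlignment-aligned
      uint8_t* end;  // end of the current chunk

      void* Alloc(size_t bytes) {
        bytes = RoundUpTo(bytes, kAlignment);
        if (static_cast<size_t>(end - ptr) < bytes) return nullptr;  // real code opens a new arena
        uint8_t* ret = ptr;
        ptr += bytes;
        return ret;
      }

      void* Realloc(void* old, size_t old_size, size_t new_size) {
        // If `old` was the most recent allocation, its aligned end is exactly `ptr`,
        // so growing it is just bumping the cursor by the aligned size delta.
        uint8_t* old_end = static_cast<uint8_t*>(old) + RoundUpTo(old_size, kAlignment);
        size_t delta = RoundUpTo(new_size, kAlignment) - RoundUpTo(old_size, kAlignment);
        if (old_end == ptr && static_cast<size_t>(end - ptr) >= delta) {
          ptr += delta;
          return old;
        }
        void* fresh = Alloc(new_size);
        if (fresh != nullptr && old_size != 0) std::memcpy(fresh, old, old_size);
        return fresh;
      }
    };
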
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index f2575f7..97aa499 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -27,6 +27,8 @@
#include "mirror/object_array.h"
#include "handle_scope-inl.h"
+#include <atomic>
+
namespace art {
inline mirror::Class* ClassLinker::FindSystemClass(Thread* self, const char* descriptor) {
@@ -63,18 +65,21 @@
inline mirror::String* ClassLinker::ResolveString(uint32_t string_idx, ArtMethod* referrer) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
// MethodVerifier refuses methods with string_idx out of bounds.
- DCHECK_LT(string_idx, declaring_class->GetDexCache()->NumStrings());
- mirror::String* resolved_string = declaring_class->GetDexCacheStrings()[string_idx].Read();
- if (UNLIKELY(resolved_string == nullptr)) {
+ DCHECK_LT(string_idx, declaring_class->GetDexFile().NumStringIds());
+ mirror::String* string =
+ mirror::StringDexCachePair::LookupString(declaring_class->GetDexCacheStrings(),
+ string_idx,
+ mirror::DexCache::kDexCacheStringCacheSize).Read();
+ if (UNLIKELY(string == nullptr)) {
StackHandleScope<1> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
const DexFile& dex_file = *dex_cache->GetDexFile();
- resolved_string = ResolveString(dex_file, string_idx, dex_cache);
- if (resolved_string != nullptr) {
- DCHECK_EQ(dex_cache->GetResolvedString(string_idx), resolved_string);
+ string = ResolveString(dex_file, string_idx, dex_cache);
+ if (string != nullptr) {
+ DCHECK_EQ(dex_cache->GetResolvedString(string_idx), string);
}
}
- return resolved_string;
+ return string;
}
inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx, ArtMethod* referrer) {
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 4d48da6..1a3bba5 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -66,6 +66,7 @@
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/field.h"
#include "mirror/iftable-inl.h"
@@ -1271,7 +1272,10 @@
// If the oat file expects the dex cache arrays to be in the BSS, then allocate there and
// copy over the arrays.
DCHECK(dex_file != nullptr);
- const size_t num_strings = dex_file->NumStringIds();
+ size_t num_strings = mirror::DexCache::kDexCacheStringCacheSize;
+ if (dex_file->NumStringIds() < num_strings) {
+ num_strings = dex_file->NumStringIds();
+ }
const size_t num_types = dex_file->NumTypeIds();
const size_t num_methods = dex_file->NumMethodIds();
const size_t num_fields = dex_file->NumFieldIds();
@@ -1281,16 +1285,17 @@
CHECK_EQ(num_fields, dex_cache->NumResolvedFields());
DexCacheArraysLayout layout(image_pointer_size_, dex_file);
uint8_t* const raw_arrays = oat_dex_file->GetDexCacheArrays();
- // The space is not yet visible to the GC, we can avoid the read barriers and use
- // std::copy_n.
if (num_strings != 0u) {
- GcRoot<mirror::String>* const image_resolved_strings = dex_cache->GetStrings();
- GcRoot<mirror::String>* const strings =
- reinterpret_cast<GcRoot<mirror::String>*>(raw_arrays + layout.StringsOffset());
- for (size_t j = 0; kIsDebugBuild && j < num_strings; ++j) {
- DCHECK(strings[j].IsNull());
+ mirror::StringDexCacheType* const image_resolved_strings = dex_cache->GetStrings();
+ mirror::StringDexCacheType* const strings =
+ reinterpret_cast<mirror::StringDexCacheType*>(raw_arrays + layout.StringsOffset());
+ for (size_t j = 0; j < num_strings; ++j) {
+ DCHECK_EQ(strings[j].load(std::memory_order_relaxed).string_index, 0u);
+ DCHECK(strings[j].load(std::memory_order_relaxed).string_pointer.IsNull());
+ strings[j].store(image_resolved_strings[j].load(std::memory_order_relaxed),
+ std::memory_order_relaxed);
}
- std::copy_n(image_resolved_strings, num_strings, strings);
+ mirror::StringDexCachePair::Initialize(strings);
dex_cache->SetStrings(strings);
}
if (num_types != 0u) {
@@ -1473,14 +1478,14 @@
bool operator()(mirror::Class* klass) const SHARED_REQUIRES(Locks::mutator_lock_) {
if (forward_strings_) {
- GcRoot<mirror::String>* strings = klass->GetDexCacheStrings();
+ mirror::StringDexCacheType* strings = klass->GetDexCacheStrings();
if (strings != nullptr) {
DCHECK(
space_->GetImageHeader().GetImageSection(ImageHeader::kSectionDexCacheArrays).Contains(
reinterpret_cast<uint8_t*>(strings) - space_->Begin()))
<< "String dex cache array for " << PrettyClass(klass) << " is not in app image";
// Dex caches have already been updated, so take the strings pointer from there.
- GcRoot<mirror::String>* new_strings = klass->GetDexCache()->GetStrings();
+ mirror::StringDexCacheType* new_strings = klass->GetDexCache()->GetStrings();
DCHECK_NE(strings, new_strings);
klass->SetDexCacheStrings(new_strings);
}
@@ -2079,18 +2084,31 @@
// Zero-initialized.
raw_arrays = reinterpret_cast<uint8_t*>(linear_alloc->Alloc(self, layout.Size()));
}
- GcRoot<mirror::String>* strings = (dex_file.NumStringIds() == 0u) ? nullptr :
- reinterpret_cast<GcRoot<mirror::String>*>(raw_arrays + layout.StringsOffset());
+ mirror::StringDexCacheType* strings = (dex_file.NumStringIds() == 0u) ? nullptr :
+ reinterpret_cast<mirror::StringDexCacheType*>(raw_arrays + layout.StringsOffset());
GcRoot<mirror::Class>* types = (dex_file.NumTypeIds() == 0u) ? nullptr :
reinterpret_cast<GcRoot<mirror::Class>*>(raw_arrays + layout.TypesOffset());
ArtMethod** methods = (dex_file.NumMethodIds() == 0u) ? nullptr :
reinterpret_cast<ArtMethod**>(raw_arrays + layout.MethodsOffset());
ArtField** fields = (dex_file.NumFieldIds() == 0u) ? nullptr :
reinterpret_cast<ArtField**>(raw_arrays + layout.FieldsOffset());
+ size_t num_strings = mirror::DexCache::kDexCacheStringCacheSize;
+ if (dex_file.NumStringIds() < num_strings) {
+ num_strings = dex_file.NumStringIds();
+ }
+ DCHECK_ALIGNED(raw_arrays, alignof(mirror::StringDexCacheType)) <<
+ "Expected raw_arrays to align to StringDexCacheType.";
+ DCHECK_ALIGNED(layout.StringsOffset(), alignof(mirror::StringDexCacheType)) <<
+ "Expected StringsOffset() to align to StringDexCacheType.";
+ DCHECK_ALIGNED(strings, alignof(mirror::StringDexCacheType)) <<
+ "Expected strings to align to StringDexCacheType.";
+ static_assert(alignof(mirror::StringDexCacheType) == 8u,
+ "Expected StringDexCacheType to have align of 8.");
if (kIsDebugBuild) {
// Sanity check to make sure all the dex cache arrays are empty. b/28992179
- for (size_t i = 0; i < dex_file.NumStringIds(); ++i) {
- CHECK(strings[i].Read<kWithoutReadBarrier>() == nullptr);
+ for (size_t i = 0; i < num_strings; ++i) {
+ CHECK_EQ(strings[i].load(std::memory_order_relaxed).string_index, 0u);
+ CHECK(strings[i].load(std::memory_order_relaxed).string_pointer.IsNull());
}
for (size_t i = 0; i < dex_file.NumTypeIds(); ++i) {
CHECK(types[i].Read<kWithoutReadBarrier>() == nullptr);
@@ -2102,10 +2120,13 @@
CHECK(mirror::DexCache::GetElementPtrSize(fields, i, image_pointer_size_) == nullptr);
}
}
+ if (strings != nullptr) {
+ mirror::StringDexCachePair::Initialize(strings);
+ }
dex_cache->Init(&dex_file,
location.Get(),
strings,
- dex_file.NumStringIds(),
+ num_strings,
types,
dex_file.NumTypeIds(),
methods,
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 2a5198b..89bebb4 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -4059,7 +4059,7 @@
// Prepare JDWP ids for the reply.
JDWP::JdwpTag result_tag = BasicTagFromDescriptor(m->GetShorty());
const bool is_object_result = (result_tag == JDWP::JT_OBJECT);
- StackHandleScope<2> hs(soa.Self());
+ StackHandleScope<3> hs(soa.Self());
Handle<mirror::Object> object_result = hs.NewHandle(is_object_result ? result.GetL() : nullptr);
Handle<mirror::Throwable> exception = hs.NewHandle(soa.Self()->GetException());
soa.Self()->ClearException();
@@ -4098,10 +4098,17 @@
// unless we threw, in which case we return null.
DCHECK_EQ(JDWP::JT_VOID, result_tag);
if (exceptionObjectId == 0) {
- // TODO we could keep the receiver ObjectId in the DebugInvokeReq to avoid looking into the
- // object registry.
- result_value = GetObjectRegistry()->Add(pReq->receiver.Read());
- result_tag = TagFromObject(soa, pReq->receiver.Read());
+ if (m->GetDeclaringClass()->IsStringClass()) {
+ // For string constructors, the new string is remapped to the receiver (stored in ref).
+ Handle<mirror::Object> decoded_ref = hs.NewHandle(soa.Self()->DecodeJObject(ref.get()));
+ result_value = gRegistry->Add(decoded_ref);
+ result_tag = TagFromObject(soa, decoded_ref.Get());
+ } else {
+ // TODO we could keep the receiver ObjectId in the DebugInvokeReq to avoid looking into the
+ // object registry.
+ result_value = GetObjectRegistry()->Add(pReq->receiver.Read());
+ result_tag = TagFromObject(soa, pReq->receiver.Read());
+ }
} else {
result_value = 0;
result_tag = JDWP::JT_OBJECT;
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 7afe6f9..42816a0 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -435,10 +435,8 @@
gc_barrier_->Init(self, 0);
ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
FlipCallback flip_callback(this);
- heap_->ThreadFlipBegin(self); // Sync with JNI critical calls.
size_t barrier_count = Runtime::Current()->FlipThreadRoots(
&thread_flip_visitor, &flip_callback, this);
- heap_->ThreadFlipEnd(self);
{
ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
gc_barrier_->Increment(self, barrier_count);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 39f26e7..638c1d8 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -878,9 +878,13 @@
MutexLock mu(self, *thread_flip_lock_);
bool has_waited = false;
uint64_t wait_start = NanoTime();
- while (thread_flip_running_) {
- has_waited = true;
- thread_flip_cond_->Wait(self);
+ if (thread_flip_running_) {
+ TimingLogger::ScopedTiming split("IncrementDisableThreadFlip",
+ GetCurrentGcIteration()->GetTimings());
+ while (thread_flip_running_) {
+ has_waited = true;
+ thread_flip_cond_->Wait(self);
+ }
}
++disable_thread_flip_count_;
if (has_waited) {
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 4505c24..ae6c321 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -1197,9 +1197,9 @@
for (int32_t i = 0, count = dex_caches->GetLength(); i < count; ++i) {
mirror::DexCache* dex_cache = dex_caches->Get<kVerifyNone, kWithoutReadBarrier>(i);
// Fix up dex cache pointers.
- GcRoot<mirror::String>* strings = dex_cache->GetStrings();
+ mirror::StringDexCacheType* strings = dex_cache->GetStrings();
if (strings != nullptr) {
- GcRoot<mirror::String>* new_strings = fixup_adapter.ForwardObject(strings);
+ mirror::StringDexCacheType* new_strings = fixup_adapter.ForwardObject(strings);
if (strings != new_strings) {
dex_cache->SetStrings(new_strings);
}
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index 716c23d..40b71c4 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -70,6 +70,16 @@
DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_32), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k32).Int32Value())))
#define ART_METHOD_QUICK_CODE_OFFSET_64 48
DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_64), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k64).Int32Value())))
+#define ART_METHOD_DECLARING_CLASS_OFFSET 0
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_DECLARING_CLASS_OFFSET), (static_cast<int32_t>(art::ArtMethod:: DeclaringClassOffset().Int32Value())))
+#define DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET 40
+DEFINE_CHECK_EQ(static_cast<int32_t>(DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET), (static_cast<int32_t>(art::mirror::Class:: DexCacheStringsOffset().Int32Value())))
+#define STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT 3
+DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT), (static_cast<int32_t>(art::WhichPowerOf2(sizeof(art::mirror::StringDexCachePair)))))
+#define STRING_DEX_CACHE_SIZE_MINUS_ONE 1023
+DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_SIZE_MINUS_ONE), (static_cast<int32_t>(art::mirror::DexCache::kDexCacheStringCacheSize - 1)))
+#define STRING_DEX_CACHE_HASH_BITS 10
+DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_HASH_BITS), (static_cast<int32_t>(art::LeastSignificantBit(art::mirror::DexCache::kDexCacheStringCacheSize))))
#define MIN_LARGE_OBJECT_THRESHOLD 0x3000
DEFINE_CHECK_EQ(static_cast<size_t>(MIN_LARGE_OBJECT_THRESHOLD), (static_cast<size_t>(art::gc::Heap::kMinLargeObjectThreshold)))
#define LOCK_WORD_STATE_SHIFT 30
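
Note: the new string-cache constants above are all consequences of a 1024-entry cache of 8-byte packed pairs; a quick standalone check of that arithmetic (illustrative, not generated code):

    #include <cstdint>

    // 1024 entries -> mask of 1023 and 10 hash bits; an 8-byte <ref, index> pair -> shift of 3.
    static_assert(1023 + 1 == (1u << 10),
                  "STRING_DEX_CACHE_SIZE_MINUS_ONE + 1 must equal 2^STRING_DEX_CACHE_HASH_BITS");
    static_assert(sizeof(uint64_t) == (1u << 3),
                  "an 8-byte pair matches STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT of 3");
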
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 4fd1514..90c8227 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -23,6 +23,7 @@
#include <iostream>
#include <sstream>
+#include <atomic>
#include "art_field-inl.h"
#include "art_method-inl.h"
@@ -37,6 +38,8 @@
#include "handle_scope-inl.h"
#include "jit/jit.h"
#include "mirror/class-inl.h"
+#include "mirror/dex_cache.h"
+#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
@@ -264,15 +267,20 @@
ArtMethod* method = shadow_frame.GetMethod();
mirror::Class* declaring_class = method->GetDeclaringClass();
// MethodVerifier refuses methods with string_idx out of bounds.
- DCHECK_LT(string_idx, declaring_class->GetDexCache()->NumStrings());
- mirror::String* s = declaring_class->GetDexCacheStrings()[string_idx].Read();
- if (UNLIKELY(s == nullptr)) {
+ DCHECK_LT(string_idx % mirror::DexCache::kDexCacheStringCacheSize,
+ declaring_class->GetDexFile().NumStringIds());
+ mirror::String* string_ptr =
+ mirror::StringDexCachePair::LookupString(declaring_class->GetDexCacheStrings(),
+ string_idx,
+ mirror::DexCache::kDexCacheStringCacheSize).Read();
+ if (UNLIKELY(string_ptr == nullptr)) {
StackHandleScope<1> hs(self);
Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
- s = Runtime::Current()->GetClassLinker()->ResolveString(*method->GetDexFile(), string_idx,
- dex_cache);
+ string_ptr = Runtime::Current()->GetClassLinker()->ResolveString(*method->GetDexFile(),
+ string_idx,
+ dex_cache);
}
- return s;
+ return string_ptr;
}
// Handles div-int, div-int/2addr, div-int/lit16 and div-int/lit8 instructions.
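
The interpreter change above replaces the direct NumStrings()-indexed read with a cache probe followed by a slow-path ResolveString call. A self-contained sketch of that probe-then-resolve shape, with plain standard types standing in for GcRoot, mirror::String, and ClassLinker (all names below are illustrative):

    #include <array>
    #include <atomic>
    #include <cstdint>
    #include <string>

    struct Entry { const std::string* str; uint32_t index; };
    constexpr uint32_t kCacheSize = 1024;  // mirrors kDexCacheStringCacheSize

    // Stand-in for the slow path (ClassLinker::ResolveString in the patch).
    static const std::string kPool[] = {"zero", "one", "two"};
    const std::string* SlowResolve(uint32_t idx) { return &kPool[idx % 3]; }

    // The cache array is assumed zero-initialized, matching the {0,0} "unpopulated" state.
    const std::string* GetString(std::array<std::atomic<Entry>, kCacheSize>& cache, uint32_t idx) {
      Entry e = cache[idx % kCacheSize].load(std::memory_order_relaxed);
      const std::string* s = (e.index == idx) ? e.str : nullptr;  // fast path: index match is a hit
      if (s == nullptr) {
        s = SlowResolve(idx);                                     // slow path resolves...
        cache[idx % kCacheSize].store(Entry{s, idx}, std::memory_order_relaxed);  // ...and caches
      }
      return s;
    }
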
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 8ad47eb..0f2aac2 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -26,7 +26,6 @@
#include "base/length_prefixed_array.h"
#include "class_loader.h"
#include "common_throws.h"
-#include "dex_cache.h"
#include "dex_file.h"
#include "gc/heap-inl.h"
#include "iftable.h"
@@ -899,12 +898,12 @@
}
}
-inline void Class::SetDexCacheStrings(GcRoot<String>* new_dex_cache_strings) {
+inline void Class::SetDexCacheStrings(StringDexCacheType* new_dex_cache_strings) {
SetFieldPtr<false>(DexCacheStringsOffset(), new_dex_cache_strings);
}
-inline GcRoot<String>* Class::GetDexCacheStrings() {
- return GetFieldPtr<GcRoot<String>*>(DexCacheStringsOffset());
+inline StringDexCacheType* Class::GetDexCacheStrings() {
+ return GetFieldPtr64<StringDexCacheType*>(DexCacheStringsOffset());
}
template<ReadBarrierOption kReadBarrierOption, class Visitor>
@@ -1058,8 +1057,8 @@
dest->SetMethodsPtrInternal(new_methods);
}
// Update dex cache strings.
- GcRoot<mirror::String>* strings = GetDexCacheStrings();
- GcRoot<mirror::String>* new_strings = visitor(strings);
+ StringDexCacheType* strings = GetDexCacheStrings();
+ StringDexCacheType* new_strings = visitor(strings);
if (strings != new_strings) {
dest->SetDexCacheStrings(new_strings);
}
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 978fc4c..e2cd649 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -54,6 +54,9 @@
class DexCache;
class IfTable;
class Method;
+struct StringDexCachePair;
+
+using StringDexCacheType = std::atomic<mirror::StringDexCachePair>;
// C++ mirror of java.lang.Class
class MANAGED Class FINAL : public Object {
@@ -1219,8 +1222,8 @@
bool GetSlowPathEnabled() SHARED_REQUIRES(Locks::mutator_lock_);
void SetSlowPath(bool enabled) SHARED_REQUIRES(Locks::mutator_lock_);
- GcRoot<String>* GetDexCacheStrings() SHARED_REQUIRES(Locks::mutator_lock_);
- void SetDexCacheStrings(GcRoot<String>* new_dex_cache_strings)
+ StringDexCacheType* GetDexCacheStrings() SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetDexCacheStrings(StringDexCacheType* new_dex_cache_strings)
SHARED_REQUIRES(Locks::mutator_lock_);
static MemberOffset DexCacheStringsOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_strings_);
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 84469ea..a3071b7 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -27,6 +27,8 @@
#include "mirror/class.h"
#include "runtime.h"
+#include <atomic>
+
namespace art {
namespace mirror {
@@ -35,15 +37,18 @@
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
}
-inline String* DexCache::GetResolvedString(uint32_t string_idx) {
- DCHECK_LT(string_idx, NumStrings());
- return GetStrings()[string_idx].Read();
+inline mirror::String* DexCache::GetResolvedString(uint32_t string_idx) {
+ DCHECK_LT(string_idx, GetDexFile()->NumStringIds());
+ return StringDexCachePair::LookupString(GetStrings(), string_idx, NumStrings()).Read();
}
-inline void DexCache::SetResolvedString(uint32_t string_idx, String* resolved) {
- DCHECK_LT(string_idx, NumStrings());
+inline void DexCache::SetResolvedString(uint32_t string_idx, mirror::String* resolved) {
+ DCHECK_LT(string_idx % NumStrings(), NumStrings());
// TODO default transaction support.
- GetStrings()[string_idx] = GcRoot<String>(resolved);
+ StringDexCachePair idx_ptr;
+ idx_ptr.string_index = string_idx;
+ idx_ptr.string_pointer = GcRoot<String>(resolved);
+ GetStrings()[string_idx % NumStrings()].store(idx_ptr, std::memory_order_relaxed);
// TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
}
@@ -131,9 +136,16 @@
VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
// Visit arrays after.
if (kVisitNativeRoots) {
- GcRoot<mirror::String>* strings = GetStrings();
+ mirror::StringDexCacheType* strings = GetStrings();
for (size_t i = 0, num_strings = NumStrings(); i != num_strings; ++i) {
- visitor.VisitRootIfNonNull(strings[i].AddressWithoutBarrier());
+ StringDexCachePair source = strings[i].load(std::memory_order_relaxed);
+ mirror::String* before = source.string_pointer.Read<kReadBarrierOption>();
+ GcRoot<mirror::String> root(before);
+ visitor.VisitRootIfNonNull(root.AddressWithoutBarrier());
+ if (root.Read() != before) {
+ source.string_pointer = GcRoot<String>(root.Read());
+ strings[i].store(source, std::memory_order_relaxed);
+ }
}
GcRoot<mirror::Class>* resolved_types = GetResolvedTypes();
for (size_t i = 0, num_types = NumResolvedTypes(); i != num_types; ++i) {
@@ -143,12 +155,14 @@
}
template <ReadBarrierOption kReadBarrierOption, typename Visitor>
-inline void DexCache::FixupStrings(GcRoot<mirror::String>* dest, const Visitor& visitor) {
- GcRoot<mirror::String>* src = GetStrings();
+inline void DexCache::FixupStrings(mirror::StringDexCacheType* dest, const Visitor& visitor) {
+ mirror::StringDexCacheType* src = GetStrings();
for (size_t i = 0, count = NumStrings(); i < count; ++i) {
- mirror::String* source = src[i].Read<kReadBarrierOption>();
- mirror::String* new_source = visitor(source);
- dest[i] = GcRoot<mirror::String>(new_source);
+ StringDexCachePair source = src[i].load(std::memory_order_relaxed);
+ mirror::String* ptr = source.string_pointer.Read<kReadBarrierOption>();
+ mirror::String* new_source = visitor(ptr);
+ source.string_pointer = GcRoot<String>(new_source);
+ dest[i].store(source, std::memory_order_relaxed);
}
}
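
VisitReferences and FixupStrings above follow the same shape: load the whole atomic pair, let the visitor possibly forward the reference, and write the pair back only when the pointer changed. A simplified sketch with a raw pointer in place of GcRoot (names are illustrative):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    struct Obj {};
    struct Pair { Obj* ptr; uint32_t index; };

    // visit(Obj*) may return a new (forwarded) address for the object, or the same one.
    template <typename Visitor>
    void VisitPairs(std::atomic<Pair>* pairs, size_t count, Visitor&& visit) {
      for (size_t i = 0; i < count; ++i) {
        Pair p = pairs[i].load(std::memory_order_relaxed);
        Obj* before = p.ptr;
        Obj* after = visit(before);
        if (after != before) {           // write back only when the root actually moved
          p.ptr = after;
          pairs[i].store(p, std::memory_order_relaxed);
        }
      }
    }
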
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index 57066d8..cfcec9c 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -33,7 +33,7 @@
void DexCache::Init(const DexFile* dex_file,
String* location,
- GcRoot<String>* strings,
+ StringDexCacheType* strings,
uint32_t num_strings,
GcRoot<Class>* resolved_types,
uint32_t num_resolved_types,
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index d02a0d8..770c45d 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -35,12 +35,62 @@
class String;
+struct PACKED(8) StringDexCachePair {
+ GcRoot<String> string_pointer;
+ uint32_t string_index;
+ // The array is initially [ {0,0}, {0,0}, {0,0} ... ]
+ // We maintain the invariant that once a dex cache entry is populated,
+ // the pointer is always non-0
+ // Any given entry would thus be:
+ // {non-0, non-0} OR {0,0}
+ //
+ // It's generally sufficient then to check if the
+ // lookup string index matches the stored string index (for a >0 string index)
+ // because if it's true the pointer is also non-null.
+ //
+ // For the 0th entry which is a special case, the value is either
+ // {0,0} (initial state) or {non-0, 0} which indicates
+ // that a valid string is stored at that index for a dex string id of 0.
+ //
+ // As an optimization, we want to avoid branching on the string pointer since
+ // it's always non-null if the string id branch succeeds (except for the 0th string id).
+ // Set the initial state for the 0th entry to be {0,1} which is guaranteed to fail
+ // the lookup string id == stored id branch.
+ static void Initialize(StringDexCacheType* strings) {
+ DCHECK(StringDexCacheType().is_lock_free());
+ mirror::StringDexCachePair first_elem;
+ first_elem.string_pointer = GcRoot<String>(nullptr);
+ first_elem.string_index = 1;
+ strings[0].store(first_elem, std::memory_order_relaxed);
+ }
+ static GcRoot<String> LookupString(StringDexCacheType* dex_cache,
+ uint32_t string_idx,
+ uint32_t cache_size) {
+ StringDexCachePair index_string = dex_cache[string_idx % cache_size]
+ .load(std::memory_order_relaxed);
+ if (string_idx != index_string.string_index) return GcRoot<String>(nullptr);
+ DCHECK(!index_string.string_pointer.IsNull());
+ return index_string.string_pointer;
+ }
+};
+using StringDexCacheType = std::atomic<StringDexCachePair>;
+
+
// C++ mirror of java.lang.DexCache.
class MANAGED DexCache FINAL : public Object {
public:
// Size of java.lang.DexCache.class.
static uint32_t ClassSize(PointerSize pointer_size);
+ // Size of string dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
+ static constexpr size_t kDexCacheStringCacheSize = 1024;
+ static_assert(IsPowerOfTwo(kDexCacheStringCacheSize),
+ "String dex cache size is not a power of 2.");
+
+ static constexpr size_t StaticStringSize() {
+ return kDexCacheStringCacheSize;
+ }
+
// Size of an instance of java.lang.DexCache not including referenced values.
static constexpr uint32_t InstanceSize() {
return sizeof(DexCache);
@@ -48,7 +98,7 @@
void Init(const DexFile* dex_file,
String* location,
- GcRoot<String>* strings,
+ StringDexCacheType* strings,
uint32_t num_strings,
GcRoot<Class>* resolved_types,
uint32_t num_resolved_types,
@@ -62,7 +112,7 @@
SHARED_REQUIRES(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
- void FixupStrings(GcRoot<mirror::String>* dest, const Visitor& visitor)
+ void FixupStrings(StringDexCacheType* dest, const Visitor& visitor)
SHARED_REQUIRES(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
@@ -109,10 +159,10 @@
return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_methods_);
}
- String* GetResolvedString(uint32_t string_idx) ALWAYS_INLINE
+ mirror::String* GetResolvedString(uint32_t string_idx) ALWAYS_INLINE
SHARED_REQUIRES(Locks::mutator_lock_);
- void SetResolvedString(uint32_t string_idx, String* resolved) ALWAYS_INLINE
+ void SetResolvedString(uint32_t string_idx, mirror::String* resolved) ALWAYS_INLINE
SHARED_REQUIRES(Locks::mutator_lock_);
Class* GetResolvedType(uint32_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_);
@@ -135,11 +185,11 @@
ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, PointerSize ptr_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- GcRoot<String>* GetStrings() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
- return GetFieldPtr<GcRoot<String>*>(StringsOffset());
+ StringDexCacheType* GetStrings() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ return GetFieldPtr64<StringDexCacheType*>(StringsOffset());
}
- void SetStrings(GcRoot<String>* strings) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetStrings(StringDexCacheType* strings) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
SetFieldPtr<false>(StringsOffset(), strings);
}
@@ -224,7 +274,8 @@
uint64_t resolved_fields_; // ArtField*, array with num_resolved_fields_ elements.
uint64_t resolved_methods_; // ArtMethod*, array with num_resolved_methods_ elements.
uint64_t resolved_types_; // GcRoot<Class>*, array with num_resolved_types_ elements.
- uint64_t strings_; // GcRoot<String>*, array with num_strings_ elements.
+ uint64_t strings_; // std::atomic<StringDexCachePair>*,
+ // array with num_strings_ elements.
uint32_t num_resolved_fields_; // Number of elements in the resolved_fields_ array.
uint32_t num_resolved_methods_; // Number of elements in the resolved_methods_ array.
uint32_t num_resolved_types_; // Number of elements in the resolved_types_ array.
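
A self-contained sketch of the cache scheme the comments above describe: a power-of-two array of atomic {pointer, index} pairs keyed by string index, with slot 0 primed to {null, 1} so an unresolved string id 0 still misses the index comparison. The sketch uses a raw pointer for clarity; in ART the reference is a 32-bit GcRoot, which is why the real pair fits in 8 bytes.

    #include <atomic>
    #include <cstdint>

    struct MyString {};  // stand-in for mirror::String

    struct CachePair {
      MyString* pointer;  // non-null once a slot is populated
      uint32_t index;     // dex string index stored in this slot
    };
    using CacheSlot = std::atomic<CachePair>;
    constexpr uint32_t kCacheSize = 1024;  // must remain a power of two

    void Initialize(CacheSlot* cache) {
      // Prime slot 0 with {nullptr, 1}: a lookup of string id 0 then fails the index
      // comparison until a real entry is stored, so no extra null check is needed.
      cache[0].store(CachePair{nullptr, 1u}, std::memory_order_relaxed);
    }

    MyString* Lookup(CacheSlot* cache, uint32_t string_idx) {
      CachePair p = cache[string_idx % kCacheSize].load(std::memory_order_relaxed);
      return (p.index == string_idx) ? p.pointer : nullptr;  // also misses on an evicted slot
    }

    void Store(CacheSlot* cache, uint32_t string_idx, MyString* resolved) {
      cache[string_idx % kCacheSize].store(CachePair{resolved, string_idx},
                                           std::memory_order_relaxed);
    }
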
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 48f2ca5..175997c 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -22,6 +22,7 @@
#include "common_runtime_test.h"
#include "linear_alloc.h"
#include "mirror/class_loader-inl.h"
+#include "mirror/dex_cache-inl.h"
#include "handle_scope-inl.h"
#include "scoped_thread_state_change.h"
@@ -40,7 +41,8 @@
Runtime::Current()->GetLinearAlloc())));
ASSERT_TRUE(dex_cache.Get() != nullptr);
- EXPECT_EQ(java_lang_dex_file_->NumStringIds(), dex_cache->NumStrings());
+ EXPECT_TRUE(dex_cache->StaticStringSize() == dex_cache->NumStrings()
+ || java_lang_dex_file_->NumStringIds() == dex_cache->NumStrings());
EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), dex_cache->NumResolvedTypes());
EXPECT_EQ(java_lang_dex_file_->NumMethodIds(), dex_cache->NumResolvedMethods());
EXPECT_EQ(java_lang_dex_file_->NumFieldIds(), dex_cache->NumResolvedFields());
diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc
index 994ccb1..f0140a3 100644
--- a/runtime/native/java_lang_DexCache.cc
+++ b/runtime/native/java_lang_DexCache.cc
@@ -59,7 +59,7 @@
static jobject DexCache_getResolvedString(JNIEnv* env, jobject javaDexCache, jint string_index) {
ScopedFastNativeObjectAccess soa(env);
mirror::DexCache* dex_cache = soa.Decode<mirror::DexCache*>(javaDexCache);
- CHECK_LT(static_cast<size_t>(string_index), dex_cache->NumStrings());
+ CHECK_LT(static_cast<size_t>(string_index), dex_cache->GetDexFile()->NumStringIds());
return soa.AddLocalReference<jobject>(dex_cache->GetResolvedString(string_index));
}
@@ -75,7 +75,7 @@
jobject string) {
ScopedFastNativeObjectAccess soa(env);
mirror::DexCache* dex_cache = soa.Decode<mirror::DexCache*>(javaDexCache);
- CHECK_LT(static_cast<size_t>(string_index), dex_cache->NumStrings());
+ CHECK_LT(static_cast<size_t>(string_index), dex_cache->GetDexFile()->NumStringIds());
dex_cache->SetResolvedString(string_index, soa.Decode<mirror::String*>(string));
}
diff --git a/runtime/simulator/Android.mk b/runtime/simulator/Android.mk
index a34a841..e39af2d 100644
--- a/runtime/simulator/Android.mk
+++ b/runtime/simulator/Android.mk
@@ -22,6 +22,9 @@
code_simulator.cc \
code_simulator_arm64.cc
+LIBART_SIMULATOR_CFLAGS := \
+ -DVIXL_INCLUDE_SIMULATOR_AARCH64
+
# $(1): target or host
# $(2): ndebug or debug
define build-libart-simulator
@@ -54,6 +57,7 @@
LOCAL_MODULE_CLASS := SHARED_LIBRARIES
LOCAL_SRC_FILES := $$(LIBART_SIMULATOR_SRC_FILES)
+ LOCAL_CFLAGS := $$(LIBART_SIMULATOR_CFLAGS)
ifeq ($$(art_target_or_host),target)
$(call set-target-local-clang-vars)
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 3aa1fc2..216d8a7 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -224,6 +224,7 @@
thread_to_pass = this;
}
MutexLock mu(thread_to_pass, *Locks::thread_suspend_count_lock_);
+ ScopedTransitioningToRunnable scoped_transitioning_to_runnable(this);
old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
while ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index b35a614..79b9f02 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1217,10 +1217,8 @@
ScopedTrace trace(__FUNCTION__);
VLOG(threads) << this << " self-suspending";
// Make thread appear suspended to other threads, release mutator_lock_.
- tls32_.suspended_at_suspend_check = true;
// Transition to suspended and back to runnable, re-acquire share on mutator_lock_.
ScopedThreadSuspension(this, kSuspended);
- tls32_.suspended_at_suspend_check = false;
VLOG(threads) << this << " self-reviving";
}
@@ -1433,6 +1431,12 @@
if (o == nullptr) {
os << "an unknown object";
} else {
+ if (kUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
+ // We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack
+ // may not have been flipped yet and "o" may be a from-space (stale) ref, in which case the
+ // IdentityHashCode call below will crash. So explicitly mark/forward it here.
+ o = ReadBarrier::Mark(o);
+ }
if ((o->GetLockWord(false).GetState() == LockWord::kThinLocked) &&
Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) {
// Getting the identity hashcode here would result in lock inflation and suspension of the
@@ -1635,7 +1639,7 @@
}
tlsPtr_.flip_function = nullptr;
tlsPtr_.thread_local_mark_stack = nullptr;
- tls32_.suspended_at_suspend_check = false;
+ tls32_.is_transitioning_to_runnable = false;
}
bool Thread::IsStillStarting() const {
@@ -1773,7 +1777,7 @@
CHECK(tlsPtr_.checkpoint_function == nullptr);
CHECK_EQ(checkpoint_overflow_.size(), 0u);
CHECK(tlsPtr_.flip_function == nullptr);
- CHECK_EQ(tls32_.suspended_at_suspend_check, false);
+ CHECK_EQ(tls32_.is_transitioning_to_runnable, false);
// Make sure we processed all deoptimization requests.
CHECK(tlsPtr_.deoptimization_context_stack == nullptr) << "Missed deoptimization";
diff --git a/runtime/thread.h b/runtime/thread.h
index 840b781..1c2d4ab 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1085,8 +1085,12 @@
return tlsPtr_.nested_signal_state;
}
- bool IsSuspendedAtSuspendCheck() const {
- return tls32_.suspended_at_suspend_check;
+ bool IsTransitioningToRunnable() const {
+ return tls32_.is_transitioning_to_runnable;
+ }
+
+ void SetIsTransitioningToRunnable(bool value) {
+ tls32_.is_transitioning_to_runnable = value;
}
void PushVerifier(verifier::MethodVerifier* verifier);
@@ -1264,7 +1268,7 @@
suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
thread_exit_check_count(0), handling_signal_(false),
- suspended_at_suspend_check(false), ready_for_debug_invoke(false),
+ is_transitioning_to_runnable(false), ready_for_debug_invoke(false),
debug_method_entry_(false), is_gc_marking(false), weak_ref_access_enabled(true),
disable_thread_flip_count(0) {
}
@@ -1306,10 +1310,10 @@
// True if signal is being handled by this thread.
bool32_t handling_signal_;
- // True if the thread is suspended in FullSuspendCheck(). This is
- // used to distinguish runnable threads that are suspended due to
- // a normal suspend check from other threads.
- bool32_t suspended_at_suspend_check;
+ // True if the thread is in TransitionFromSuspendedToRunnable(). This is used to distinguish the
+ // non-runnable threads (eg. kNative, kWaiting) that are about to transition to runnable from
+ // the rest of them.
+ bool32_t is_transitioning_to_runnable;
// True if the thread has been suspended by a debugger event. This is
// used to invoke method from the debugger which is only allowed when
@@ -1588,6 +1592,26 @@
Thread* const self_;
};
+class ScopedTransitioningToRunnable : public ValueObject {
+ public:
+ explicit ScopedTransitioningToRunnable(Thread* self)
+ : self_(self) {
+ DCHECK_EQ(self, Thread::Current());
+ if (kUseReadBarrier) {
+ self_->SetIsTransitioningToRunnable(true);
+ }
+ }
+
+ ~ScopedTransitioningToRunnable() {
+ if (kUseReadBarrier) {
+ self_->SetIsTransitioningToRunnable(false);
+ }
+ }
+
+ private:
+ Thread* const self_;
+};
+
std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const StackedShadowFrameType& thread);
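
ScopedTransitioningToRunnable above is a plain RAII flag guard; a minimal standalone sketch of the same pattern (hypothetical FakeThread type, no read-barrier or thread-state machinery), showing how the flag is set for exactly the duration of the transition:

    #include <atomic>

    struct FakeThread {
      std::atomic<bool> is_transitioning_to_runnable{false};
    };

    class ScopedTransitionFlag {
     public:
      explicit ScopedTransitionFlag(FakeThread* self) : self_(self) {
        self_->is_transitioning_to_runnable.store(true, std::memory_order_relaxed);
      }
      ~ScopedTransitionFlag() {
        self_->is_transitioning_to_runnable.store(false, std::memory_order_relaxed);
      }
     private:
      FakeThread* const self_;
    };

    void TransitionToRunnable(FakeThread* self) {
      ScopedTransitionFlag in_transition(self);
      // ... wait for pending suspend requests to clear, then become runnable ...
    }  // flag cleared on every exit path
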
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 419ecec..688514c 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -405,6 +405,8 @@
Locks::thread_suspend_count_lock_->AssertNotHeld(self);
CHECK_NE(self->GetState(), kRunnable);
+ collector->GetHeap()->ThreadFlipBegin(self); // Sync with JNI critical calls.
+
SuspendAllInternal(self, self, nullptr);
// Run the flip callback for the collector.
@@ -414,26 +416,31 @@
collector->RegisterPause(NanoTime() - start_time);
// Resume runnable threads.
- std::vector<Thread*> runnable_threads;
+ size_t runnable_thread_count = 0;
std::vector<Thread*> other_threads;
{
+ TimingLogger::ScopedTiming split2("ResumeRunnableThreads", collector->GetTimings());
MutexLock mu(self, *Locks::thread_list_lock_);
MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
--suspend_all_count_;
for (const auto& thread : list_) {
+ // Set the flip function for all threads because Thread::DumpState/DumpJavaStack() (invoked by
+ // a checkpoint) may cause the flip function to be run for a runnable/suspended thread before
+ // a runnable thread runs it for itself or we run it for a suspended thread below.
+ thread->SetFlipFunction(thread_flip_visitor);
if (thread == self) {
continue;
}
- // Set the flip function for both runnable and suspended threads
- // because Thread::DumpState/DumpJavaStack() (invoked by a
- // checkpoint) may cause the flip function to be run for a
- // runnable/suspended thread before a runnable threads runs it
- // for itself or we run it for a suspended thread below.
- thread->SetFlipFunction(thread_flip_visitor);
- if (thread->IsSuspendedAtSuspendCheck()) {
+ // Resume early the threads that were runnable but are suspended just for this thread flip or
+ // about to transition from non-runnable (eg. kNative at the SOA entry in a JNI function) to
+ // runnable (both cases waiting inside Thread::TransitionFromSuspendedToRunnable), or waiting
+ // for the thread flip to end at the JNI critical section entry (kWaitingForGcThreadFlip).
+ ThreadState state = thread->GetState();
+ if (state == kWaitingForGcThreadFlip ||
+ thread->IsTransitioningToRunnable()) {
// The thread will resume right after the broadcast.
thread->ModifySuspendCount(self, -1, nullptr, false);
- runnable_threads.push_back(thread);
+ ++runnable_thread_count;
} else {
other_threads.push_back(thread);
}
@@ -441,8 +448,11 @@
Thread::resume_cond_->Broadcast(self);
}
+ collector->GetHeap()->ThreadFlipEnd(self);
+
// Run the closure on the other threads and let them resume.
{
+ TimingLogger::ScopedTiming split3("FlipOtherThreads", collector->GetTimings());
ReaderMutexLock mu(self, *Locks::mutator_lock_);
for (const auto& thread : other_threads) {
Closure* flip_func = thread->GetFlipFunction();
@@ -451,11 +461,15 @@
}
}
// Run it for self.
- thread_flip_visitor->Run(self);
+ Closure* flip_func = self->GetFlipFunction();
+ if (flip_func != nullptr) {
+ flip_func->Run(self);
+ }
}
// Resume other threads.
{
+ TimingLogger::ScopedTiming split4("ResumeOtherThreads", collector->GetTimings());
MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
for (const auto& thread : other_threads) {
thread->ModifySuspendCount(self, -1, nullptr, false);
@@ -463,7 +477,7 @@
Thread::resume_cond_->Broadcast(self);
}
- return runnable_threads.size() + other_threads.size() + 1; // +1 for self.
+ return runnable_thread_count + other_threads.size() + 1; // +1 for self.
}
void ThreadList::SuspendAll(const char* cause, bool long_suspend) {
diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h
index 7733a51..4c63156 100644
--- a/runtime/utils/dex_cache_arrays_layout-inl.h
+++ b/runtime/utils/dex_cache_arrays_layout-inl.h
@@ -23,6 +23,7 @@
#include "base/logging.h"
#include "gc_root.h"
#include "globals.h"
+#include "mirror/dex_cache.h"
#include "primitive.h"
namespace art {
@@ -45,12 +46,11 @@
: DexCacheArraysLayout(pointer_size, dex_file->GetHeader()) {
}
-inline size_t DexCacheArraysLayout::Alignment() const {
+inline constexpr size_t DexCacheArraysLayout::Alignment() {
// GcRoot<> alignment is 4, i.e. lower than or equal to the pointer alignment.
static_assert(alignof(GcRoot<mirror::Class>) == 4, "Expecting alignof(GcRoot<>) == 4");
- static_assert(alignof(GcRoot<mirror::String>) == 4, "Expecting alignof(GcRoot<>) == 4");
- // Pointer alignment is the same as pointer size.
- return static_cast<size_t>(pointer_size_);
+ static_assert(alignof(mirror::StringDexCacheType) == 8, "Expecting alignof(StringDexCacheType) == 8");
+ return alignof(mirror::StringDexCacheType);
}
template <typename T>
@@ -87,15 +87,22 @@
}
inline size_t DexCacheArraysLayout::StringOffset(uint32_t string_idx) const {
- return strings_offset_ + ElementOffset(GcRootAsPointerSize<mirror::String>(), string_idx);
+ return strings_offset_ + ElementOffset(PointerSize::k64,
+ string_idx % mirror::DexCache::kDexCacheStringCacheSize);
}
inline size_t DexCacheArraysLayout::StringsSize(size_t num_elements) const {
- return ArraySize(GcRootAsPointerSize<mirror::String>(), num_elements);
+ size_t cache_size = mirror::DexCache::kDexCacheStringCacheSize;
+ if (num_elements < cache_size) {
+ cache_size = num_elements;
+ }
+ return ArraySize(PointerSize::k64, cache_size);
}
inline size_t DexCacheArraysLayout::StringsAlignment() const {
- return alignof(GcRoot<mirror::String>);
+ static_assert(alignof(mirror::StringDexCacheType) == 8,
+ "Expecting alignof(StringDexCacheType) == 8");
+ return alignof(mirror::StringDexCacheType);
}
inline size_t DexCacheArraysLayout::FieldOffset(uint32_t field_idx) const {
diff --git a/runtime/utils/dex_cache_arrays_layout.h b/runtime/utils/dex_cache_arrays_layout.h
index f2437fa..20ffa90 100644
--- a/runtime/utils/dex_cache_arrays_layout.h
+++ b/runtime/utils/dex_cache_arrays_layout.h
@@ -52,7 +52,7 @@
return size_;
}
- size_t Alignment() const;
+ static constexpr size_t Alignment();
size_t TypesOffset() const {
return types_offset_;
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 65debc9..75c4f34 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -225,9 +225,11 @@
# Disable 149-suspend-all-stress, its output is flaky (b/28988206).
# Disable 577-profile-foreign-dex (b/27454772).
+# Disable 552-checker-sharpening until the compiler component of the new string dex cache is added (@cwadsworth, @vmarko).
TEST_ART_BROKEN_ALL_TARGET_TESTS := \
149-suspend-all-stress \
577-profile-foreign-dex \
+ 552-checker-sharpening \
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
@@ -557,16 +559,25 @@
# more parallel moves on x86, thus some Checker assertions may fail.
# 527: On ARM64 and ARM, the read barrier instrumentation does not support the HIntermediateAddress
# instruction yet (b/26601270).
-# 537: Expects an array copy to be intrinsified on x86-64, but calling-on-slowpath intrinsics are
-# not yet handled in the read barrier configuration.
TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS := \
484-checker-register-hints \
- 527-checker-array-access-split \
- 537-checker-arraycopy
+ 527-checker-array-access-split
# Tests that should fail in the read barrier configuration with JIT (Optimizing compiler).
TEST_ART_BROKEN_JIT_READ_BARRIER_RUN_TESTS :=
+# Tests failing in non-Baker read barrier configurations with the Optimizing compiler (AOT).
+# 537: Expects an array copy to be intrinsified, but calling-on-slowpath intrinsics are not yet
+# handled in non-Baker read barrier configurations.
+TEST_ART_BROKEN_OPTIMIZING_NON_BAKER_READ_BARRIER_RUN_TESTS := \
+ 537-checker-arraycopy
+
+# Tests failing in non-Baker read barrier configurations with JIT (Optimizing compiler).
+# 537: Expects an array copy to be intrinsified, but calling-on-slowpath intrinsics are not yet
+# handled in non-Baker read barrier configurations.
+TEST_ART_BROKEN_JIT_NON_BAKER_READ_BARRIER_RUN_TESTS := \
+ 537-checker-arraycopy
+
ifeq ($(ART_USE_READ_BARRIER),true)
ifneq (,$(filter interpreter,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \
@@ -577,9 +588,15 @@
ifneq (,$(filter $(OPTIMIZING_COMPILER_TYPES),$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \
- $(PREBUILD_TYPES),$(OPTIMIZING_COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES), \
- $(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
+ $(PREBUILD_TYPES),$(OPTIMIZING_COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES), \
+ $(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
$(TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+ ifneq ($(ART_READ_BARRIER_TYPE),BAKER)
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \
+ $(PREBUILD_TYPES),$(OPTIMIZING_COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES), \
+ $(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
+ $(TEST_ART_BROKEN_OPTIMIZING_NON_BAKER_READ_BARRIER_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+ endif
endif
ifneq (,$(filter jit,$(COMPILER_TYPES)))
@@ -587,6 +604,12 @@
$(PREBUILD_TYPES),jit,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES), \
$(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
$(TEST_ART_BROKEN_JIT_READ_BARRIER_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+ ifneq ($(ART_READ_BARRIER_TYPE),BAKER)
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \
+ $(PREBUILD_TYPES),jit,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES), \
+ $(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
+ $(TEST_ART_BROKEN_JIT_NON_BAKER_READ_BARRIER_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+ endif
endif
endif
diff --git a/tools/cpp-define-generator/constant_dexcache.def b/tools/cpp-define-generator/constant_dexcache.def
new file mode 100644
index 0000000..fd197f2
--- /dev/null
+++ b/tools/cpp-define-generator/constant_dexcache.def
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(DEFINE_INCLUDE_DEPENDENCIES)
+#include "mirror/dex_cache.h" // art::mirror::DexCache, StringDexCachePair
+#endif
+
+DEFINE_EXPR(STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT, int32_t, art::WhichPowerOf2(sizeof(art::mirror::StringDexCachePair)))
+DEFINE_EXPR(STRING_DEX_CACHE_SIZE_MINUS_ONE, int32_t, art::mirror::DexCache::kDexCacheStringCacheSize - 1)
+DEFINE_EXPR(STRING_DEX_CACHE_HASH_BITS, int32_t,
+ art::LeastSignificantBit(art::mirror::DexCache::kDexCacheStringCacheSize))
\ No newline at end of file
diff --git a/tools/cpp-define-generator/offset_dexcache.def b/tools/cpp-define-generator/offset_dexcache.def
index 3b26518..4b9d481 100644
--- a/tools/cpp-define-generator/offset_dexcache.def
+++ b/tools/cpp-define-generator/offset_dexcache.def
@@ -19,16 +19,27 @@
#if defined(DEFINE_INCLUDE_DEPENDENCIES)
#include "art_method.h" // art::ArtMethod
#include "base/enums.h" // PointerSize
+#include "mirror/dex_cache.h" // art::DexCache
#endif
-#define DEFINE_ART_METHOD_OFFSET(field_name, method_name) \
+#define DEFINE_ART_METHOD_OFFSET_SIZED(field_name, method_name) \
DEFINE_EXPR(ART_METHOD_ ## field_name ## _OFFSET_32, int32_t, art::ArtMethod::method_name##Offset(art::PointerSize::k32).Int32Value()) \
DEFINE_EXPR(ART_METHOD_ ## field_name ## _OFFSET_64, int32_t, art::ArtMethod::method_name##Offset(art::PointerSize::k64).Int32Value())
+#define DEFINE_ART_METHOD_OFFSET(field_name, method_name) \
+ DEFINE_EXPR(ART_METHOD_ ## field_name ## _OFFSET, int32_t, art::ArtMethod::method_name##Offset().Int32Value())
+
+#define DEFINE_DECLARING_CLASS_OFFSET(field_name, method_name) \
+ DEFINE_EXPR(DECLARING_CLASS_ ## field_name ## _OFFSET, int32_t, art::mirror::Class::method_name##Offset().Int32Value())
+
// New macro suffix Method Name (of the Offset method)
-DEFINE_ART_METHOD_OFFSET(DEX_CACHE_METHODS, DexCacheResolvedMethods)
-DEFINE_ART_METHOD_OFFSET(DEX_CACHE_TYPES, DexCacheResolvedTypes)
-DEFINE_ART_METHOD_OFFSET(JNI, EntryPointFromJni)
-DEFINE_ART_METHOD_OFFSET(QUICK_CODE, EntryPointFromQuickCompiledCode)
+DEFINE_ART_METHOD_OFFSET_SIZED(DEX_CACHE_METHODS, DexCacheResolvedMethods)
+DEFINE_ART_METHOD_OFFSET_SIZED(DEX_CACHE_TYPES, DexCacheResolvedTypes)
+DEFINE_ART_METHOD_OFFSET_SIZED(JNI, EntryPointFromJni)
+DEFINE_ART_METHOD_OFFSET_SIZED(QUICK_CODE, EntryPointFromQuickCompiledCode)
+DEFINE_ART_METHOD_OFFSET(DECLARING_CLASS, DeclaringClass)
+DEFINE_DECLARING_CLASS_OFFSET(DEX_CACHE_STRINGS, DexCacheStrings)
#undef DEFINE_ART_METHOD_OFFSET
+#undef DEFINE_ART_METHOD_OFFSET_SIZED
+#undef DEFINE_DECLARING_CLASS_OFFSET
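
For reference, one DEFINE_ART_METHOD_OFFSET_SIZED line above expands (via DEFINE_EXPR, which the generator driver supplies) into the pair of 32/64-bit entries seen in the generated asm_support_gen.h hunk earlier, for example:

    DEFINE_ART_METHOD_OFFSET_SIZED(QUICK_CODE, EntryPointFromQuickCompiledCode)

    // expands, after token pasting, to:

    DEFINE_EXPR(ART_METHOD_QUICK_CODE_OFFSET_32, int32_t,
                art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k32).Int32Value())
    DEFINE_EXPR(ART_METHOD_QUICK_CODE_OFFSET_64, int32_t,
                art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k64).Int32Value())
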
diff --git a/tools/cpp-define-generator/offsets_all.def b/tools/cpp-define-generator/offsets_all.def
index d2d8777..13371a1 100644
--- a/tools/cpp-define-generator/offsets_all.def
+++ b/tools/cpp-define-generator/offsets_all.def
@@ -48,6 +48,7 @@
// TODO: MIRROR_*_ARRAY offsets (depends on header size)
// TODO: MIRROR_STRING offsets (depends on header size)
#include "offset_dexcache.def"
+#include "constant_dexcache.def"
#include "constant_heap.def"
#include "constant_lockword.def"
#include "constant_globals.def"
diff --git a/tools/javafuzz/README.md b/tools/javafuzz/README.md
index ca8532a..35c057c 100644
--- a/tools/javafuzz/README.md
+++ b/tools/javafuzz/README.md
@@ -1,12 +1,12 @@
JavaFuzz
========
-JavaFuzz is tool for generating random Java programs with the objective of
-fuzz testing the ART infrastructure. Each randomly generated Java program
+JavaFuzz is a tool for generating random Java programs with the objective
+of fuzz testing the ART infrastructure. Each randomly generated Java program
can be run under various modes of execution, such as using the interpreter,
using the optimizing compiler, using an external reference implementation,
or using various target architectures. Any difference between the outputs
-(a divergence) may indicate a bug in one of the execution modes.
+(**divergence**) may indicate a bug in one of the execution modes.
JavaFuzz can be combined with dexfuzz to get multilayered fuzz testing.
@@ -36,6 +36,24 @@
jack -cp ${JACK_CLASSPATH} --output-dex . Test.java
art -classpath classes.dex Test
+How to start the JavaFuzz tests
+===============================
+
+ run_java_fuzz_test.py [--num_tests]
+ [--mode1=mode] [--mode2=mode]
+
+where
+
+ --num_tests: number of tests to run (10000 by default)
+ --mode1: m1
+ --mode2: m2
+ with m1 != m2, and one of
+ ri : reference implementation on host (default for m1)
+ hint : Art interpreter on host
+ hopt : Art optimizing on host (default for m2)
+ tint : Art interpreter on target
+ topt : Art optimizing on target
+
Background
==========
diff --git a/tools/javafuzz/run_java_fuzz_test.py b/tools/javafuzz/run_java_fuzz_test.py
new file mode 100755
index 0000000..4f192e7
--- /dev/null
+++ b/tools/javafuzz/run_java_fuzz_test.py
@@ -0,0 +1,406 @@
+#!/usr/bin/env python2
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import argparse
+import subprocess
+import sys
+import os
+
+from tempfile import mkdtemp
+from threading import Timer
+
+# Normalized return codes.
+EXIT_SUCCESS = 0
+EXIT_TIMEOUT = 1
+EXIT_NOTCOMPILED = 2
+EXIT_NOTRUN = 3
+
+#
+# Utility methods.
+#
+
+def RunCommand(cmd, args, out, err, timeout = 5):
+ """Executes a command, and returns its return code.
+
+ Args:
+ cmd: string, a command to execute
+ args: string, arguments to pass to command (or None)
+ out: string, file name to open for stdout (or None)
+ err: string, file name to open for stderr (or None)
+ timeout: int, time out in seconds
+ Returns:
+ return code of running command (forced EXIT_TIMEOUT on timeout)
+ """
+ cmd = 'exec ' + cmd # preserve pid
+ if args != None:
+ cmd = cmd + ' ' + args
+ outf = None
+ if out != None:
+ outf = open(out, mode='w')
+ errf = None
+ if err != None:
+ errf = open(err, mode='w')
+ proc = subprocess.Popen(cmd, stdout=outf, stderr=errf, shell=True)
+ timer = Timer(timeout, proc.kill) # enforces timeout
+ timer.start()
+ proc.communicate()
+ if timer.is_alive():
+ timer.cancel()
+ returncode = proc.returncode
+ else:
+ returncode = EXIT_TIMEOUT
+ if outf != None:
+ outf.close()
+ if errf != None:
+ errf.close()
+ return returncode
+
+def GetJackClassPath():
+ """Returns Jack's classpath."""
+ top = os.environ.get('ANDROID_BUILD_TOP')
+ if top == None:
+ raise FatalError('Cannot find AOSP build top')
+ libdir = top + '/out/host/common/obj/JAVA_LIBRARIES'
+ return libdir + '/core-libart-hostdex_intermediates/classes.jack:' \
+ + libdir + '/core-oj-hostdex_intermediates/classes.jack'
+
+def GetExecutionModeRunner(mode):
+ """Returns a runner for the given execution mode.
+
+ Args:
+ mode: string, execution mode
+ Returns:
+ TestRunner with given execution mode
+ Raises:
+ FatalError: error for unknown execution mode
+ """
+ if mode == 'ri':
+ return TestRunnerRIOnHost()
+ if mode == 'hint':
+ return TestRunnerArtOnHost(True)
+ if mode == 'hopt':
+ return TestRunnerArtOnHost(False)
+ if mode == 'tint':
+ return TestRunnerArtOnTarget(True)
+ if mode == 'topt':
+ return TestRunnerArtOnTarget(False)
+ raise FatalError('Unknown execution mode')
+
+def GetReturnCode(retc):
+ """Returns a string representation of the given normalized return code.
+ Args:
+ retc: int, normalized return code
+ Returns:
+ string representation of normalized return code
+ Raises:
+ FatalError: error for unknown normalized return code
+ """
+ if retc == EXIT_SUCCESS:
+ return 'SUCCESS'
+ if retc == EXIT_TIMEOUT:
+ return 'TIMED-OUT'
+ if retc == EXIT_NOTCOMPILED:
+ return 'NOT-COMPILED'
+ if retc == EXIT_NOTRUN:
+ return 'NOT-RUN'
+ raise FatalError('Unknown normalized return code')
+
+#
+# Execution mode classes.
+#
+
+class TestRunner(object):
+ """Abstraction for running a test in a particular execution mode."""
+ __meta_class__ = abc.ABCMeta
+
+ def GetDescription(self):
+ """Returns a description string of the execution mode."""
+ return self._description
+
+ def GetId(self):
+ """Returns a short string that uniquely identifies the execution mode."""
+ return self._id
+
+ @abc.abstractmethod
+ def CompileAndRunTest(self):
+ """Compile and run the generated test.
+
+ Ensures that the current Test.java in the temporary directory is compiled
+ and executed under the current execution mode. On success, transfers the
+ generated output to the file GetId()_out.txt in the temporary directory.
+ Cleans up after itself.
+
+ Most nonzero return codes are assumed non-divergent, since systems may
+ exit in different ways. This is enforced by normalizing return codes.
+
+ Returns:
+ normalized return code
+ """
+ pass
+
+class TestRunnerRIOnHost(TestRunner):
+ """Concrete test runner of the reference implementation on host."""
+
+ def __init__(self):
+ """Constructor for the RI tester."""
+ self._description = 'RI on host'
+ self._id = 'RI'
+
+ def CompileAndRunTest(self):
+ if RunCommand('javac', 'Test.java',
+ out=None, err=None, timeout=30) == EXIT_SUCCESS:
+ retc = RunCommand('java', 'Test', 'RI_run_out.txt', err=None)
+ if retc != EXIT_SUCCESS and retc != EXIT_TIMEOUT:
+ retc = EXIT_NOTRUN
+ else:
+ retc = EXIT_NOTCOMPILED
+ # Cleanup and return.
+ RunCommand('rm', '-f Test.class', out=None, err=None)
+ return retc
+
+class TestRunnerArtOnHost(TestRunner):
+ """Concrete test runner of Art on host (interpreter or optimizing)."""
+
+ def __init__(self, interpreter):
+ """Constructor for the Art on host tester.
+
+ Args:
+ interpreter: boolean, selects between interpreter or optimizing
+ """
+ self._art_args = '-cp classes.dex Test'
+ if interpreter:
+ self._description = 'Art interpreter on host'
+ self._id = 'HInt'
+ self._art_args = '-Xint ' + self._art_args
+ else:
+ self._description = 'Art optimizing on host'
+ self._id = 'HOpt'
+ self._jack_args = '-cp ' + GetJackClassPath() + ' --output-dex . Test.java'
+
+ def CompileAndRunTest(self):
+ if RunCommand('jack', self._jack_args,
+ out=None, err='jackerr.txt', timeout=30) == EXIT_SUCCESS:
+ out = self.GetId() + '_run_out.txt'
+ retc = RunCommand('art', self._art_args, out, 'arterr.txt')
+ if retc != EXIT_SUCCESS and retc != EXIT_TIMEOUT:
+ retc = EXIT_NOTRUN
+ else:
+ retc = EXIT_NOTCOMPILED
+ # Cleanup and return.
+ RunCommand('rm', '-rf classes.dex jackerr.txt arterr.txt android-data*',
+ out=None, err=None)
+ return retc
+
+# TODO: very rough first version without proper cache,
+# reuse staszkiewicz' module for properly setting up dalvikvm on target.
+class TestRunnerArtOnTarget(TestRunner):
+ """Concrete test runner of Art on target (interpreter or optimizing)."""
+
+ def __init__(self, interpreter):
+ """Constructor for the Art on target tester.
+
+ Args:
+ interpreter: boolean, selects between interpreter or optimizing
+ """
+ self._dalvik_args = '-cp /data/local/tmp/classes.dex Test'
+ if interpreter:
+ self._description = 'Art interpreter on target'
+ self._id = 'TInt'
+ self._dalvik_args = '-Xint ' + self._dalvik_args
+ else:
+ self._description = 'Art optimizing on target'
+ self._id = 'TOpt'
+ self._jack_args = '-cp ' + GetJackClassPath() + ' --output-dex . Test.java'
+
+ def CompileAndRunTest(self):
+ if RunCommand('jack', self._jack_args,
+ out=None, err='jackerr.txt', timeout=30) == EXIT_SUCCESS:
+ if RunCommand('adb push', 'classes.dex /data/local/tmp/',
+ 'adb.txt', err=None) != EXIT_SUCCESS:
+ raise FatalError('Cannot push to target device')
+ out = self.GetId() + '_run_out.txt'
+ retc = RunCommand('adb shell dalvikvm', self._dalvik_args, out, err=None)
+ if retc != EXIT_SUCCESS and retc != EXIT_TIMEOUT:
+ retc = EXIT_NOTRUN
+ else:
+ retc = EXIT_NOTCOMPILED
+ # Cleanup and return.
+ RunCommand('rm', '-f classes.dex jackerr.txt adb.txt',
+ out=None, err=None)
+ RunCommand('adb shell', 'rm -f /data/local/tmp/classes.dex',
+ out=None, err=None)
+ return retc
+
+#
+# Tester classes.
+#
+
+class FatalError(Exception):
+ """Fatal error in the tester."""
+ pass
+
+class JavaFuzzTester(object):
+ """Tester that runs JavaFuzz many times and report divergences."""
+
+ def __init__(self, num_tests, mode1, mode2):
+ """Constructor for the tester.
+
+ Args:
+ num_tests: int, number of tests to run
+ mode1: string, execution mode for first runner
+ mode2: string, execution mode for second runner
+ """
+ self._num_tests = num_tests
+ self._runner1 = GetExecutionModeRunner(mode1)
+ self._runner2 = GetExecutionModeRunner(mode2)
+ self._save_dir = None
+ self._tmp_dir = None
+ # Statistics.
+ self._test = 0
+ self._num_success = 0
+ self._num_not_compiled = 0
+ self._num_not_run = 0
+ self._num_timed_out = 0
+ self._num_divergences = 0
+
+ def __enter__(self):
+ """On entry, enters new temp directory after saving current directory.
+
+ Raises:
+ FatalError: error when temp directory cannot be constructed
+ """
+ self._save_dir = os.getcwd()
+ self._tmp_dir = mkdtemp(dir="/tmp/")
+ if self._tmp_dir == None:
+ raise FatalError('Cannot obtain temp directory')
+ os.chdir(self._tmp_dir)
+ return self
+
+ def __exit__(self, etype, evalue, etraceback):
+ """On exit, re-enters previously saved current directory and cleans up."""
+ os.chdir(self._save_dir)
+ if self._num_divergences == 0:
+ RunCommand('rm', '-rf ' + self._tmp_dir, out=None, err=None)
+
+ def Run(self):
+ """Runs JavaFuzz many times and report divergences."""
+ print
+ print '**\n**** JavaFuzz Testing\n**'
+ print
+ print '#Tests :', self._num_tests
+ print 'Directory :', self._tmp_dir
+ print 'Exec-mode1:', self._runner1.GetDescription()
+ print 'Exec-mode2:', self._runner2.GetDescription()
+ print
+ self.ShowStats()
+ for self._test in range(1, self._num_tests + 1):
+ self.RunJavaFuzzTest()
+ self.ShowStats()
+ if self._num_divergences == 0:
+ print '\n\nsuccess (no divergences)\n'
+ else:
+ print '\n\nfailure (divergences)\n'
+
+ def ShowStats(self):
+ """Shows current statistics (on same line) while tester is running."""
+ print '\rTests:', self._test, \
+ 'Success:', self._num_success, \
+ 'Not-compiled:', self._num_not_compiled, \
+ 'Not-run:', self._num_not_run, \
+ 'Timed-out:', self._num_timed_out, \
+ 'Divergences:', self._num_divergences,
+ sys.stdout.flush()
+
+ def RunJavaFuzzTest(self):
+ """Runs a single JavaFuzz test, comparing two execution modes."""
+ self.ConstructTest()
+ retc1 = self._runner1.CompileAndRunTest()
+ retc2 = self._runner2.CompileAndRunTest()
+ self.CheckForDivergence(retc1, retc2)
+ self.CleanupTest()
+
+ def ConstructTest(self):
+ """Use JavaFuzz to generate next Test.java test.
+
+ Raises:
+ FatalError: error when javafuzz fails
+ """
+ if RunCommand('javafuzz', args=None,
+ out='Test.java', err=None) != EXIT_SUCCESS:
+ raise FatalError('Unexpected error while running JavaFuzz')
+
+ def CheckForDivergence(self, retc1, retc2):
+ """Checks for divergences and updates statistics.
+
+ Args:
+ retc1: int, normalized return code of first runner
+ retc2: int, normalized return code of second runner
+ """
+ if retc1 == retc2:
+ # Non-divergent in return code.
+ if retc1 == EXIT_SUCCESS:
+ # Both compilations and runs were successful, inspect generated output.
+ args = self._runner1.GetId() + '_run_out.txt ' \
+ + self._runner2.GetId() + '_run_out.txt'
+ if RunCommand('diff', args, out=None, err=None) != EXIT_SUCCESS:
+ self.ReportDivergence('divergence in output')
+ else:
+ self._num_success += 1
+ elif retc1 == EXIT_TIMEOUT:
+ self._num_timed_out += 1
+ elif retc1 == EXIT_NOTCOMPILED:
+ self._num_not_compiled += 1
+ else:
+ self._num_not_run += 1
+ else:
+ # Divergent in return code.
+ self.ReportDivergence('divergence in return code: ' +
+ GetReturnCode(retc1) + ' vs. ' +
+ GetReturnCode(retc2))
+
+ def ReportDivergence(self, reason):
+ """Reports and saves a divergence."""
+ self._num_divergences += 1
+ print '\n', self._test, reason
+ # Save.
+ ddir = 'divergence' + str(self._test)
+ RunCommand('mkdir', ddir, out=None, err=None)
+ RunCommand('mv', 'Test.java *.txt ' + ddir, out=None, err=None)
+
+ def CleanupTest(self):
+ """Cleans up after a single test run."""
+ RunCommand('rm', '-f Test.java *.txt', out=None, err=None)
+
+
+def main():
+ # Handle arguments.
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--num_tests', default=10000,
+ type=int, help='number of tests to run')
+ parser.add_argument('--mode1', default='ri',
+ help='execution mode 1 (default: ri)')
+ parser.add_argument('--mode2', default='hopt',
+ help='execution mode 2 (default: hopt)')
+ args = parser.parse_args()
+ if args.mode1 == args.mode2:
+ raise FatalError("Identical execution modes given")
+ # Run the JavaFuzz tester.
+ with JavaFuzzTester(args.num_tests, args.mode1, args.mode2) as fuzzer:
+ fuzzer.Run()
+
+if __name__ == "__main__":
+ main()