Use enum for read barrier options in compiler
Enums make the intent at each call site explicit where a bare
true / false did not. Also fixed a double load error in the x86
interface check cast fast path, which loaded the object's class twice
before walking the iftable.
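
For context, this patch uses the ReadBarrierOption enum declared in
read_barrier_option.h (included by the patch but not shown in the
diff). A minimal sketch of that declaration, inferred from the call
sites in this patch; the actual header may differ:

    // Sketch of read_barrier_option.h, inferred from this patch.
    enum ReadBarrierOption {
      kWithReadBarrier,     // Emit a read barrier for the reference load.
      kWithoutReadBarrier,  // Skip the barrier, e.g. for boot image classes.
    };
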
Test: test-art-host
Change-Id: Iea403ce579145b6a294073f3900ad6921c1a0d53
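
The mechanical change, applied uniformly across the ARM, ARM64, x86 and
x86-64 back ends, turns the bool parameter of the reference load helpers
into the enum. As a sketch, the ARM declaration before and after (the
other back ends follow the same pattern, see the diff below):

    // Before:
    void GenerateGcRootFieldLoad(HInstruction* instruction,
                                 Location root,
                                 Register obj,
                                 uint32_t offset,
                                 bool requires_read_barrier);

    // After:
    void GenerateGcRootFieldLoad(HInstruction* instruction,
                                 Location root,
                                 Register obj,
                                 uint32_t offset,
                                 ReadBarrierOption read_barrier_option);

Call sites that passed /* emit_read_barrier */ false now pass
kWithoutReadBarrier; sites that passed kEmitCompilerReadBarrier now pass
kCompilerReadBarrierOption, or a read_barrier_option computed from
cls->IsInBootImage() for HLoadClass.
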
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index a81f24e..bf246ad 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -30,6 +30,7 @@
#include "memory_region.h"
#include "nodes.h"
#include "optimizing_compiler_stats.h"
+#include "read_barrier_option.h"
#include "stack_map_stream.h"
#include "utils/label.h"
@@ -50,6 +51,9 @@
// Maximum value for a primitive long.
static int64_t constexpr kPrimLongMax = INT64_C(0x7fffffffffffffff);
+static constexpr ReadBarrierOption kCompilerReadBarrierOption =
+ kEmitCompilerReadBarrier ? kWithReadBarrier : kWithoutReadBarrier;
+
class Assembler;
class CodeGenerator;
class CompilerDriver;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 8ca8b8a..32642e1 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -5748,7 +5748,9 @@
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
- const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+ const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
+ ? kWithoutReadBarrier
+ : kCompilerReadBarrierOption;
bool generate_null_check = false;
switch (cls->GetLoadKind()) {
case HLoadClass::LoadKind::kReferrersClass: {
@@ -5760,17 +5762,17 @@
out_loc,
current_method,
ArtMethod::DeclaringClassOffset().Int32Value(),
- requires_read_barrier);
+ read_barrier_option);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress: {
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ LoadLiteral(out, codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
cls->GetTypeIndex()));
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
CodeGeneratorARM::PcRelativePatchInfo* labels =
codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
__ BindTrackedLabel(&labels->movw_label);
@@ -5782,7 +5784,7 @@
break;
}
case HLoadClass::LoadKind::kBootImageAddress: {
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
DCHECK_NE(cls->GetAddress(), 0u);
uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
__ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address));
@@ -5802,7 +5804,7 @@
uint32_t offset = address & MaxInt<uint32_t>(offset_bits);
__ LoadLiteral(out, codegen_->DeduplicateDexCacheAddressLiteral(base_address));
// /* GcRoot<mirror::Class> */ out = *(base_address + offset)
- GenerateGcRootFieldLoad(cls, out_loc, out, offset, requires_read_barrier);
+ GenerateGcRootFieldLoad(cls, out_loc, out, offset, read_barrier_option);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -5811,7 +5813,7 @@
HArmDexCacheArraysBase* base = cls->InputAt(0)->AsArmDexCacheArraysBase();
int32_t offset = cls->GetDexCacheElementOffset() - base->GetElementOffset();
// /* GcRoot<mirror::Class> */ out = *(dex_cache_arrays_base + offset)
- GenerateGcRootFieldLoad(cls, out_loc, base_reg, offset, requires_read_barrier);
+ GenerateGcRootFieldLoad(cls, out_loc, base_reg, offset, read_barrier_option);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -5825,7 +5827,7 @@
ArtMethod::DexCacheResolvedTypesOffset(kArmPointerSize).Int32Value());
// /* GcRoot<mirror::Class> */ out = out[type_index]
size_t offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex());
- GenerateGcRootFieldLoad(cls, out_loc, out, offset, requires_read_barrier);
+ GenerateGcRootFieldLoad(cls, out_loc, out, offset, read_barrier_option);
generate_null_check = !cls->IsInDexCache();
}
}
@@ -5968,7 +5970,7 @@
__ movt(temp, /* placeholder */ 0u);
__ BindTrackedLabel(&labels->add_pc_label);
__ add(temp, temp, ShifterOperand(PC));
- GenerateGcRootFieldLoad(load, out_loc, temp, /* offset */ 0, kEmitCompilerReadBarrier);
+ GenerateGcRootFieldLoad(load, out_loc, temp, /* offset */ 0, kCompilerReadBarrierOption);
SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
codegen_->AddSlowPath(slow_path);
__ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
@@ -6108,7 +6110,7 @@
obj_loc,
class_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
@@ -6130,7 +6132,7 @@
out_loc,
super_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
// If `out` is null, we use it for the result, and jump to `done`.
__ CompareAndBranchIfZero(out, &done);
__ cmp(out, ShifterOperand(cls));
@@ -6153,7 +6155,7 @@
out_loc,
super_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
__ CompareAndBranchIfNonZero(out, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
__ b(&done);
@@ -6176,7 +6178,7 @@
out_loc,
component_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
// If `out` is null, we use it for the result, and jump to `done`.
__ CompareAndBranchIfZero(out, &done);
__ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
@@ -6296,6 +6298,9 @@
const uint32_t object_array_data_offset =
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+ // Always false when read barriers are enabled: we may need to go to the entrypoint for
+ // non-fatal cases arising from false negatives, which may come from avoiding read barriers
+ // below. Avoiding read barriers there is done for performance and code size reasons.
bool is_type_check_slow_path_fatal = false;
if (!kEmitCompilerReadBarrier) {
is_type_check_slow_path_fatal =
@@ -6325,7 +6330,7 @@
obj_loc,
class_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
__ cmp(temp, ShifterOperand(cls));
// Jump to slow path for throwing the exception or doing a
@@ -6341,7 +6346,7 @@
obj_loc,
class_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
@@ -6352,7 +6357,7 @@
temp_loc,
super_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is null, jump to the slow path to throw the
// exception.
@@ -6371,7 +6376,7 @@
obj_loc,
class_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// Walk over the class hierarchy to find a match.
Label loop;
@@ -6384,7 +6389,7 @@
temp_loc,
super_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is null, jump to the slow path to throw the
// exception.
@@ -6401,7 +6406,7 @@
obj_loc,
class_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// Do an exact check.
__ cmp(temp, ShifterOperand(cls));
@@ -6413,7 +6418,7 @@
temp_loc,
component_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the component type is null, jump to the slow path to throw the exception.
__ CompareAndBranchIfZero(temp, type_check_slow_path->GetEntryLabel());
// Otherwise, the object is indeed an array, jump to label `check_non_primitive_component_type`
@@ -6446,7 +6451,7 @@
obj_loc,
class_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// /* HeapReference<Class> */ temp = temp->iftable_
GenerateReferenceLoadTwoRegisters(instruction,
@@ -6454,7 +6459,7 @@
temp_loc,
iftable_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
Label is_null;
// Null iftable means it is empty and will always fail the check.
// Not cbz since the temp may not be a low register.
@@ -6754,13 +6759,14 @@
}
}
-void InstructionCodeGeneratorARM::GenerateReferenceLoadOneRegister(HInstruction* instruction,
- Location out,
- uint32_t offset,
- Location maybe_temp,
- bool emit_read_barrier) {
+void InstructionCodeGeneratorARM::GenerateReferenceLoadOneRegister(
+ HInstruction* instruction,
+ Location out,
+ uint32_t offset,
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option) {
Register out_reg = out.AsRegister<Register>();
- if (emit_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
CHECK(kEmitCompilerReadBarrier);
DCHECK(maybe_temp.IsRegister()) << maybe_temp;
if (kUseBakerReadBarrier) {
@@ -6786,15 +6792,16 @@
}
}
-void InstructionCodeGeneratorARM::GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
- Location out,
- Location obj,
- uint32_t offset,
- Location maybe_temp,
- bool emit_read_barrier) {
+void InstructionCodeGeneratorARM::GenerateReferenceLoadTwoRegisters(
+ HInstruction* instruction,
+ Location out,
+ Location obj,
+ uint32_t offset,
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option) {
Register out_reg = out.AsRegister<Register>();
Register obj_reg = obj.AsRegister<Register>();
- if (emit_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
CHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
DCHECK(maybe_temp.IsRegister()) << maybe_temp;
@@ -6820,9 +6827,9 @@
Location root,
Register obj,
uint32_t offset,
- bool requires_read_barrier) {
+ ReadBarrierOption read_barrier_option) {
Register root_reg = root.AsRegister<Register>();
- if (requires_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
DCHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index e953df8..f95dd57 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -264,7 +264,7 @@
Location out,
uint32_t offset,
Location maybe_temp,
- bool emit_read_barrier);
+ ReadBarrierOption read_barrier_option);
// Generate a heap reference load using two different registers
// `out` and `obj`:
//
@@ -280,17 +280,17 @@
Location obj,
uint32_t offset,
Location maybe_temp,
- bool emit_read_barrier);
+ ReadBarrierOption read_barrier_option);
// Generate a GC root reference load:
//
// root <- *(obj + offset)
//
- // while honoring read barriers if `requires_read_barrier` is true.
+ // while honoring read barriers based on `read_barrier_option`.
void GenerateGcRootFieldLoad(HInstruction* instruction,
Location root,
Register obj,
uint32_t offset,
- bool requires_read_barrier);
+ ReadBarrierOption read_barrier_option);
void GenerateTestAndBranch(HInstruction* instruction,
size_t condition_input_index,
Label* true_target,
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 6f55b42..ef4e511 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -3418,7 +3418,7 @@
obj_loc,
class_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
@@ -3440,7 +3440,7 @@
out_loc,
super_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
// If `out` is null, we use it for the result, and jump to `done`.
__ Cbz(out, &done);
__ Cmp(out, cls);
@@ -3463,7 +3463,7 @@
out_loc,
super_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
__ Cbnz(out, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
__ B(&done);
@@ -3486,7 +3486,7 @@
out_loc,
component_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
// If `out` is null, we use it for the result, and jump to `done`.
__ Cbz(out, &done);
__ Ldrh(out, HeapOperand(out, primitive_offset));
@@ -3609,6 +3609,9 @@
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
bool is_type_check_slow_path_fatal = false;
+ // Always false when read barriers are enabled: we may need to go to the entrypoint for
+ // non-fatal cases arising from false negatives, which may come from avoiding read barriers
+ // below. Avoiding read barriers there is done for performance and code size reasons.
if (!kEmitCompilerReadBarrier) {
is_type_check_slow_path_fatal =
(type_check_kind == TypeCheckKind::kExactCheck ||
@@ -3637,7 +3640,7 @@
obj_loc,
class_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
__ Cmp(temp, cls);
// Jump to slow path for throwing the exception or doing a
@@ -3653,7 +3656,7 @@
obj_loc,
class_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
@@ -3664,7 +3667,7 @@
temp_loc,
super_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is null, jump to the slow path to throw the
// exception.
@@ -3682,7 +3685,7 @@
obj_loc,
class_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// Walk over the class hierarchy to find a match.
vixl::aarch64::Label loop;
@@ -3695,7 +3698,7 @@
temp_loc,
super_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is not null, jump
// back to the beginning of the loop.
@@ -3712,7 +3715,7 @@
obj_loc,
class_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// Do an exact check.
__ Cmp(temp, cls);
@@ -3724,7 +3727,7 @@
temp_loc,
component_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the component type is null, jump to the slow path to throw the exception.
__ Cbz(temp, type_check_slow_path->GetEntryLabel());
@@ -3755,7 +3758,7 @@
obj_loc,
class_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// /* HeapReference<Class> */ temp = temp->iftable_
GenerateReferenceLoadTwoRegisters(instruction,
@@ -3763,7 +3766,7 @@
temp_loc,
iftable_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
vixl::aarch64::Label is_null;
// Null iftable means it is empty and will always fail the check.
__ Cbz(temp, &is_null);
@@ -4348,7 +4351,9 @@
Location out_loc = cls->GetLocations()->Out();
Register out = OutputRegister(cls);
- const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+ const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
+ ? kWithoutReadBarrier
+ : kCompilerReadBarrierOption;
bool generate_null_check = false;
switch (cls->GetLoadKind()) {
case HLoadClass::LoadKind::kReferrersClass: {
@@ -4361,16 +4366,16 @@
current_method,
ArtMethod::DeclaringClassOffset().Int32Value(),
/* fixup_label */ nullptr,
- requires_read_barrier);
+ read_barrier_option);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ Ldr(out, codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
cls->GetTypeIndex()));
break;
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
// Add ADRP with its PC-relative type patch.
const DexFile& dex_file = cls->GetDexFile();
uint32_t type_index = cls->GetTypeIndex();
@@ -4383,7 +4388,7 @@
break;
}
case HLoadClass::LoadKind::kBootImageAddress: {
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
DCHECK(cls->GetAddress() != 0u && IsUint<32>(cls->GetAddress()));
__ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(cls->GetAddress()));
break;
@@ -4406,7 +4411,7 @@
out.X(),
offset,
/* fixup_label */ nullptr,
- requires_read_barrier);
+ read_barrier_option);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -4426,7 +4431,7 @@
out.X(),
/* offset placeholder */ 0,
ldr_label,
- requires_read_barrier);
+ read_barrier_option);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -4443,7 +4448,7 @@
out.X(),
CodeGenerator::GetCacheOffset(cls->GetTypeIndex()),
/* fixup_label */ nullptr,
- requires_read_barrier);
+ read_barrier_option);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -4580,7 +4585,7 @@
temp,
/* offset placeholder */ 0u,
ldr_label,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
SlowPathCodeARM64* slow_path =
new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load, temp, adrp_label);
codegen_->AddSlowPath(slow_path);
@@ -5282,14 +5287,15 @@
}
}
-void InstructionCodeGeneratorARM64::GenerateReferenceLoadOneRegister(HInstruction* instruction,
- Location out,
- uint32_t offset,
- Location maybe_temp,
- bool emit_read_barrier) {
+void InstructionCodeGeneratorARM64::GenerateReferenceLoadOneRegister(
+ HInstruction* instruction,
+ Location out,
+ uint32_t offset,
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option) {
Primitive::Type type = Primitive::kPrimNot;
Register out_reg = RegisterFrom(out, type);
- if (emit_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
CHECK(kEmitCompilerReadBarrier);
Register temp_reg = RegisterFrom(maybe_temp, type);
if (kUseBakerReadBarrier) {
@@ -5320,16 +5326,17 @@
}
}
-void InstructionCodeGeneratorARM64::GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
- Location out,
- Location obj,
- uint32_t offset,
- Location maybe_temp,
- bool emit_read_barrier) {
+void InstructionCodeGeneratorARM64::GenerateReferenceLoadTwoRegisters(
+ HInstruction* instruction,
+ Location out,
+ Location obj,
+ uint32_t offset,
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option) {
Primitive::Type type = Primitive::kPrimNot;
Register out_reg = RegisterFrom(out, type);
Register obj_reg = RegisterFrom(obj, type);
- if (emit_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
CHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
@@ -5356,15 +5363,16 @@
}
}
-void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(HInstruction* instruction,
- Location root,
- Register obj,
- uint32_t offset,
- vixl::aarch64::Label* fixup_label,
- bool requires_read_barrier) {
+void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(
+ HInstruction* instruction,
+ Location root,
+ Register obj,
+ uint32_t offset,
+ vixl::aarch64::Label* fixup_label,
+ ReadBarrierOption read_barrier_option) {
DCHECK(fixup_label == nullptr || offset == 0u);
Register root_reg = RegisterFrom(root, Primitive::kPrimNot);
- if (requires_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
DCHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index e9a47b6..0e8d4fd 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -270,7 +270,7 @@
Location out,
uint32_t offset,
Location maybe_temp,
- bool emit_read_barrier);
+ ReadBarrierOption read_barrier_option);
// Generate a heap reference load using two different registers
// `out` and `obj`:
//
@@ -286,18 +286,18 @@
Location obj,
uint32_t offset,
Location maybe_temp,
- bool emit_read_barrier);
+ ReadBarrierOption read_barrier_option);
// Generate a GC root reference load:
//
// root <- *(obj + offset)
//
- // while honoring read barriers if `requires_read_barrier` is true.
+ // while honoring read barriers based on `read_barrier_option`.
void GenerateGcRootFieldLoad(HInstruction* instruction,
Location root,
vixl::aarch64::Register obj,
uint32_t offset,
vixl::aarch64::Label* fixup_label,
- bool requires_read_barrier);
+ ReadBarrierOption read_barrier_option);
// Generate a floating-point comparison.
void GenerateFcmp(HInstruction* instruction);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 20bb36d..2a9e21d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -6077,7 +6077,9 @@
Register out = out_loc.AsRegister<Register>();
bool generate_null_check = false;
- const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+ const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
+ ? kWithoutReadBarrier
+ : kCompilerReadBarrierOption;
switch (cls->GetLoadKind()) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
@@ -6089,24 +6091,24 @@
out_loc,
Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()),
/* fixup_label */ nullptr,
- requires_read_barrier);
+ read_barrier_option);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress: {
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ movl(out, Immediate(/* placeholder */ 0));
codegen_->RecordTypePatch(cls);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
Register method_address = locations->InAt(0).AsRegister<Register>();
__ leal(out, Address(method_address, CodeGeneratorX86::kDummy32BitOffset));
codegen_->RecordTypePatch(cls);
break;
}
case HLoadClass::LoadKind::kBootImageAddress: {
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
DCHECK_NE(cls->GetAddress(), 0u);
uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
__ movl(out, Immediate(address));
@@ -6121,7 +6123,7 @@
out_loc,
Address::Absolute(address),
/* fixup_label */ nullptr,
- requires_read_barrier);
+ read_barrier_option);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -6134,7 +6136,7 @@
out_loc,
Address(base_reg, CodeGeneratorX86::kDummy32BitOffset),
fixup_label,
- requires_read_barrier);
+ read_barrier_option);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -6149,7 +6151,7 @@
out_loc,
Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())),
/* fixup_label */ nullptr,
- requires_read_barrier);
+ read_barrier_option);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -6287,7 +6289,7 @@
Address address = Address(method_address, CodeGeneratorX86::kDummy32BitOffset);
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
- GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kEmitCompilerReadBarrier);
+ GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86(load);
codegen_->AddSlowPath(slow_path);
__ testl(out, out);
@@ -6425,7 +6427,7 @@
out_loc,
obj_loc,
class_offset,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
@@ -6453,7 +6455,7 @@
out_loc,
super_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
__ testl(out, out);
// If `out` is null, we use it for the result, and jump to `done`.
__ j(kEqual, &done);
@@ -6487,7 +6489,7 @@
out_loc,
super_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
__ testl(out, out);
__ j(kNotEqual, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
@@ -6516,7 +6518,7 @@
out_loc,
component_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
__ testl(out, out);
// If `out` is null, we use it for the result, and jump to `done`.
__ j(kEqual, &done);
@@ -6653,6 +6655,9 @@
const uint32_t object_array_data_offset =
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+ // Always false when read barriers are enabled: we may need to go to the entrypoint for
+ // non-fatal cases arising from false negatives, which may come from avoiding read barriers
+ // below. Avoiding read barriers there is done for performance and code size reasons.
bool is_type_check_slow_path_fatal =
IsTypeCheckSlowPathFatal(type_check_kind, instruction->CanThrowIntoCatchBlock());
@@ -6676,7 +6681,7 @@
temp_loc,
obj_loc,
class_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
if (cls.IsRegister()) {
__ cmpl(temp, cls.AsRegister<Register>());
@@ -6696,7 +6701,7 @@
temp_loc,
obj_loc,
class_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
@@ -6707,7 +6712,7 @@
temp_loc,
super_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is null, jump to the slow path to throw the
// exception.
@@ -6731,7 +6736,7 @@
temp_loc,
obj_loc,
class_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// Walk over the class hierarchy to find a match.
NearLabel loop;
@@ -6749,7 +6754,7 @@
temp_loc,
super_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is not null, jump
// back to the beginning of the loop.
@@ -6766,7 +6771,7 @@
temp_loc,
obj_loc,
class_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// Do an exact check.
if (cls.IsRegister()) {
@@ -6783,7 +6788,7 @@
temp_loc,
component_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the component type is null (i.e. the object is not an array), jump to the slow path to
// throw the exception. Otherwise proceed with the check.
@@ -6813,13 +6818,6 @@
// conditional flags and cause the conditional jump to be incorrect. Therefore we just jump
// to the slow path if we are running under poisoning.
if (!kPoisonHeapReferences) {
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction,
- temp_loc,
- obj_loc,
- class_offset,
- /*emit_read_barrier*/ false);
-
// Try to avoid read barriers to improve the fast path. We cannot get false positives by
// doing this.
// /* HeapReference<Class> */ temp = obj->klass_
@@ -6827,14 +6825,14 @@
temp_loc,
obj_loc,
class_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// /* HeapReference<Class> */ temp = temp->iftable_
GenerateReferenceLoadTwoRegisters(instruction,
temp_loc,
temp_loc,
iftable_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
NearLabel is_null;
// Null iftable means it is empty.
__ testl(temp, temp);
@@ -7015,13 +7013,15 @@
}
}
-void InstructionCodeGeneratorX86::GenerateReferenceLoadOneRegister(HInstruction* instruction,
- Location out,
- uint32_t offset,
- Location maybe_temp,
- bool emit_read_barrier) {
+void InstructionCodeGeneratorX86::GenerateReferenceLoadOneRegister(
+ HInstruction* instruction,
+ Location out,
+ uint32_t offset,
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option) {
Register out_reg = out.AsRegister<Register>();
- if (emit_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
+ CHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(out + offset)
@@ -7046,15 +7046,16 @@
}
}
-void InstructionCodeGeneratorX86::GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
- Location out,
- Location obj,
- uint32_t offset,
- bool emit_read_barrier) {
+void InstructionCodeGeneratorX86::GenerateReferenceLoadTwoRegisters(
+ HInstruction* instruction,
+ Location out,
+ Location obj,
+ uint32_t offset,
+ ReadBarrierOption read_barrier_option) {
Register out_reg = out.AsRegister<Register>();
Register obj_reg = obj.AsRegister<Register>();
- if (emit_read_barrier) {
- DCHECK(kEmitCompilerReadBarrier);
+ if (read_barrier_option == kWithReadBarrier) {
+ CHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -7074,13 +7075,14 @@
}
}
-void InstructionCodeGeneratorX86::GenerateGcRootFieldLoad(HInstruction* instruction,
- Location root,
- const Address& address,
- Label* fixup_label,
- bool requires_read_barrier) {
+void InstructionCodeGeneratorX86::GenerateGcRootFieldLoad(
+ HInstruction* instruction,
+ Location root,
+ const Address& address,
+ Label* fixup_label,
+ ReadBarrierOption read_barrier_option) {
Register root_reg = root.AsRegister<Register>();
- if (requires_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
DCHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 87295a4..164231b 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -241,7 +241,7 @@
Location out,
uint32_t offset,
Location maybe_temp,
- bool emit_read_barrier);
+ ReadBarrierOption read_barrier_option);
// Generate a heap reference load using two different registers
// `out` and `obj`:
//
@@ -256,17 +256,17 @@
Location out,
Location obj,
uint32_t offset,
- bool emit_read_barrier);
+ ReadBarrierOption read_barrier_option);
// Generate a GC root reference load:
//
// root <- *address
//
- // while honoring read barriers if `requires_read_barrier` is true.
+ // while honoring read barriers based on `read_barrier_option`.
void GenerateGcRootFieldLoad(HInstruction* instruction,
Location root,
const Address& address,
Label* fixup_label,
- bool requires_read_barrier);
+ ReadBarrierOption read_barrier_option);
// Push value to FPU stack. `is_fp` specifies whether the value is floating point or not.
// `is_wide` specifies whether it is long/double or not.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index a6dd0c1..cb89e50 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -5501,7 +5501,9 @@
Location out_loc = locations->Out();
CpuRegister out = out_loc.AsRegister<CpuRegister>();
- const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+ const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
+ ? kWithoutReadBarrier
+ : kCompilerReadBarrierOption;
bool generate_null_check = false;
switch (cls->GetLoadKind()) {
case HLoadClass::LoadKind::kReferrersClass: {
@@ -5514,16 +5516,16 @@
out_loc,
Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()),
/* fixup_label */ nullptr,
- requires_read_barrier);
+ read_barrier_option);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
codegen_->RecordTypePatch(cls);
break;
case HLoadClass::LoadKind::kBootImageAddress: {
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
DCHECK_NE(cls->GetAddress(), 0u);
uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
__ movl(out, Immediate(address)); // Zero-extended.
@@ -5539,7 +5541,7 @@
out_loc,
address,
/* fixup_label */ nullptr,
- requires_read_barrier);
+ read_barrier_option);
} else {
// TODO: Consider using opcode A1, i.e. movl eax, moff32 (with 64-bit address).
__ movq(out, Immediate(cls->GetAddress()));
@@ -5547,7 +5549,7 @@
out_loc,
Address(out, 0),
/* fixup_label */ nullptr,
- requires_read_barrier);
+ read_barrier_option);
}
generate_null_check = !cls->IsInDexCache();
break;
@@ -5558,7 +5560,7 @@
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
/* no_rip */ false);
// /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
- GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, requires_read_barrier);
+ GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -5575,7 +5577,7 @@
out_loc,
Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())),
/* fixup_label */ nullptr,
- requires_read_barrier);
+ read_barrier_option);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -5688,7 +5690,7 @@
/* no_rip */ false);
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
- GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kEmitCompilerReadBarrier);
+ GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86_64(load);
codegen_->AddSlowPath(slow_path);
__ testl(out, out);
@@ -5829,7 +5831,7 @@
out_loc,
obj_loc,
class_offset,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
@@ -5862,7 +5864,7 @@
out_loc,
super_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
__ testl(out, out);
// If `out` is null, we use it for the result, and jump to `done`.
__ j(kEqual, &done);
@@ -5896,7 +5898,7 @@
out_loc,
super_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
__ testl(out, out);
__ j(kNotEqual, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
@@ -5925,7 +5927,7 @@
out_loc,
component_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
__ testl(out, out);
// If `out` is null, we use it for the result, and jump to `done`.
__ j(kEqual, &done);
@@ -6065,6 +6067,9 @@
const uint32_t object_array_data_offset =
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+ // Always false when read barriers are enabled: we may need to go to the entrypoint for
+ // non-fatal cases arising from false negatives, which may come from avoiding read barriers
+ // below. Avoiding read barriers there is done for performance and code size reasons.
bool is_type_check_slow_path_fatal =
IsTypeCheckSlowPathFatal(type_check_kind, instruction->CanThrowIntoCatchBlock());
SlowPathCode* type_check_slow_path =
@@ -6088,7 +6093,7 @@
temp_loc,
obj_loc,
class_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
if (cls.IsRegister()) {
__ cmpl(temp, cls.AsRegister<CpuRegister>());
} else {
@@ -6107,7 +6112,7 @@
temp_loc,
obj_loc,
class_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
NearLabel loop;
@@ -6117,7 +6122,7 @@
temp_loc,
super_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is null, jump to the slow path to throw the
// exception.
@@ -6140,7 +6145,7 @@
temp_loc,
obj_loc,
class_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// Walk over the class hierarchy to find a match.
NearLabel loop;
__ Bind(&loop);
@@ -6157,7 +6162,7 @@
temp_loc,
super_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is not null, jump
// back to the beginning of the loop.
@@ -6174,7 +6179,7 @@
temp_loc,
obj_loc,
class_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// Do an exact check.
NearLabel check_non_primitive_component_type;
if (cls.IsRegister()) {
@@ -6191,7 +6196,7 @@
temp_loc,
component_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the component type is not null (i.e. the object is indeed
// an array), jump to label `check_non_primitive_component_type`
@@ -6230,14 +6235,14 @@
temp_loc,
obj_loc,
class_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// /* HeapReference<Class> */ temp = temp->iftable_
GenerateReferenceLoadTwoRegisters(instruction,
temp_loc,
temp_loc,
iftable_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
NearLabel is_null;
// Null iftable means it is empty.
__ testl(temp, temp);
@@ -6400,13 +6405,14 @@
}
}
-void InstructionCodeGeneratorX86_64::GenerateReferenceLoadOneRegister(HInstruction* instruction,
- Location out,
- uint32_t offset,
- Location maybe_temp,
- bool emit_read_barrier) {
+void InstructionCodeGeneratorX86_64::GenerateReferenceLoadOneRegister(
+ HInstruction* instruction,
+ Location out,
+ uint32_t offset,
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option) {
CpuRegister out_reg = out.AsRegister<CpuRegister>();
- if (emit_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
CHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
@@ -6432,14 +6438,15 @@
}
}
-void InstructionCodeGeneratorX86_64::GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
- Location out,
- Location obj,
- uint32_t offset,
- bool emit_read_barrier) {
+void InstructionCodeGeneratorX86_64::GenerateReferenceLoadTwoRegisters(
+ HInstruction* instruction,
+ Location out,
+ Location obj,
+ uint32_t offset,
+ ReadBarrierOption read_barrier_option) {
CpuRegister out_reg = out.AsRegister<CpuRegister>();
CpuRegister obj_reg = obj.AsRegister<CpuRegister>();
- if (emit_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
CHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
@@ -6460,13 +6467,14 @@
}
}
-void InstructionCodeGeneratorX86_64::GenerateGcRootFieldLoad(HInstruction* instruction,
- Location root,
- const Address& address,
- Label* fixup_label,
- bool requires_read_barrier) {
+void InstructionCodeGeneratorX86_64::GenerateGcRootFieldLoad(
+ HInstruction* instruction,
+ Location root,
+ const Address& address,
+ Label* fixup_label,
+ ReadBarrierOption read_barrier_option) {
CpuRegister root_reg = root.AsRegister<CpuRegister>();
- if (requires_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
DCHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 807a9f1..e5a4152 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -235,7 +235,7 @@
Location out,
uint32_t offset,
Location maybe_temp,
- bool emit_read_barrier);
+ ReadBarrierOption read_barrier_option);
// Generate a heap reference load using two different registers
// `out` and `obj`:
//
@@ -250,17 +250,17 @@
Location out,
Location obj,
uint32_t offset,
- bool emit_read_barrier);
+ ReadBarrierOption read_barrier_option);
// Generate a GC root reference load:
//
// root <- *address
//
- // while honoring read barriers if `requires_read_barrier` is true.
+ // while honoring read barriers based on `read_barrier_option`.
void GenerateGcRootFieldLoad(HInstruction* instruction,
Location root,
const Address& address,
Label* fixup_label,
- bool requires_read_barrier);
+ ReadBarrierOption read_barrier_option);
void PushOntoFPStack(Location source, uint32_t temp_offset,
uint32_t stack_adjustment, bool is_float);