Merge "Use entrypoint switching to reduce code size of GcRoot read barrier"
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index a81f24e..bf246ad 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -30,6 +30,7 @@
#include "memory_region.h"
#include "nodes.h"
#include "optimizing_compiler_stats.h"
+#include "read_barrier_option.h"
#include "stack_map_stream.h"
#include "utils/label.h"
@@ -50,6 +51,9 @@
// Maximum value for a primitive long.
static int64_t constexpr kPrimLongMax = INT64_C(0x7fffffffffffffff);
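+// Read barrier option for compiler-generated reference loads: request read barriers only
+// when the compiler is configured to emit them.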
+static constexpr ReadBarrierOption kCompilerReadBarrierOption =
+ kEmitCompilerReadBarrier ? kWithReadBarrier : kWithoutReadBarrier;
+
class Assembler;
class CodeGenerator;
class CompilerDriver;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 74a74ee..7c72d00 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -5758,7 +5758,9 @@
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
- const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+ const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
+ ? kWithoutReadBarrier
+ : kCompilerReadBarrierOption;
bool generate_null_check = false;
switch (cls->GetLoadKind()) {
case HLoadClass::LoadKind::kReferrersClass: {
@@ -5770,17 +5772,17 @@
out_loc,
current_method,
ArtMethod::DeclaringClassOffset().Int32Value(),
- requires_read_barrier);
+ read_barrier_option);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress: {
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ LoadLiteral(out, codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
cls->GetTypeIndex()));
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
CodeGeneratorARM::PcRelativePatchInfo* labels =
codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
__ BindTrackedLabel(&labels->movw_label);
@@ -5792,7 +5794,7 @@
break;
}
case HLoadClass::LoadKind::kBootImageAddress: {
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
DCHECK_NE(cls->GetAddress(), 0u);
uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
__ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address));
@@ -5812,7 +5814,7 @@
uint32_t offset = address & MaxInt<uint32_t>(offset_bits);
__ LoadLiteral(out, codegen_->DeduplicateDexCacheAddressLiteral(base_address));
// /* GcRoot<mirror::Class> */ out = *(base_address + offset)
- GenerateGcRootFieldLoad(cls, out_loc, out, offset, requires_read_barrier);
+ GenerateGcRootFieldLoad(cls, out_loc, out, offset, read_barrier_option);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -5821,7 +5823,7 @@
HArmDexCacheArraysBase* base = cls->InputAt(0)->AsArmDexCacheArraysBase();
int32_t offset = cls->GetDexCacheElementOffset() - base->GetElementOffset();
// /* GcRoot<mirror::Class> */ out = *(dex_cache_arrays_base + offset)
- GenerateGcRootFieldLoad(cls, out_loc, base_reg, offset, requires_read_barrier);
+ GenerateGcRootFieldLoad(cls, out_loc, base_reg, offset, read_barrier_option);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -5835,7 +5837,7 @@
ArtMethod::DexCacheResolvedTypesOffset(kArmPointerSize).Int32Value());
// /* GcRoot<mirror::Class> */ out = out[type_index]
size_t offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex());
- GenerateGcRootFieldLoad(cls, out_loc, out, offset, requires_read_barrier);
+ GenerateGcRootFieldLoad(cls, out_loc, out, offset, read_barrier_option);
generate_null_check = !cls->IsInDexCache();
}
}
@@ -5978,7 +5980,7 @@
__ movt(temp, /* placeholder */ 0u);
__ BindTrackedLabel(&labels->add_pc_label);
__ add(temp, temp, ShifterOperand(PC));
- GenerateGcRootFieldLoad(load, out_loc, temp, /* offset */ 0, kEmitCompilerReadBarrier);
+ GenerateGcRootFieldLoad(load, out_loc, temp, /* offset */ 0, kCompilerReadBarrierOption);
SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
codegen_->AddSlowPath(slow_path);
__ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
@@ -6118,7 +6120,7 @@
obj_loc,
class_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
@@ -6140,7 +6142,7 @@
out_loc,
super_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
// If `out` is null, we use it for the result, and jump to `done`.
__ CompareAndBranchIfZero(out, &done);
__ cmp(out, ShifterOperand(cls));
@@ -6163,7 +6165,7 @@
out_loc,
super_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
__ CompareAndBranchIfNonZero(out, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
__ b(&done);
@@ -6186,7 +6188,7 @@
out_loc,
component_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
// If `out` is null, we use it for the result, and jump to `done`.
__ CompareAndBranchIfZero(out, &done);
__ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
@@ -6306,6 +6308,9 @@
const uint32_t object_array_data_offset =
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+ // Always false when read barriers are enabled: avoiding read barriers below (done for
+ // performance and code size reasons) can produce false negatives, so we may need to go to
+ // the entrypoint for those non-fatal cases.
bool is_type_check_slow_path_fatal = false;
if (!kEmitCompilerReadBarrier) {
is_type_check_slow_path_fatal =
@@ -6335,7 +6340,7 @@
obj_loc,
class_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
__ cmp(temp, ShifterOperand(cls));
// Jump to slow path for throwing the exception or doing a
@@ -6351,7 +6356,7 @@
obj_loc,
class_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
@@ -6362,7 +6367,7 @@
temp_loc,
super_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is null, jump to the slow path to throw the
// exception.
@@ -6381,7 +6386,7 @@
obj_loc,
class_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// Walk over the class hierarchy to find a match.
Label loop;
@@ -6394,7 +6399,7 @@
temp_loc,
super_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is null, jump to the slow path to throw the
// exception.
@@ -6411,7 +6416,7 @@
obj_loc,
class_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// Do an exact check.
__ cmp(temp, ShifterOperand(cls));
@@ -6423,7 +6428,7 @@
temp_loc,
component_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the component type is null, jump to the slow path to throw the exception.
__ CompareAndBranchIfZero(temp, type_check_slow_path->GetEntryLabel());
// Otherwise, the object is indeed an array; jump to label `check_non_primitive_component_type`
@@ -6456,7 +6461,7 @@
obj_loc,
class_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// /* HeapReference<Class> */ temp = temp->iftable_
GenerateReferenceLoadTwoRegisters(instruction,
@@ -6464,7 +6469,7 @@
temp_loc,
iftable_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
Label is_null;
// Null iftable means it is empty and will always fail the check.
// Not cbz since the temp may not be a low register.
@@ -6764,13 +6769,14 @@
}
}
-void InstructionCodeGeneratorARM::GenerateReferenceLoadOneRegister(HInstruction* instruction,
- Location out,
- uint32_t offset,
- Location maybe_temp,
- bool emit_read_barrier) {
+void InstructionCodeGeneratorARM::GenerateReferenceLoadOneRegister(
+ HInstruction* instruction,
+ Location out,
+ uint32_t offset,
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option) {
Register out_reg = out.AsRegister<Register>();
- if (emit_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
CHECK(kEmitCompilerReadBarrier);
DCHECK(maybe_temp.IsRegister()) << maybe_temp;
if (kUseBakerReadBarrier) {
@@ -6796,15 +6802,16 @@
}
}
-void InstructionCodeGeneratorARM::GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
- Location out,
- Location obj,
- uint32_t offset,
- Location maybe_temp,
- bool emit_read_barrier) {
+void InstructionCodeGeneratorARM::GenerateReferenceLoadTwoRegisters(
+ HInstruction* instruction,
+ Location out,
+ Location obj,
+ uint32_t offset,
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option) {
Register out_reg = out.AsRegister<Register>();
Register obj_reg = obj.AsRegister<Register>();
- if (emit_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
CHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
DCHECK(maybe_temp.IsRegister()) << maybe_temp;
@@ -6830,9 +6837,9 @@
Location root,
Register obj,
uint32_t offset,
- bool requires_read_barrier) {
+ ReadBarrierOption read_barrier_option) {
Register root_reg = root.AsRegister<Register>();
- if (requires_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
DCHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index e953df8..f95dd57 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -264,7 +264,7 @@
Location out,
uint32_t offset,
Location maybe_temp,
- bool emit_read_barrier);
+ ReadBarrierOption read_barrier_option);
// Generate a heap reference load using two different registers
// `out` and `obj`:
//
@@ -280,17 +280,17 @@
Location obj,
uint32_t offset,
Location maybe_temp,
- bool emit_read_barrier);
+ ReadBarrierOption read_barrier_option);
// Generate a GC root reference load:
//
// root <- *(obj + offset)
//
- // while honoring read barriers if `requires_read_barrier` is true.
+ // while honoring read barriers based on `read_barrier_option`.
void GenerateGcRootFieldLoad(HInstruction* instruction,
Location root,
Register obj,
uint32_t offset,
- bool requires_read_barrier);
+ ReadBarrierOption read_barrier_option);
void GenerateTestAndBranch(HInstruction* instruction,
size_t condition_input_index,
Label* true_target,
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 7860138..35b1605 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -3433,7 +3433,7 @@
obj_loc,
class_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
@@ -3455,7 +3455,7 @@
out_loc,
super_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
// If `out` is null, we use it for the result, and jump to `done`.
__ Cbz(out, &done);
__ Cmp(out, cls);
@@ -3478,7 +3478,7 @@
out_loc,
super_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
__ Cbnz(out, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
__ B(&done);
@@ -3501,7 +3501,7 @@
out_loc,
component_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
// If `out` is null, we use it for the result, and jump to `done`.
__ Cbz(out, &done);
__ Ldrh(out, HeapOperand(out, primitive_offset));
@@ -3624,6 +3624,9 @@
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
bool is_type_check_slow_path_fatal = false;
+ // Always false when read barriers are enabled: avoiding read barriers below (done for
+ // performance and code size reasons) can produce false negatives, so we may need to go to
+ // the entrypoint for those non-fatal cases.
if (!kEmitCompilerReadBarrier) {
is_type_check_slow_path_fatal =
(type_check_kind == TypeCheckKind::kExactCheck ||
@@ -3652,7 +3655,7 @@
obj_loc,
class_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
__ Cmp(temp, cls);
// Jump to slow path for throwing the exception or doing a
@@ -3668,7 +3671,7 @@
obj_loc,
class_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
@@ -3679,7 +3682,7 @@
temp_loc,
super_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is null, jump to the slow path to throw the
// exception.
@@ -3697,7 +3700,7 @@
obj_loc,
class_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// Walk over the class hierarchy to find a match.
vixl::aarch64::Label loop;
@@ -3710,7 +3713,7 @@
temp_loc,
super_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is not null, jump
// back to the beginning of the loop.
@@ -3727,7 +3730,7 @@
obj_loc,
class_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// Do an exact check.
__ Cmp(temp, cls);
@@ -3739,7 +3742,7 @@
temp_loc,
component_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the component type is null, jump to the slow path to throw the exception.
__ Cbz(temp, type_check_slow_path->GetEntryLabel());
@@ -3770,7 +3773,7 @@
obj_loc,
class_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// /* HeapReference<Class> */ temp = temp->iftable_
GenerateReferenceLoadTwoRegisters(instruction,
@@ -3778,7 +3781,7 @@
temp_loc,
iftable_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
vixl::aarch64::Label is_null;
// Null iftable means it is empty and will always fail the check.
__ Cbz(temp, &is_null);
@@ -4363,7 +4366,9 @@
Location out_loc = cls->GetLocations()->Out();
Register out = OutputRegister(cls);
- const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+ const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
+ ? kWithoutReadBarrier
+ : kCompilerReadBarrierOption;
bool generate_null_check = false;
switch (cls->GetLoadKind()) {
case HLoadClass::LoadKind::kReferrersClass: {
@@ -4376,16 +4381,16 @@
current_method,
ArtMethod::DeclaringClassOffset().Int32Value(),
/* fixup_label */ nullptr,
- requires_read_barrier);
+ read_barrier_option);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ Ldr(out, codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
cls->GetTypeIndex()));
break;
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
// Add ADRP with its PC-relative type patch.
const DexFile& dex_file = cls->GetDexFile();
uint32_t type_index = cls->GetTypeIndex();
@@ -4398,7 +4403,7 @@
break;
}
case HLoadClass::LoadKind::kBootImageAddress: {
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
DCHECK(cls->GetAddress() != 0u && IsUint<32>(cls->GetAddress()));
__ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(cls->GetAddress()));
break;
@@ -4421,7 +4426,7 @@
out.X(),
offset,
/* fixup_label */ nullptr,
- requires_read_barrier);
+ read_barrier_option);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -4441,7 +4446,7 @@
out.X(),
/* offset placeholder */ 0,
ldr_label,
- requires_read_barrier);
+ read_barrier_option);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -4458,7 +4463,7 @@
out.X(),
CodeGenerator::GetCacheOffset(cls->GetTypeIndex()),
/* fixup_label */ nullptr,
- requires_read_barrier);
+ read_barrier_option);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -4595,7 +4600,7 @@
temp,
/* offset placeholder */ 0u,
ldr_label,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
SlowPathCodeARM64* slow_path =
new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load, temp, adrp_label);
codegen_->AddSlowPath(slow_path);
@@ -5297,14 +5302,15 @@
}
}
-void InstructionCodeGeneratorARM64::GenerateReferenceLoadOneRegister(HInstruction* instruction,
- Location out,
- uint32_t offset,
- Location maybe_temp,
- bool emit_read_barrier) {
+void InstructionCodeGeneratorARM64::GenerateReferenceLoadOneRegister(
+ HInstruction* instruction,
+ Location out,
+ uint32_t offset,
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option) {
Primitive::Type type = Primitive::kPrimNot;
Register out_reg = RegisterFrom(out, type);
- if (emit_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
CHECK(kEmitCompilerReadBarrier);
Register temp_reg = RegisterFrom(maybe_temp, type);
if (kUseBakerReadBarrier) {
@@ -5335,16 +5341,17 @@
}
}
-void InstructionCodeGeneratorARM64::GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
- Location out,
- Location obj,
- uint32_t offset,
- Location maybe_temp,
- bool emit_read_barrier) {
+void InstructionCodeGeneratorARM64::GenerateReferenceLoadTwoRegisters(
+ HInstruction* instruction,
+ Location out,
+ Location obj,
+ uint32_t offset,
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option) {
Primitive::Type type = Primitive::kPrimNot;
Register out_reg = RegisterFrom(out, type);
Register obj_reg = RegisterFrom(obj, type);
- if (emit_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
CHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
@@ -5371,15 +5378,16 @@
}
}
-void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(HInstruction* instruction,
- Location root,
- Register obj,
- uint32_t offset,
- vixl::aarch64::Label* fixup_label,
- bool requires_read_barrier) {
+void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(
+ HInstruction* instruction,
+ Location root,
+ Register obj,
+ uint32_t offset,
+ vixl::aarch64::Label* fixup_label,
+ ReadBarrierOption read_barrier_option) {
DCHECK(fixup_label == nullptr || offset == 0u);
Register root_reg = RegisterFrom(root, Primitive::kPrimNot);
- if (requires_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
DCHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index e9a47b6..0e8d4fd 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -270,7 +270,7 @@
Location out,
uint32_t offset,
Location maybe_temp,
- bool emit_read_barrier);
+ ReadBarrierOption read_barrier_option);
// Generate a heap reference load using two different registers
// `out` and `obj`:
//
@@ -286,18 +286,18 @@
Location obj,
uint32_t offset,
Location maybe_temp,
- bool emit_read_barrier);
+ ReadBarrierOption read_barrier_option);
// Generate a GC root reference load:
//
// root <- *(obj + offset)
//
- // while honoring read barriers if `requires_read_barrier` is true.
+ // while honoring read barriers based on `read_barrier_option`.
void GenerateGcRootFieldLoad(HInstruction* instruction,
Location root,
vixl::aarch64::Register obj,
uint32_t offset,
vixl::aarch64::Label* fixup_label,
- bool requires_read_barrier);
+ ReadBarrierOption read_barrier_option);
// Generate a floating-point comparison.
void GenerateFcmp(HInstruction* instruction);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 20bb36d..2a9e21d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -6077,7 +6077,9 @@
Register out = out_loc.AsRegister<Register>();
bool generate_null_check = false;
- const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+ const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
+ ? kWithoutReadBarrier
+ : kCompilerReadBarrierOption;
switch (cls->GetLoadKind()) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
@@ -6089,24 +6091,24 @@
out_loc,
Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()),
/* fixup_label */ nullptr,
- requires_read_barrier);
+ read_barrier_option);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress: {
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ movl(out, Immediate(/* placeholder */ 0));
codegen_->RecordTypePatch(cls);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
Register method_address = locations->InAt(0).AsRegister<Register>();
__ leal(out, Address(method_address, CodeGeneratorX86::kDummy32BitOffset));
codegen_->RecordTypePatch(cls);
break;
}
case HLoadClass::LoadKind::kBootImageAddress: {
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
DCHECK_NE(cls->GetAddress(), 0u);
uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
__ movl(out, Immediate(address));
@@ -6121,7 +6123,7 @@
out_loc,
Address::Absolute(address),
/* fixup_label */ nullptr,
- requires_read_barrier);
+ read_barrier_option);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -6134,7 +6136,7 @@
out_loc,
Address(base_reg, CodeGeneratorX86::kDummy32BitOffset),
fixup_label,
- requires_read_barrier);
+ read_barrier_option);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -6149,7 +6151,7 @@
out_loc,
Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())),
/* fixup_label */ nullptr,
- requires_read_barrier);
+ read_barrier_option);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -6287,7 +6289,7 @@
Address address = Address(method_address, CodeGeneratorX86::kDummy32BitOffset);
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
- GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kEmitCompilerReadBarrier);
+ GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86(load);
codegen_->AddSlowPath(slow_path);
__ testl(out, out);
@@ -6425,7 +6427,7 @@
out_loc,
obj_loc,
class_offset,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
@@ -6453,7 +6455,7 @@
out_loc,
super_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
__ testl(out, out);
// If `out` is null, we use it for the result, and jump to `done`.
__ j(kEqual, &done);
@@ -6487,7 +6489,7 @@
out_loc,
super_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
__ testl(out, out);
__ j(kNotEqual, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
@@ -6516,7 +6518,7 @@
out_loc,
component_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
__ testl(out, out);
// If `out` is null, we use it for the result, and jump to `done`.
__ j(kEqual, &done);
@@ -6653,6 +6655,9 @@
const uint32_t object_array_data_offset =
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+ // Always false when read barriers are enabled: avoiding read barriers below (done for
+ // performance and code size reasons) can produce false negatives, so we may need to go to
+ // the entrypoint for those non-fatal cases.
bool is_type_check_slow_path_fatal =
IsTypeCheckSlowPathFatal(type_check_kind, instruction->CanThrowIntoCatchBlock());
@@ -6676,7 +6681,7 @@
temp_loc,
obj_loc,
class_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
if (cls.IsRegister()) {
__ cmpl(temp, cls.AsRegister<Register>());
@@ -6696,7 +6701,7 @@
temp_loc,
obj_loc,
class_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
@@ -6707,7 +6712,7 @@
temp_loc,
super_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is null, jump to the slow path to throw the
// exception.
@@ -6731,7 +6736,7 @@
temp_loc,
obj_loc,
class_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// Walk over the class hierarchy to find a match.
NearLabel loop;
@@ -6749,7 +6754,7 @@
temp_loc,
super_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is not null, jump
// back to the beginning of the loop.
@@ -6766,7 +6771,7 @@
temp_loc,
obj_loc,
class_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// Do an exact check.
if (cls.IsRegister()) {
@@ -6783,7 +6788,7 @@
temp_loc,
component_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the component type is null (i.e. the object is not an array), jump to the slow path to
// throw the exception. Otherwise proceed with the check.
@@ -6813,13 +6818,6 @@
// conditional flags and cause the conditional jump to be incorrect. Therefore we just jump
// to the slow path if we are running under poisoning.
if (!kPoisonHeapReferences) {
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction,
- temp_loc,
- obj_loc,
- class_offset,
- /*emit_read_barrier*/ false);
-
// Try to avoid read barriers to improve the fast path. We cannot get false positives by
// doing this.
// /* HeapReference<Class> */ temp = obj->klass_
@@ -6827,14 +6825,14 @@
temp_loc,
obj_loc,
class_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// /* HeapReference<Class> */ temp = temp->iftable_
GenerateReferenceLoadTwoRegisters(instruction,
temp_loc,
temp_loc,
iftable_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
NearLabel is_null;
// Null iftable means it is empty.
__ testl(temp, temp);
@@ -7015,13 +7013,15 @@
}
}
-void InstructionCodeGeneratorX86::GenerateReferenceLoadOneRegister(HInstruction* instruction,
- Location out,
- uint32_t offset,
- Location maybe_temp,
- bool emit_read_barrier) {
+void InstructionCodeGeneratorX86::GenerateReferenceLoadOneRegister(
+ HInstruction* instruction,
+ Location out,
+ uint32_t offset,
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option) {
Register out_reg = out.AsRegister<Register>();
- if (emit_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
+ CHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(out + offset)
@@ -7046,15 +7046,16 @@
}
}
-void InstructionCodeGeneratorX86::GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
- Location out,
- Location obj,
- uint32_t offset,
- bool emit_read_barrier) {
+void InstructionCodeGeneratorX86::GenerateReferenceLoadTwoRegisters(
+ HInstruction* instruction,
+ Location out,
+ Location obj,
+ uint32_t offset,
+ ReadBarrierOption read_barrier_option) {
Register out_reg = out.AsRegister<Register>();
Register obj_reg = obj.AsRegister<Register>();
- if (emit_read_barrier) {
- DCHECK(kEmitCompilerReadBarrier);
+ if (read_barrier_option == kWithReadBarrier) {
+ CHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -7074,13 +7075,14 @@
}
}
-void InstructionCodeGeneratorX86::GenerateGcRootFieldLoad(HInstruction* instruction,
- Location root,
- const Address& address,
- Label* fixup_label,
- bool requires_read_barrier) {
+void InstructionCodeGeneratorX86::GenerateGcRootFieldLoad(
+ HInstruction* instruction,
+ Location root,
+ const Address& address,
+ Label* fixup_label,
+ ReadBarrierOption read_barrier_option) {
Register root_reg = root.AsRegister<Register>();
- if (requires_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
DCHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 87295a4..164231b 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -241,7 +241,7 @@
Location out,
uint32_t offset,
Location maybe_temp,
- bool emit_read_barrier);
+ ReadBarrierOption read_barrier_option);
// Generate a heap reference load using two different registers
// `out` and `obj`:
//
@@ -256,17 +256,17 @@
Location out,
Location obj,
uint32_t offset,
- bool emit_read_barrier);
+ ReadBarrierOption read_barrier_option);
// Generate a GC root reference load:
//
// root <- *address
//
- // while honoring read barriers if `requires_read_barrier` is true.
+ // while honoring read barriers based on `read_barrier_option`.
void GenerateGcRootFieldLoad(HInstruction* instruction,
Location root,
const Address& address,
Label* fixup_label,
- bool requires_read_barrier);
+ ReadBarrierOption read_barrier_option);
// Push value to FPU stack. `is_fp` specifies whether the value is floating point or not.
// `is_wide` specifies whether it is long/double or not.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index a6dd0c1..cb89e50 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -5501,7 +5501,9 @@
Location out_loc = locations->Out();
CpuRegister out = out_loc.AsRegister<CpuRegister>();
- const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+ const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
+ ? kWithoutReadBarrier
+ : kCompilerReadBarrierOption;
bool generate_null_check = false;
switch (cls->GetLoadKind()) {
case HLoadClass::LoadKind::kReferrersClass: {
@@ -5514,16 +5516,16 @@
out_loc,
Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()),
/* fixup_label */ nullptr,
- requires_read_barrier);
+ read_barrier_option);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
codegen_->RecordTypePatch(cls);
break;
case HLoadClass::LoadKind::kBootImageAddress: {
- DCHECK(!requires_read_barrier);
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
DCHECK_NE(cls->GetAddress(), 0u);
uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
__ movl(out, Immediate(address)); // Zero-extended.
@@ -5539,7 +5541,7 @@
out_loc,
address,
/* fixup_label */ nullptr,
- requires_read_barrier);
+ read_barrier_option);
} else {
// TODO: Consider using opcode A1, i.e. movl eax, moff32 (with 64-bit address).
__ movq(out, Immediate(cls->GetAddress()));
@@ -5547,7 +5549,7 @@
out_loc,
Address(out, 0),
/* fixup_label */ nullptr,
- requires_read_barrier);
+ read_barrier_option);
}
generate_null_check = !cls->IsInDexCache();
break;
@@ -5558,7 +5560,7 @@
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
/* no_rip */ false);
// /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
- GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, requires_read_barrier);
+ GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -5575,7 +5577,7 @@
out_loc,
Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())),
/* fixup_label */ nullptr,
- requires_read_barrier);
+ read_barrier_option);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -5688,7 +5690,7 @@
/* no_rip */ false);
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
- GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kEmitCompilerReadBarrier);
+ GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86_64(load);
codegen_->AddSlowPath(slow_path);
__ testl(out, out);
@@ -5829,7 +5831,7 @@
out_loc,
obj_loc,
class_offset,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
@@ -5862,7 +5864,7 @@
out_loc,
super_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
__ testl(out, out);
// If `out` is null, we use it for the result, and jump to `done`.
__ j(kEqual, &done);
@@ -5896,7 +5898,7 @@
out_loc,
super_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
__ testl(out, out);
__ j(kNotEqual, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
@@ -5925,7 +5927,7 @@
out_loc,
component_offset,
maybe_temp_loc,
- kEmitCompilerReadBarrier);
+ kCompilerReadBarrierOption);
__ testl(out, out);
// If `out` is null, we use it for the result, and jump to `done`.
__ j(kEqual, &done);
@@ -6065,6 +6067,9 @@
const uint32_t object_array_data_offset =
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+ // Always false when read barriers are enabled: avoiding read barriers below (done for
+ // performance and code size reasons) can produce false negatives, so we may need to go to
+ // the entrypoint for those non-fatal cases.
bool is_type_check_slow_path_fatal =
IsTypeCheckSlowPathFatal(type_check_kind, instruction->CanThrowIntoCatchBlock());
SlowPathCode* type_check_slow_path =
@@ -6088,7 +6093,7 @@
temp_loc,
obj_loc,
class_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
if (cls.IsRegister()) {
__ cmpl(temp, cls.AsRegister<CpuRegister>());
} else {
@@ -6107,7 +6112,7 @@
temp_loc,
obj_loc,
class_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
NearLabel loop;
@@ -6117,7 +6122,7 @@
temp_loc,
super_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is null, jump to the slow path to throw the
// exception.
@@ -6140,7 +6145,7 @@
temp_loc,
obj_loc,
class_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// Walk over the class hierarchy to find a match.
NearLabel loop;
__ Bind(&loop);
@@ -6157,7 +6162,7 @@
temp_loc,
super_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the class reference currently in `temp` is not null, jump
// back to the beginning of the loop.
@@ -6174,7 +6179,7 @@
temp_loc,
obj_loc,
class_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// Do an exact check.
NearLabel check_non_primitive_component_type;
if (cls.IsRegister()) {
@@ -6191,7 +6196,7 @@
temp_loc,
component_offset,
maybe_temp2_loc,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// If the component type is not null (i.e. the object is indeed
// an array), jump to label `check_non_primitive_component_type`
@@ -6230,14 +6235,14 @@
temp_loc,
obj_loc,
class_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
// /* HeapReference<Class> */ temp = temp->iftable_
GenerateReferenceLoadTwoRegisters(instruction,
temp_loc,
temp_loc,
iftable_offset,
- /*emit_read_barrier*/ false);
+ kWithoutReadBarrier);
NearLabel is_null;
// Null iftable means it is empty.
__ testl(temp, temp);
@@ -6400,13 +6405,14 @@
}
}
-void InstructionCodeGeneratorX86_64::GenerateReferenceLoadOneRegister(HInstruction* instruction,
- Location out,
- uint32_t offset,
- Location maybe_temp,
- bool emit_read_barrier) {
+void InstructionCodeGeneratorX86_64::GenerateReferenceLoadOneRegister(
+ HInstruction* instruction,
+ Location out,
+ uint32_t offset,
+ Location maybe_temp,
+ ReadBarrierOption read_barrier_option) {
CpuRegister out_reg = out.AsRegister<CpuRegister>();
- if (emit_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
CHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
@@ -6432,14 +6438,15 @@
}
}
-void InstructionCodeGeneratorX86_64::GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
- Location out,
- Location obj,
- uint32_t offset,
- bool emit_read_barrier) {
+void InstructionCodeGeneratorX86_64::GenerateReferenceLoadTwoRegisters(
+ HInstruction* instruction,
+ Location out,
+ Location obj,
+ uint32_t offset,
+ ReadBarrierOption read_barrier_option) {
CpuRegister out_reg = out.AsRegister<CpuRegister>();
CpuRegister obj_reg = obj.AsRegister<CpuRegister>();
- if (emit_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
CHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
@@ -6460,13 +6467,14 @@
}
}
-void InstructionCodeGeneratorX86_64::GenerateGcRootFieldLoad(HInstruction* instruction,
- Location root,
- const Address& address,
- Label* fixup_label,
- bool requires_read_barrier) {
+void InstructionCodeGeneratorX86_64::GenerateGcRootFieldLoad(
+ HInstruction* instruction,
+ Location root,
+ const Address& address,
+ Label* fixup_label,
+ ReadBarrierOption read_barrier_option) {
CpuRegister root_reg = root.AsRegister<CpuRegister>();
- if (requires_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
DCHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 807a9f1..e5a4152 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -235,7 +235,7 @@
Location out,
uint32_t offset,
Location maybe_temp,
- bool emit_read_barrier);
+ ReadBarrierOption read_barrier_option);
// Generate a heap reference load using two different registers
// `out` and `obj`:
//
@@ -250,17 +250,17 @@
Location out,
Location obj,
uint32_t offset,
- bool emit_read_barrier);
+ ReadBarrierOption read_barrier_option);
// Generate a GC root reference load:
//
// root <- *address
//
- // while honoring read barriers if `requires_read_barrier` is true.
+ // while honoring read barriers based on `read_barrier_option`.
void GenerateGcRootFieldLoad(HInstruction* instruction,
Location root,
const Address& address,
Label* fixup_label,
- bool requires_read_barrier);
+ ReadBarrierOption read_barrier_option);
void PushOntoFPStack(Location source, uint32_t temp_offset,
uint32_t stack_adjustment, bool is_float);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 9e81623..7fe54b9 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1226,12 +1226,22 @@
// Skip the entry block, it does not contain instructions that prevent inlining.
for (HBasicBlock* block : callee_graph->GetReversePostOrderSkipEntryBlock()) {
- if (block->IsLoopHeader() && block->GetLoopInformation()->IsIrreducible()) {
- // Don't inline methods with irreducible loops, they could prevent some
- // optimizations to run.
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because it contains an irreducible loop";
- return false;
+ if (block->IsLoopHeader()) {
+ if (block->GetLoopInformation()->IsIrreducible()) {
+ // Don't inline methods with irreducible loops, as they could prevent
+ // some optimizations from running.
+ VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because it contains an irreducible loop";
+ return false;
+ }
+ if (!block->GetLoopInformation()->HasExitEdge()) {
+ // Don't inline methods containing a loop with no exit, since such a loop
+ // causes the loop information to be computed incorrectly when it is
+ // updated after inlining.
+ VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because it contains a loop with no exit";
+ return false;
+ }
}
for (HInstructionIterator instr_it(block->GetInstructions());
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 9155322..680381a 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -735,6 +735,20 @@
return true;
}
+
+bool HLoopInformation::HasExitEdge() const {
+ // Determine if this loop has at least one exit edge.
+ HBlocksInLoopReversePostOrderIterator it_loop(*this);
+ for (; !it_loop.Done(); it_loop.Advance()) {
+ for (HBasicBlock* successor : it_loop.Current()->GetSuccessors()) {
+ if (!Contains(*successor)) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
bool HBasicBlock::Dominates(HBasicBlock* other) const {
// Walk up the dominator tree from `other`, to find out if `this`
// is an ancestor.
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 883ac65..e0c582a 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -769,6 +769,8 @@
bool DominatesAllBackEdges(HBasicBlock* block);
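+ // Returns whether this loop has at least one exit edge, i.e. an edge from a block inside
+ // the loop to a block outside of it.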
+ bool HasExitEdge() const;
+
private:
// Internal recursive implementation of `Populate`.
void PopulateRecursive(HBasicBlock* block);
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index 9d4b554..6480843 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -61,20 +61,38 @@
EventHandler gEventHandler;
ObjectTagTable gObjectTagTable(&gEventHandler);
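+// Returns ERR(NULL_POINTER) from the enclosing function when the given pointer is null.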
+#define ENSURE_NON_NULL(n) \
+ do { \
+ if ((n) == nullptr) { \
+ return ERR(NULL_POINTER); \
+ } \
+ } while (false)
+
class JvmtiFunctions {
private:
static bool IsValidEnv(jvmtiEnv* env) {
return env != nullptr;
}
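+// Returns ERR(INVALID_ENVIRONMENT) from the enclosing function when `env` is not valid.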
+#define ENSURE_VALID_ENV(env) \
+ do { \
+ if (!IsValidEnv(env)) { \
+ return ERR(INVALID_ENVIRONMENT); \
+ } \
+ } while (false)
+
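+// Checks that `env` is valid and currently possesses the capability `cap`.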
+#define ENSURE_HAS_CAP(env, cap) \
+ do { \
+ ENSURE_VALID_ENV(env); \
+ if (ArtJvmTiEnv::AsArtJvmTiEnv(env)->capabilities.cap != 1) { \
+ return ERR(MUST_POSSESS_CAPABILITY); \
+ } \
+ } while (false)
+
public:
static jvmtiError Allocate(jvmtiEnv* env, jlong size, unsigned char** mem_ptr) {
- if (!IsValidEnv(env)) {
- return ERR(INVALID_ENVIRONMENT);
- }
- if (mem_ptr == nullptr) {
- return ERR(NULL_POINTER);
- }
+ ENSURE_VALID_ENV(env);
+ ENSURE_NON_NULL(mem_ptr);
if (size < 0) {
return ERR(ILLEGAL_ARGUMENT);
} else if (size == 0) {
@@ -86,9 +104,7 @@
}
static jvmtiError Deallocate(jvmtiEnv* env, unsigned char* mem) {
- if (!IsValidEnv(env)) {
- return ERR(INVALID_ENVIRONMENT);
- }
+ ENSURE_VALID_ENV(env);
if (mem != nullptr) {
free(mem);
}
@@ -158,7 +174,7 @@
static jvmtiError GetCurrentContendedMonitor(jvmtiEnv* env,
jthread thread,
jobject* monitor_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return ERR(NOT_IMPLEMENTED);
}
static jvmtiError RunAgentThread(jvmtiEnv* env,
@@ -291,14 +307,13 @@
jclass klass,
const jvmtiHeapCallbacks* callbacks,
const void* user_data) {
+ ENSURE_HAS_CAP(env, can_tag_objects);
HeapUtil heap_util(&gObjectTagTable);
return heap_util.IterateThroughHeap(env, heap_filter, klass, callbacks, user_data);
}
static jvmtiError GetTag(jvmtiEnv* env, jobject object, jlong* tag_ptr) {
- if (object == nullptr || tag_ptr == nullptr) {
- return ERR(NULL_POINTER);
- }
+ ENSURE_HAS_CAP(env, can_tag_objects);
+
+ if (object == nullptr || tag_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
JNIEnv* jni_env = GetJniEnv(env);
if (jni_env == nullptr) {
@@ -315,6 +330,8 @@
}
static jvmtiError SetTag(jvmtiEnv* env, jobject object, jlong tag) {
+ ENSURE_HAS_CAP(env, can_tag_objects);
+
if (object == nullptr) {
return ERR(NULL_POINTER);
}
@@ -337,6 +354,8 @@
jint* count_ptr,
jobject** object_result_ptr,
jlong** tag_result_ptr) {
+ ENSURE_HAS_CAP(env, can_tag_objects);
+
JNIEnv* jni_env = GetJniEnv(env);
if (jni_env == nullptr) {
return ERR(INTERNAL);
@@ -765,9 +784,7 @@
static jvmtiError SetEventCallbacks(jvmtiEnv* env,
const jvmtiEventCallbacks* callbacks,
jint size_of_callbacks) {
- if (env == nullptr) {
- return ERR(NULL_POINTER);
- }
+ ENSURE_VALID_ENV(env);
if (size_of_callbacks < 0) {
return ERR(ILLEGAL_ARGUMENT);
}
@@ -794,6 +811,8 @@
jvmtiEvent event_type,
jthread event_thread,
...) {
+ ENSURE_VALID_ENV(env);
+ // TODO: Check for capabilities.
art::Thread* art_thread = nullptr;
if (event_thread != nullptr) {
// TODO: Need non-aborting call here, to return JVMTI_ERROR_INVALID_THREAD.
@@ -834,20 +853,136 @@
}
static jvmtiError GetPotentialCapabilities(jvmtiEnv* env, jvmtiCapabilities* capabilities_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ ENSURE_VALID_ENV(env);
+ ENSURE_NON_NULL(capabilities_ptr);
+ *capabilities_ptr = kPotentialCapabilities;
+ return OK;
}
static jvmtiError AddCapabilities(jvmtiEnv* env, const jvmtiCapabilities* capabilities_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ ENSURE_VALID_ENV(env);
+ ENSURE_NON_NULL(capabilities_ptr);
+ ArtJvmTiEnv* art_env = static_cast<ArtJvmTiEnv*>(env);
+ jvmtiError ret = OK;
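+ // Add each requested capability that is potentially available. An unavailable requested
+ // capability does not stop processing, but makes the call return ERR(NOT_AVAILABLE).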
+#define ADD_CAPABILITY(e) \
+ do { \
+ if (capabilities_ptr->e == 1) { \
+ if (kPotentialCapabilities.e == 1) { \
+ art_env->capabilities.e = 1; \
+ } else { \
+ ret = ERR(NOT_AVAILABLE); \
+ } \
+ } \
+ } while (false)
+
+ ADD_CAPABILITY(can_tag_objects);
+ ADD_CAPABILITY(can_generate_field_modification_events);
+ ADD_CAPABILITY(can_generate_field_access_events);
+ ADD_CAPABILITY(can_get_bytecodes);
+ ADD_CAPABILITY(can_get_synthetic_attribute);
+ ADD_CAPABILITY(can_get_owned_monitor_info);
+ ADD_CAPABILITY(can_get_current_contended_monitor);
+ ADD_CAPABILITY(can_get_monitor_info);
+ ADD_CAPABILITY(can_pop_frame);
+ ADD_CAPABILITY(can_redefine_classes);
+ ADD_CAPABILITY(can_signal_thread);
+ ADD_CAPABILITY(can_get_source_file_name);
+ ADD_CAPABILITY(can_get_line_numbers);
+ ADD_CAPABILITY(can_get_source_debug_extension);
+ ADD_CAPABILITY(can_access_local_variables);
+ ADD_CAPABILITY(can_maintain_original_method_order);
+ ADD_CAPABILITY(can_generate_single_step_events);
+ ADD_CAPABILITY(can_generate_exception_events);
+ ADD_CAPABILITY(can_generate_frame_pop_events);
+ ADD_CAPABILITY(can_generate_breakpoint_events);
+ ADD_CAPABILITY(can_suspend);
+ ADD_CAPABILITY(can_redefine_any_class);
+ ADD_CAPABILITY(can_get_current_thread_cpu_time);
+ ADD_CAPABILITY(can_get_thread_cpu_time);
+ ADD_CAPABILITY(can_generate_method_entry_events);
+ ADD_CAPABILITY(can_generate_method_exit_events);
+ ADD_CAPABILITY(can_generate_all_class_hook_events);
+ ADD_CAPABILITY(can_generate_compiled_method_load_events);
+ ADD_CAPABILITY(can_generate_monitor_events);
+ ADD_CAPABILITY(can_generate_vm_object_alloc_events);
+ ADD_CAPABILITY(can_generate_native_method_bind_events);
+ ADD_CAPABILITY(can_generate_garbage_collection_events);
+ ADD_CAPABILITY(can_generate_object_free_events);
+ ADD_CAPABILITY(can_force_early_return);
+ ADD_CAPABILITY(can_get_owned_monitor_stack_depth_info);
+ ADD_CAPABILITY(can_get_constant_pool);
+ ADD_CAPABILITY(can_set_native_method_prefix);
+ ADD_CAPABILITY(can_retransform_classes);
+ ADD_CAPABILITY(can_retransform_any_class);
+ ADD_CAPABILITY(can_generate_resource_exhaustion_heap_events);
+ ADD_CAPABILITY(can_generate_resource_exhaustion_threads_events);
+#undef ADD_CAPABILITY
+ return ret;
}
static jvmtiError RelinquishCapabilities(jvmtiEnv* env,
const jvmtiCapabilities* capabilities_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ ENSURE_VALID_ENV(env);
+ ENSURE_NON_NULL(capabilities_ptr);
+ ArtJvmTiEnv* art_env = reinterpret_cast<ArtJvmTiEnv*>(env);
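+ // Clear each capability named in the request; relinquishing a capability that is not held
+ // is a no-op.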
+#define DEL_CAPABILITY(e) \
+ do { \
+ if (capabilities_ptr->e == 1) { \
+ art_env->capabilities.e = 0; \
+ } \
+ } while (false)
+
+ DEL_CAPABILITY(can_tag_objects);
+ DEL_CAPABILITY(can_generate_field_modification_events);
+ DEL_CAPABILITY(can_generate_field_access_events);
+ DEL_CAPABILITY(can_get_bytecodes);
+ DEL_CAPABILITY(can_get_synthetic_attribute);
+ DEL_CAPABILITY(can_get_owned_monitor_info);
+ DEL_CAPABILITY(can_get_current_contended_monitor);
+ DEL_CAPABILITY(can_get_monitor_info);
+ DEL_CAPABILITY(can_pop_frame);
+ DEL_CAPABILITY(can_redefine_classes);
+ DEL_CAPABILITY(can_signal_thread);
+ DEL_CAPABILITY(can_get_source_file_name);
+ DEL_CAPABILITY(can_get_line_numbers);
+ DEL_CAPABILITY(can_get_source_debug_extension);
+ DEL_CAPABILITY(can_access_local_variables);
+ DEL_CAPABILITY(can_maintain_original_method_order);
+ DEL_CAPABILITY(can_generate_single_step_events);
+ DEL_CAPABILITY(can_generate_exception_events);
+ DEL_CAPABILITY(can_generate_frame_pop_events);
+ DEL_CAPABILITY(can_generate_breakpoint_events);
+ DEL_CAPABILITY(can_suspend);
+ DEL_CAPABILITY(can_redefine_any_class);
+ DEL_CAPABILITY(can_get_current_thread_cpu_time);
+ DEL_CAPABILITY(can_get_thread_cpu_time);
+ DEL_CAPABILITY(can_generate_method_entry_events);
+ DEL_CAPABILITY(can_generate_method_exit_events);
+ DEL_CAPABILITY(can_generate_all_class_hook_events);
+ DEL_CAPABILITY(can_generate_compiled_method_load_events);
+ DEL_CAPABILITY(can_generate_monitor_events);
+ DEL_CAPABILITY(can_generate_vm_object_alloc_events);
+ DEL_CAPABILITY(can_generate_native_method_bind_events);
+ DEL_CAPABILITY(can_generate_garbage_collection_events);
+ DEL_CAPABILITY(can_generate_object_free_events);
+ DEL_CAPABILITY(can_force_early_return);
+ DEL_CAPABILITY(can_get_owned_monitor_stack_depth_info);
+ DEL_CAPABILITY(can_get_constant_pool);
+ DEL_CAPABILITY(can_set_native_method_prefix);
+ DEL_CAPABILITY(can_retransform_classes);
+ DEL_CAPABILITY(can_retransform_any_class);
+ DEL_CAPABILITY(can_generate_resource_exhaustion_heap_events);
+ DEL_CAPABILITY(can_generate_resource_exhaustion_threads_events);
+#undef DEL_CAPABILITY
+ return OK;
}
static jvmtiError GetCapabilities(jvmtiEnv* env, jvmtiCapabilities* capabilities_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ ENSURE_VALID_ENV(env);
+ ENSURE_NON_NULL(capabilities_ptr);
+ ArtJvmTiEnv* art_env = reinterpret_cast<ArtJvmTiEnv*>(env);
+ *capabilities_ptr = art_env->capabilities;
+ return OK;
}
static jvmtiError GetCurrentThreadCpuTimerInfo(jvmtiEnv* env, jvmtiTimerInfo* info_ptr) {
@@ -903,44 +1038,31 @@
}
static jvmtiError DisposeEnvironment(jvmtiEnv* env) {
- if (!IsValidEnv(env)) {
- return ERR(INVALID_ENVIRONMENT);
- }
+ ENSURE_VALID_ENV(env);
delete env;
return OK;
}
static jvmtiError SetEnvironmentLocalStorage(jvmtiEnv* env, const void* data) {
- if (!IsValidEnv(env)) {
- return ERR(INVALID_ENVIRONMENT);
- }
+ ENSURE_VALID_ENV(env);
reinterpret_cast<ArtJvmTiEnv*>(env)->local_data = const_cast<void*>(data);
return OK;
}
static jvmtiError GetEnvironmentLocalStorage(jvmtiEnv* env, void** data_ptr) {
- if (!IsValidEnv(env)) {
- return ERR(INVALID_ENVIRONMENT);
- }
+ ENSURE_VALID_ENV(env);
*data_ptr = reinterpret_cast<ArtJvmTiEnv*>(env)->local_data;
return OK;
}
static jvmtiError GetVersionNumber(jvmtiEnv* env, jint* version_ptr) {
- if (!IsValidEnv(env)) {
- return ERR(INVALID_ENVIRONMENT);
- }
+ ENSURE_VALID_ENV(env);
*version_ptr = JVMTI_VERSION;
return OK;
}
static jvmtiError GetErrorName(jvmtiEnv* env, jvmtiError error, char** name_ptr) {
- if (!IsValidEnv(env)) {
- return ERR(INVALID_ENVIRONMENT);
- }
- if (name_ptr == nullptr) {
- return ERR(NULL_POINTER);
- }
+ ENSURE_VALID_ENV(env);
+ ENSURE_NON_NULL(name_ptr);
switch (error) {
#define ERROR_CASE(e) case (JVMTI_ERROR_ ## e) : do { \
*name_ptr = const_cast<char*>("JVMTI_ERROR_"#e); \
diff --git a/runtime/openjdkjvmti/art_jvmti.h b/runtime/openjdkjvmti/art_jvmti.h
index a321124..48b29a3 100644
--- a/runtime/openjdkjvmti/art_jvmti.h
+++ b/runtime/openjdkjvmti/art_jvmti.h
@@ -52,11 +52,13 @@
struct ArtJvmTiEnv : public jvmtiEnv {
art::JavaVMExt* art_vm;
void* local_data;
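+ // Capabilities currently possessed by this environment; zero-initialized at construction.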
+ jvmtiCapabilities capabilities;
EventMasks event_masks;
std::unique_ptr<jvmtiEventCallbacks> event_callbacks;
- explicit ArtJvmTiEnv(art::JavaVMExt* runtime) : art_vm(runtime), local_data(nullptr) {
+ explicit ArtJvmTiEnv(art::JavaVMExt* runtime)
+ : art_vm(runtime), local_data(nullptr), capabilities() {
functions = &gJvmtiInterface;
}
@@ -121,6 +123,50 @@
return ret;
}
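+// The capabilities ART can currently offer to agents; for now only object tagging
+// (can_tag_objects) is advertised.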
+const jvmtiCapabilities kPotentialCapabilities = {
+ .can_tag_objects = 1,
+ .can_generate_field_modification_events = 0,
+ .can_generate_field_access_events = 0,
+ .can_get_bytecodes = 0,
+ .can_get_synthetic_attribute = 0,
+ .can_get_owned_monitor_info = 0,
+ .can_get_current_contended_monitor = 0,
+ .can_get_monitor_info = 0,
+ .can_pop_frame = 0,
+ .can_redefine_classes = 0,
+ .can_signal_thread = 0,
+ .can_get_source_file_name = 0,
+ .can_get_line_numbers = 0,
+ .can_get_source_debug_extension = 0,
+ .can_access_local_variables = 0,
+ .can_maintain_original_method_order = 0,
+ .can_generate_single_step_events = 0,
+ .can_generate_exception_events = 0,
+ .can_generate_frame_pop_events = 0,
+ .can_generate_breakpoint_events = 0,
+ .can_suspend = 0,
+ .can_redefine_any_class = 0,
+ .can_get_current_thread_cpu_time = 0,
+ .can_get_thread_cpu_time = 0,
+ .can_generate_method_entry_events = 0,
+ .can_generate_method_exit_events = 0,
+ .can_generate_all_class_hook_events = 0,
+ .can_generate_compiled_method_load_events = 0,
+ .can_generate_monitor_events = 0,
+ .can_generate_vm_object_alloc_events = 0,
+ .can_generate_native_method_bind_events = 0,
+ .can_generate_garbage_collection_events = 0,
+ .can_generate_object_free_events = 0,
+ .can_force_early_return = 0,
+ .can_get_owned_monitor_stack_depth_info = 0,
+ .can_get_constant_pool = 0,
+ .can_set_native_method_prefix = 0,
+ .can_retransform_classes = 0,
+ .can_retransform_any_class = 0,
+ .can_generate_resource_exhaustion_heap_events = 0,
+ .can_generate_resource_exhaustion_threads_events = 0,
+};
+
} // namespace openjdkjvmti
#endif // ART_RUNTIME_OPENJDKJVMTI_ART_JVMTI_H_
diff --git a/test/478-checker-inline-noreturn/expected.txt b/test/478-checker-inline-noreturn/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/478-checker-inline-noreturn/expected.txt
diff --git a/test/478-checker-inline-noreturn/info.txt b/test/478-checker-inline-noreturn/info.txt
new file mode 100644
index 0000000..64f42ed
--- /dev/null
+++ b/test/478-checker-inline-noreturn/info.txt
@@ -0,0 +1,3 @@
+Tests inlining a method that contains a loop with no exit into a loop. The
+LinearOrder computation fails on the incorrectly updated HLoopInformation
+if we inline such a loop.
diff --git a/test/478-checker-inline-noreturn/src/Main.java b/test/478-checker-inline-noreturn/src/Main.java
new file mode 100644
index 0000000..7aaeac0
--- /dev/null
+++ b/test/478-checker-inline-noreturn/src/Main.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+ * A test that checks that the inliner does not inline functions that contain
+ * a loop with no exit. This is because the incremental update to
+ * HLoopInformation done by the inliner does not work with the LinearOrder
+ * computation if the inlined function does not always return.
+ */
+
+public class Main {
+
+ public static void assertIntEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static int $opt$noinline$Function(int x, int y) {
+ int result;
+ if (x <= y) {
+ result = 42;
+ } else {
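+ // Deliberate loop with no exit: this is what keeps the method from being inlined.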
+ while (true);
+ }
+ return result;
+ }
+
+ /// CHECK-START: int Main.callerLoop(int, int) inliner (before)
+ /// CHECK: InvokeStaticOrDirect method_name:Main.$opt$noinline$Function loop:{{B\d+}}
+
+ /// CHECK-START: int Main.callerLoop(int, int) inliner (after)
+ /// CHECK: InvokeStaticOrDirect method_name:Main.$opt$noinline$Function loop:{{B\d+}}
+
+ public static int callerLoop(int max_x, int max_y) {
+ int total = 0;
+ for (int x = 0; x < max_x; ++x) {
+ total += $opt$noinline$Function(x, max_y);
+ }
+ return total;
+ }
+
+ public static void main(String[] args) {
+ assertIntEquals(42, callerLoop(1, 1));
+ }
+}
diff --git a/test/902-hello-transformation/transform.cc b/test/902-hello-transformation/transform.cc
index 5b0d219..3369dd4 100644
--- a/test/902-hello-transformation/transform.cc
+++ b/test/902-hello-transformation/transform.cc
@@ -23,6 +23,7 @@
#include "base/logging.h"
#include "jni.h"
#include "openjdkjvmti/jvmti.h"
+#include "ti-agent/common_helper.h"
#include "ti-agent/common_load.h"
#include "utils.h"
@@ -132,15 +133,13 @@
jint OnLoad(JavaVM* vm,
char* options,
void* reserved ATTRIBUTE_UNUSED) {
- jvmtiCapabilities caps;
RuntimeIsJvm = (strcmp("jvm", options) == 0);
if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
printf("Unable to get jvmti env!\n");
return 1;
}
+ SetAllCapabilities(jvmti_env);
if (IsJVM()) {
- jvmti_env->GetPotentialCapabilities(&caps);
- jvmti_env->AddCapabilities(&caps);
jvmtiEventCallbacks cbs;
memset(&cbs, 0, sizeof(cbs));
cbs.ClassFileLoadHook = transformationHook;
diff --git a/test/903-hello-tagging/tagging.cc b/test/903-hello-tagging/tagging.cc
index bed4e5d..1557d45 100644
--- a/test/903-hello-tagging/tagging.cc
+++ b/test/903-hello-tagging/tagging.cc
@@ -28,6 +28,7 @@
#include "art_method-inl.h"
#include "base/logging.h"
#include "openjdkjvmti/jvmti.h"
+#include "ti-agent/common_helper.h"
#include "ti-agent/common_load.h"
#include "utils.h"
@@ -145,6 +146,7 @@
printf("Unable to get jvmti env!\n");
return 1;
}
+ SetAllCapabilities(jvmti_env);
return 0;
}
diff --git a/test/904-object-allocation/tracking.cc b/test/904-object-allocation/tracking.cc
index 57bfed5..9261a9f 100644
--- a/test/904-object-allocation/tracking.cc
+++ b/test/904-object-allocation/tracking.cc
@@ -26,6 +26,7 @@
#include "openjdkjvmti/jvmti.h"
#include "ScopedLocalRef.h"
#include "ScopedUtfChars.h"
+#include "ti-agent/common_helper.h"
#include "ti-agent/common_load.h"
#include "utils.h"
@@ -95,6 +96,7 @@
return 1;
}
jvmti_env->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_VM_OBJECT_ALLOC, nullptr);
+ SetAllCapabilities(jvmti_env);
return 0;
}
diff --git a/test/905-object-free/tracking_free.cc b/test/905-object-free/tracking_free.cc
index b41a914..fc43acc 100644
--- a/test/905-object-free/tracking_free.cc
+++ b/test/905-object-free/tracking_free.cc
@@ -26,6 +26,7 @@
#include "openjdkjvmti/jvmti.h"
#include "ScopedLocalRef.h"
#include "ScopedUtfChars.h"
+#include "ti-agent/common_helper.h"
#include "ti-agent/common_load.h"
#include "utils.h"
@@ -87,6 +88,7 @@
printf("Unable to get jvmti env!\n");
return 1;
}
+ SetAllCapabilities(jvmti_env);
return 0;
}
diff --git a/test/906-iterate-heap/iterate_heap.cc b/test/906-iterate-heap/iterate_heap.cc
index ab1d8d8..8dac89d 100644
--- a/test/906-iterate-heap/iterate_heap.cc
+++ b/test/906-iterate-heap/iterate_heap.cc
@@ -25,6 +25,7 @@
#include "jni.h"
#include "openjdkjvmti/jvmti.h"
#include "ScopedPrimitiveArray.h"
+#include "ti-agent/common_helper.h"
#include "ti-agent/common_load.h"
namespace art {
@@ -180,6 +181,7 @@
printf("Unable to get jvmti env!\n");
return 1;
}
+ SetAllCapabilities(jvmti_env);
return 0;
}
diff --git a/test/907-get-loaded-classes/get_loaded_classes.cc b/test/907-get-loaded-classes/get_loaded_classes.cc
index ce929a6..afbb774 100644
--- a/test/907-get-loaded-classes/get_loaded_classes.cc
+++ b/test/907-get-loaded-classes/get_loaded_classes.cc
@@ -72,6 +72,7 @@
printf("Unable to get jvmti env!\n");
return 1;
}
+ SetAllCapabilities(jvmti_env);
return 0;
}
diff --git a/test/908-gc-start-finish/gc_callbacks.cc b/test/908-gc-start-finish/gc_callbacks.cc
index d546513..771d1ad 100644
--- a/test/908-gc-start-finish/gc_callbacks.cc
+++ b/test/908-gc-start-finish/gc_callbacks.cc
@@ -22,6 +22,7 @@
#include "base/macros.h"
#include "jni.h"
#include "openjdkjvmti/jvmti.h"
+#include "ti-agent/common_helper.h"
#include "ti-agent/common_load.h"
namespace art {
@@ -98,6 +99,7 @@
printf("Unable to get jvmti env!\n");
return 1;
}
+ SetAllCapabilities(jvmti_env);
return 0;
}
diff --git a/test/910-methods/methods.cc b/test/910-methods/methods.cc
index 005cba6..8f0850b 100644
--- a/test/910-methods/methods.cc
+++ b/test/910-methods/methods.cc
@@ -109,6 +109,7 @@
printf("Unable to get jvmti env!\n");
return 1;
}
+ SetAllCapabilities(jvmti_env);
return 0;
}
diff --git a/test/911-get-stack-trace/stack_trace.cc b/test/911-get-stack-trace/stack_trace.cc
index a30416d..e7d9380 100644
--- a/test/911-get-stack-trace/stack_trace.cc
+++ b/test/911-get-stack-trace/stack_trace.cc
@@ -87,6 +87,7 @@
printf("Unable to get jvmti env!\n");
return 1;
}
+ SetAllCapabilities(jvmti_env);
return 0;
}
diff --git a/test/912-classes/classes.cc b/test/912-classes/classes.cc
index fbf3259..838a92a 100644
--- a/test/912-classes/classes.cc
+++ b/test/912-classes/classes.cc
@@ -69,6 +69,7 @@
printf("Unable to get jvmti env!\n");
return 1;
}
+ SetAllCapabilities(jvmti_env);
return 0;
}
diff --git a/test/913-heaps/heaps.cc b/test/913-heaps/heaps.cc
index d74026c..bc07fe9 100644
--- a/test/913-heaps/heaps.cc
+++ b/test/913-heaps/heaps.cc
@@ -27,6 +27,7 @@
#include "base/stringprintf.h"
#include "jni.h"
#include "openjdkjvmti/jvmti.h"
+
#include "ti-agent/common_helper.h"
#include "ti-agent/common_load.h"
@@ -275,6 +276,7 @@
printf("Unable to get jvmti env!\n");
return 1;
}
+ SetAllCapabilities(jvmti_env);
return 0;
}
diff --git a/test/ti-agent/common_helper.h b/test/ti-agent/common_helper.h
index 9aeb98c..84997f3 100644
--- a/test/ti-agent/common_helper.h
+++ b/test/ti-agent/common_helper.h
@@ -53,6 +53,12 @@
return ret.release();
}
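+// Test helper: grant the environment every capability it can potentially hold. The return
+// values of the JVMTI calls are ignored; the test agents assume they succeed.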
+static void SetAllCapabilities(jvmtiEnv* env) {
+ jvmtiCapabilities caps;
+ env->GetPotentialCapabilities(&caps);
+ env->AddCapabilities(&caps);
+}
+
} // namespace art
#endif // ART_TEST_TI_AGENT_COMMON_HELPER_H_