Implement heap poisoning in ART's Optimizing compiler.
- Instrument ARM, ARM64, x86 and x86-64 code generators.
- Note: To turn heap poisoning on in Optimizing, set the
environment variable `ART_HEAP_POISONING' to "true"
before compiling ART.
Bug: 12687968
Change-Id: Ib3120b38cf805a8a50207a314b9ccc90c8d93740
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index afffbe2..c9d19c8 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -213,7 +213,7 @@
__ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(cls_->GetTypeIndex()));
__ gs()->call(Address::Absolute((do_clinit_
? QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeStaticStorage)
- : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeType)) , true));
+ : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeType)), true));
RecordPcInfo(codegen, at_, dex_pc_);
Location out = locations->Out();
@@ -1429,6 +1429,7 @@
DCHECK(receiver.IsRegister());
__ movl(temp, Address(receiver.AsRegister<CpuRegister>(), class_offset));
codegen_->MaybeRecordImplicitNullCheck(invoke);
+ __ MaybeUnpoisonHeapReference(temp);
// temp = temp->GetMethodAt(method_offset);
__ movq(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
@@ -1466,6 +1467,7 @@
__ movl(temp, Address(receiver.AsRegister<CpuRegister>(), class_offset));
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
+ __ MaybeUnpoisonHeapReference(temp);
// temp = temp->GetImtEntryAt(method_offset);
__ movq(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
@@ -3060,6 +3062,8 @@
InvokeRuntimeCallingConvention calling_convention;
codegen_->Load64BitValue(CpuRegister(calling_convention.GetRegisterAt(0)),
instruction->GetTypeIndex());
+      // Note: if heap poisoning is enabled, the entry point takes care
+      // of poisoning the reference.
__ gs()->call(
Address::Absolute(GetThreadOffset<kX86_64WordSize>(instruction->GetEntrypoint()), true));
@@ -3082,6 +3086,8 @@
codegen_->Load64BitValue(CpuRegister(calling_convention.GetRegisterAt(0)),
instruction->GetTypeIndex());
+      // Note: if heap poisoning is enabled, the entry point takes care
+      // of poisoning the reference.
__ gs()->call(
Address::Absolute(GetThreadOffset<kX86_64WordSize>(instruction->GetEntrypoint()), true));
@@ -3270,6 +3276,10 @@
if (is_volatile) {
GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
+
+ if (field_type == Primitive::kPrimNot) {
+ __ MaybeUnpoisonHeapReference(out.AsRegister<CpuRegister>());
+ }
}
void LocationsBuilderX86_64::HandleFieldSet(HInstruction* instruction,
@@ -3278,8 +3288,9 @@
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ Primitive::Type field_type = field_info.GetFieldType();
bool needs_write_barrier =
- CodeGenerator::StoreNeedsWriteBarrier(field_info.GetFieldType(), instruction->InputAt(1));
+ CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1));
locations->SetInAt(0, Location::RequiresRegister());
if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
@@ -3289,7 +3300,10 @@
}
if (needs_write_barrier) {
// Temporary registers for the write barrier.
+ locations->AddTemp(Location::RequiresRegister()); // Possibly used for reference poisoning too.
locations->AddTemp(Location::RequiresRegister());
+ } else if (kPoisonHeapReferences && field_type == Primitive::kPrimNot) {
+ // Temporary register for the reference poisoning.
locations->AddTemp(Location::RequiresRegister());
}
}
@@ -3337,9 +3351,20 @@
case Primitive::kPrimNot: {
if (value.IsConstant()) {
int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
+ // `field_type == Primitive::kPrimNot` implies `v == 0`.
+ DCHECK((field_type != Primitive::kPrimNot) || (v == 0));
+ // Note: if heap poisoning is enabled, no need to poison
+ // (negate) `v` if it is a reference, as it would be null.
__ movl(Address(base, offset), Immediate(v));
} else {
- __ movl(Address(base, offset), value.AsRegister<CpuRegister>());
+ if (kPoisonHeapReferences && field_type == Primitive::kPrimNot) {
+ CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
+ __ movl(temp, value.AsRegister<CpuRegister>());
+ __ PoisonHeapReference(temp);
+ __ movl(Address(base, offset), temp);
+ } else {
+ __ movl(Address(base, offset), value.AsRegister<CpuRegister>());
+ }
}
break;
}
@@ -3483,8 +3508,9 @@
LocationSummary* locations = instruction->GetLocations();
CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
Location index = locations->InAt(1);
+ Primitive::Type type = instruction->GetType();
- switch (instruction->GetType()) {
+ switch (type) {
case Primitive::kPrimBoolean: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
@@ -3585,10 +3611,15 @@
}
case Primitive::kPrimVoid:
- LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
}
codegen_->MaybeRecordImplicitNullCheck(instruction);
+
+ if (type == Primitive::kPrimNot) {
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+ __ MaybeUnpoisonHeapReference(out);
+ }
}
void LocationsBuilderX86_64::VisitArraySet(HArraySet* instruction) {
@@ -3620,7 +3651,7 @@
if (needs_write_barrier) {
// Temporary registers for the write barrier.
- locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too.
locations->AddTemp(Location::RequiresRegister());
}
}
@@ -3696,20 +3727,42 @@
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
if (value.IsRegister()) {
- __ movl(Address(obj, offset), value.AsRegister<CpuRegister>());
+ if (kPoisonHeapReferences && value_type == Primitive::kPrimNot) {
+ CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
+ __ movl(temp, value.AsRegister<CpuRegister>());
+ __ PoisonHeapReference(temp);
+ __ movl(Address(obj, offset), temp);
+ } else {
+ __ movl(Address(obj, offset), value.AsRegister<CpuRegister>());
+ }
} else {
DCHECK(value.IsConstant()) << value;
int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
+ // `value_type == Primitive::kPrimNot` implies `v == 0`.
+ DCHECK((value_type != Primitive::kPrimNot) || (v == 0));
+ // Note: if heap poisoning is enabled, no need to poison
+ // (negate) `v` if it is a reference, as it would be null.
__ movl(Address(obj, offset), Immediate(v));
}
} else {
DCHECK(index.IsRegister()) << index;
if (value.IsRegister()) {
- __ movl(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset),
- value.AsRegister<CpuRegister>());
+ if (kPoisonHeapReferences && value_type == Primitive::kPrimNot) {
+ CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
+ __ movl(temp, value.AsRegister<CpuRegister>());
+ __ PoisonHeapReference(temp);
+ __ movl(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset), temp);
+ } else {
+ __ movl(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset),
+ value.AsRegister<CpuRegister>());
+ }
} else {
DCHECK(value.IsConstant()) << value;
int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
+ // `value_type == Primitive::kPrimNot` implies `v == 0`.
+ DCHECK((value_type != Primitive::kPrimNot) || (v == 0));
+ // Note: if heap poisoning is enabled, no need to poison
+ // (negate) `v` if it is a reference, as it would be null.
__ movl(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset),
Immediate(v));
}
@@ -3724,6 +3777,8 @@
}
} else {
DCHECK_EQ(value_type, Primitive::kPrimNot);
+      // Note: if heap poisoning is enabled, pAputObject takes care
+      // of poisoning the reference.
__ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAputObject),
true));
DCHECK(!codegen_->IsLeafMethod());
@@ -3876,7 +3931,7 @@
Thread::CardTableOffset<kX86_64WordSize>().Int32Value(), true));
__ movq(temp, object);
__ shrq(temp, Immediate(gc::accounting::CardTable::kCardShift));
- __ movb(Address(temp, card, TIMES_1, 0), card);
+ __ movb(Address(temp, card, TIMES_1, 0), card);
if (value_can_be_null) {
__ Bind(&is_null);
}
@@ -4187,6 +4242,8 @@
__ movl(out, Address(
current_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
+ __ MaybeUnpoisonHeapReference(out);
+
SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
@@ -4234,7 +4291,9 @@
CpuRegister current_method = locations->InAt(0).AsRegister<CpuRegister>();
__ movl(out, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
__ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value()));
+ __ MaybeUnpoisonHeapReference(out);
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
+ __ MaybeUnpoisonHeapReference(out);
__ testl(out, out);
__ j(kEqual, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -4293,6 +4352,7 @@
}
// Compare the class of `obj` with `cls`.
__ movl(out, Address(obj, class_offset));
+ __ MaybeUnpoisonHeapReference(out);
if (cls.IsRegister()) {
__ cmpl(out, cls.AsRegister<CpuRegister>());
} else {
@@ -4351,13 +4411,15 @@
}
// Compare the class of `obj` with `cls`.
__ movl(temp, Address(obj, class_offset));
+ __ MaybeUnpoisonHeapReference(temp);
if (cls.IsRegister()) {
__ cmpl(temp, cls.AsRegister<CpuRegister>());
} else {
DCHECK(cls.IsStackSlot()) << cls;
__ cmpl(temp, Address(CpuRegister(RSP), cls.GetStackIndex()));
}
- // Classes must be equal for the checkcast to succeed.
+ // The checkcast succeeds if the classes are equal (fast path).
+ // Otherwise, we need to go into the slow path to check the types.
__ j(kNotEqual, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
@@ -4576,5 +4638,7 @@
return Address::RIP(fixup);
}
+#undef __
+
} // namespace x86_64
} // namespace art