Merge "Revert "Iterative move coalescing for gc regalloc""
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index bac0ff3..7395164 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -252,6 +252,7 @@
art_debug_cflags := \
$(ART_DEBUG_OPT_FLAG) \
-DDYNAMIC_ANNOTATIONS_ENABLED=1 \
+ -DVIXL_DEBUG \
-UNDEBUG
# Assembler flags for non-debug ART and ART tools.
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 1e2c4ef..3d07fc0 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -234,8 +234,6 @@
runtime/interpreter/unstarted_runtime_test.cc \
runtime/java_vm_ext_test.cc \
runtime/jit/profile_compilation_info_test.cc \
- runtime/lambda/closure_test.cc \
- runtime/lambda/shorty_field_type_test.cc \
runtime/leb128_test.cc \
runtime/mem_map_test.cc \
runtime/memory_region_test.cc \
@@ -636,7 +634,7 @@
ifeq ($$(art_target_or_host),target)
$$(eval LOCAL_CLANG := $$(ART_TARGET_CLANG))
$$(eval $$(call set-target-local-cflags-vars,debug))
- LOCAL_SHARED_LIBRARIES += libdl libicuuc libicui18n libnativehelper libz libcutils libvixl-arm64
+ LOCAL_SHARED_LIBRARIES += libdl libicuuc libicui18n libnativehelper libz libcutils libvixld-arm64
LOCAL_MODULE_PATH_32 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_32)
LOCAL_MODULE_PATH_64 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_64)
LOCAL_MULTILIB := both
@@ -680,7 +678,7 @@
LOCAL_CLANG := $$(ART_HOST_CLANG)
LOCAL_CFLAGS += $$(ART_HOST_CFLAGS) $$(ART_HOST_DEBUG_CFLAGS)
LOCAL_ASFLAGS += $$(ART_HOST_ASFLAGS) $$(ART_HOST_DEBUG_ASFLAGS)
- LOCAL_SHARED_LIBRARIES += libicuuc-host libicui18n-host libnativehelper libziparchive-host libz-host libvixl-arm64
+ LOCAL_SHARED_LIBRARIES += libicuuc-host libicui18n-host libnativehelper libziparchive-host libz-host libvixld-arm64
LOCAL_LDLIBS := -lpthread -ldl
LOCAL_IS_HOST_MODULE := true
LOCAL_MULTILIB := both
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 7ded3bf..5809dcd 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -501,11 +501,6 @@
EXPECT_SINGLE_PARSE_VALUE(ExperimentalFlags::kNone,
"-Xexperimental:none",
M::Experimental);
-
- // Enabled explicitly
- EXPECT_SINGLE_PARSE_VALUE(ExperimentalFlags::kLambdas,
- "-Xexperimental:lambdas",
- M::Experimental);
}
// -Xverify:_
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index f05648c..1146f95 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -735,8 +735,6 @@
Result ParseAndAppend(const std::string& option, ExperimentalFlags& existing) {
if (option == "none") {
existing = ExperimentalFlags::kNone;
- } else if (option == "lambdas") {
- existing = existing | ExperimentalFlags::kLambdas;
} else {
return Result::Failure(std::string("Unknown option '") + option + "'");
}
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 16a158c..46b7e5d 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -98,6 +98,7 @@
utils/arm/assembler_arm.cc \
utils/arm/assembler_arm32.cc \
utils/arm/assembler_thumb2.cc \
+ utils/arm/jni_macro_assembler_arm.cc \
utils/arm/managed_register_arm.cc \
# TODO We should really separate out those files that are actually needed for both variants of an
@@ -114,6 +115,7 @@
optimizing/instruction_simplifier_shared.cc \
optimizing/intrinsics_arm64.cc \
utils/arm64/assembler_arm64.cc \
+ utils/arm64/jni_macro_assembler_arm64.cc \
utils/arm64/managed_register_arm64.cc \
LIBART_COMPILER_SRC_FILES_mips := \
@@ -285,9 +287,9 @@
# Vixl assembly support for ARM64 targets.
ifeq ($$(art_ndebug_or_debug),debug)
ifeq ($$(art_static_or_shared), static)
-    LOCAL_WHOLE_STATIC_LIBRARIES += libvixl-arm64
+    LOCAL_WHOLE_STATIC_LIBRARIES += libvixld-arm64
else
- LOCAL_SHARED_LIBRARIES += libvixl-arm64
+ LOCAL_SHARED_LIBRARIES += libvixld-arm64
endif
else
ifeq ($$(art_static_or_shared), static)
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 8286033..d0a8335 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -77,10 +77,6 @@
static constexpr bool kTimeCompileMethod = !kIsDebugBuild;
-// Whether classes-to-compile and methods-to-compile are only applied to the boot image, or, when
-// given, to all compilations.
-static constexpr bool kRestrictCompilationFiltersToImage = true;
-
// Print additional info during profile guided compilation.
static constexpr bool kDebugProfileGuidedCompilation = false;
@@ -946,10 +942,6 @@
}
bool CompilerDriver::IsClassToCompile(const char* descriptor) const {
- if (kRestrictCompilationFiltersToImage && !IsBootImage()) {
- return true;
- }
-
if (classes_to_compile_ == nullptr) {
return true;
}
@@ -957,10 +949,6 @@
}
bool CompilerDriver::IsMethodToCompile(const MethodReference& method_ref) const {
- if (kRestrictCompilationFiltersToImage && !IsBootImage()) {
- return true;
- }
-
if (methods_to_compile_ == nullptr) {
return true;
}
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index e223534..86f91c5 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -170,7 +170,7 @@
Runtime* r = Runtime::Current();
r->SetInstructionSet(kRuntimeISA);
ArtMethod* save_method = r->CreateCalleeSaveMethod();
- r->SetCalleeSaveMethod(save_method, Runtime::kSaveAll);
+ r->SetCalleeSaveMethod(save_method, Runtime::kSaveAllCalleeSaves);
QuickMethodFrameInfo frame_info = r->GetRuntimeMethodFrameInfo(save_method);
ASSERT_EQ(kStackAlignment, 16U);
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 7c87a60..efae4d0 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -1373,11 +1373,12 @@
image_methods_[ImageHeader::kResolutionMethod] = runtime->GetResolutionMethod();
image_methods_[ImageHeader::kImtConflictMethod] = runtime->GetImtConflictMethod();
image_methods_[ImageHeader::kImtUnimplementedMethod] = runtime->GetImtUnimplementedMethod();
- image_methods_[ImageHeader::kCalleeSaveMethod] = runtime->GetCalleeSaveMethod(Runtime::kSaveAll);
- image_methods_[ImageHeader::kRefsOnlySaveMethod] =
- runtime->GetCalleeSaveMethod(Runtime::kRefsOnly);
- image_methods_[ImageHeader::kRefsAndArgsSaveMethod] =
- runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+ image_methods_[ImageHeader::kSaveAllCalleeSavesMethod] =
+ runtime->GetCalleeSaveMethod(Runtime::kSaveAllCalleeSaves);
+ image_methods_[ImageHeader::kSaveRefsOnlyMethod] =
+ runtime->GetCalleeSaveMethod(Runtime::kSaveRefsOnly);
+ image_methods_[ImageHeader::kSaveRefsAndArgsMethod] =
+ runtime->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs);
image_methods_[ImageHeader::kSaveEverythingMethod] =
runtime->GetCalleeSaveMethod(Runtime::kSaveEverything);
// Visit image methods first to have the main runtime methods in the first image.
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 5eaf11e..ab85c12 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -2531,7 +2531,7 @@
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, ArmEncodableConstantOrRegister(add->InputAt(1), ADD));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
}
@@ -2568,13 +2568,18 @@
break;
case Primitive::kPrimLong: {
- DCHECK(second.IsRegisterPair());
- __ adds(out.AsRegisterPairLow<Register>(),
- first.AsRegisterPairLow<Register>(),
- ShifterOperand(second.AsRegisterPairLow<Register>()));
- __ adc(out.AsRegisterPairHigh<Register>(),
- first.AsRegisterPairHigh<Register>(),
- ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ if (second.IsConstant()) {
+ uint64_t value = static_cast<uint64_t>(Int64FromConstant(second.GetConstant()));
+ GenerateAddLongConst(out, first, value);
+ } else {
+ DCHECK(second.IsRegisterPair());
+ __ adds(out.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ ShifterOperand(second.AsRegisterPairLow<Register>()));
+ __ adc(out.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ }
break;
}
@@ -2608,7 +2613,7 @@
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, ArmEncodableConstantOrRegister(sub->InputAt(1), SUB));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
}
@@ -2644,13 +2649,18 @@
}
case Primitive::kPrimLong: {
- DCHECK(second.IsRegisterPair());
- __ subs(out.AsRegisterPairLow<Register>(),
- first.AsRegisterPairLow<Register>(),
- ShifterOperand(second.AsRegisterPairLow<Register>()));
- __ sbc(out.AsRegisterPairHigh<Register>(),
- first.AsRegisterPairHigh<Register>(),
- ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ if (second.IsConstant()) {
+ uint64_t value = static_cast<uint64_t>(Int64FromConstant(second.GetConstant()));
+ GenerateAddLongConst(out, first, -value);
+ } else {
+ DCHECK(second.IsRegisterPair());
+ __ subs(out.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ ShifterOperand(second.AsRegisterPairLow<Register>()));
+ __ sbc(out.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ }
break;
}
@@ -4052,31 +4062,51 @@
Opcode opcode) {
uint64_t value = static_cast<uint64_t>(Int64FromConstant(input_cst));
if (Primitive::Is64BitType(input_cst->GetType())) {
- return CanEncodeConstantAsImmediate(Low32Bits(value), opcode) &&
- CanEncodeConstantAsImmediate(High32Bits(value), opcode);
+ Opcode high_opcode = opcode;
+ SetCc low_set_cc = kCcDontCare;
+ switch (opcode) {
+ case SUB:
+ // Flip the operation to an ADD.
+ value = -value;
+ opcode = ADD;
+ FALLTHROUGH_INTENDED;
+ case ADD:
+ if (Low32Bits(value) == 0u) {
+ return CanEncodeConstantAsImmediate(High32Bits(value), opcode, kCcDontCare);
+ }
+ high_opcode = ADC;
+ low_set_cc = kCcSet;
+ break;
+ default:
+ break;
+ }
+ return CanEncodeConstantAsImmediate(Low32Bits(value), opcode, low_set_cc) &&
+ CanEncodeConstantAsImmediate(High32Bits(value), high_opcode, kCcDontCare);
} else {
return CanEncodeConstantAsImmediate(Low32Bits(value), opcode);
}
}
-bool LocationsBuilderARM::CanEncodeConstantAsImmediate(uint32_t value, Opcode opcode) {
+bool LocationsBuilderARM::CanEncodeConstantAsImmediate(uint32_t value,
+ Opcode opcode,
+ SetCc set_cc) {
ShifterOperand so;
ArmAssembler* assembler = codegen_->GetAssembler();
- if (assembler->ShifterOperandCanHold(kNoRegister, kNoRegister, opcode, value, &so)) {
+ if (assembler->ShifterOperandCanHold(kNoRegister, kNoRegister, opcode, value, set_cc, &so)) {
return true;
}
Opcode neg_opcode = kNoOperand;
switch (opcode) {
- case AND:
- neg_opcode = BIC;
- break;
- case ORR:
- neg_opcode = ORN;
- break;
+ case AND: neg_opcode = BIC; value = ~value; break;
+ case ORR: neg_opcode = ORN; value = ~value; break;
+ case ADD: neg_opcode = SUB; value = -value; break;
+ case ADC: neg_opcode = SBC; value = ~value; break;
+ case SUB: neg_opcode = ADD; value = -value; break;
+ case SBC: neg_opcode = ADC; value = ~value; break;
default:
return false;
}
- return assembler->ShifterOperandCanHold(kNoRegister, kNoRegister, neg_opcode, ~value, &so);
+ return assembler->ShifterOperandCanHold(kNoRegister, kNoRegister, neg_opcode, value, set_cc, &so);
}
void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
@@ -6202,6 +6232,34 @@
__ eor(out, first, ShifterOperand(value));
}
+void InstructionCodeGeneratorARM::GenerateAddLongConst(Location out,
+ Location first,
+ uint64_t value) {
+ Register out_low = out.AsRegisterPairLow<Register>();
+ Register out_high = out.AsRegisterPairHigh<Register>();
+ Register first_low = first.AsRegisterPairLow<Register>();
+ Register first_high = first.AsRegisterPairHigh<Register>();
+ uint32_t value_low = Low32Bits(value);
+ uint32_t value_high = High32Bits(value);
+ if (value_low == 0u) {
+ if (out_low != first_low) {
+ __ mov(out_low, ShifterOperand(first_low));
+ }
+ __ AddConstant(out_high, first_high, value_high);
+ return;
+ }
+ __ AddConstantSetFlags(out_low, first_low, value_low);
+ ShifterOperand so;
+ if (__ ShifterOperandCanHold(out_high, first_high, ADC, value_high, kCcDontCare, &so)) {
+ __ adc(out_high, first_high, so);
+ } else if (__ ShifterOperandCanHold(out_low, first_low, SBC, ~value_high, kCcDontCare, &so)) {
+ __ sbc(out_high, first_high, so);
+ } else {
+ LOG(FATAL) << "Unexpected constant " << value_high;
+ UNREACHABLE();
+ }
+}
+
void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
LocationSummary* locations = instruction->GetLocations();
Location first = locations->InAt(0);
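
For reference, the two long-arithmetic hunks above lean on the same identity: ADDS/ADC split a 64-bit add into two 32-bit halves joined by an explicit carry, and SUB by a constant is rewritten as ADD of its negation before the encodability check (the SBC fallback in GenerateAddLongConst encodes the bitwise complement of the immediate, which under ARM's borrow convention computes the same sum). A standalone C++ sketch of the arithmetic, with illustrative names only:

    #include <cstdint>

    // 64-bit add split into 32-bit halves, mirroring ADDS (low, sets the
    // carry flag) followed by ADC (high, consumes the carry).
    uint64_t AddLongConst(uint64_t first, uint64_t value) {
      uint32_t first_low = static_cast<uint32_t>(first);
      uint32_t out_low = first_low + static_cast<uint32_t>(value);     // ADDS
      uint32_t carry = (out_low < first_low) ? 1u : 0u;                // carry-out
      uint32_t out_high = static_cast<uint32_t>(first >> 32) +
                          static_cast<uint32_t>(value >> 32) + carry;  // ADC
      return (static_cast<uint64_t>(out_high) << 32) | out_low;
    }

When the low 32 bits of the constant are zero, the emitted code skips the flag-setting add and only adjusts the high word, exactly as the value_low == 0 branch of GenerateAddLongConst above does.
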
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index fa7709b..5d9b2dc 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -183,7 +183,7 @@
Location ArithmeticZeroOrFpuRegister(HInstruction* input);
Location ArmEncodableConstantOrRegister(HInstruction* constant, Opcode opcode);
bool CanEncodeConstantAsImmediate(HConstant* input_cst, Opcode opcode);
- bool CanEncodeConstantAsImmediate(uint32_t value, Opcode opcode);
+ bool CanEncodeConstantAsImmediate(uint32_t value, Opcode opcode, SetCc set_cc = kCcDontCare);
CodeGeneratorARM* const codegen_;
InvokeDexCallingConventionVisitorARM parameter_visitor_;
@@ -220,6 +220,7 @@
void GenerateAndConst(Register out, Register first, uint32_t value);
void GenerateOrrConst(Register out, Register first, uint32_t value);
void GenerateEorConst(Register out, Register first, uint32_t value);
+ void GenerateAddLongConst(Location out, Location first, uint64_t value);
void HandleBitwiseOperation(HBinaryOperation* operation);
void HandleCondition(HCondition* condition);
void HandleIntegerRotate(LocationSummary* locations);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index ade2117..a85cd54 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -7099,12 +7099,6 @@
// /* LockWord */ lock_word = LockWord(monitor)
static_assert(sizeof(LockWord) == sizeof(int32_t),
"art::LockWord and int32_t have different sizes.");
- // /* uint32_t */ rb_state = lock_word.ReadBarrierState()
- __ shrl(temp_reg, Immediate(LockWord::kReadBarrierStateShift));
- __ andl(temp_reg, Immediate(LockWord::kReadBarrierStateMask));
- static_assert(
- LockWord::kReadBarrierStateMask == ReadBarrier::rb_ptr_mask_,
- "art::LockWord::kReadBarrierStateMask is not equal to art::ReadBarrier::rb_ptr_mask_.");
// Load fence to prevent load-load reordering.
// Note that this is a no-op, thanks to the x86 memory model.
@@ -7124,8 +7118,13 @@
// if (rb_state == ReadBarrier::gray_ptr_)
// ref = ReadBarrier::Mark(ref);
- __ cmpl(temp_reg, Immediate(ReadBarrier::gray_ptr_));
- __ j(kEqual, slow_path->GetEntryLabel());
+ // Given the numeric representation, it's enough to check the low bit of the
+ // rb_state. We do that by shifting the bit out of the lock word with SHR.
+ static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
+ static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+ __ shrl(temp_reg, Immediate(LockWord::kReadBarrierStateShift + 1));
+ __ j(kCarrySet, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index eadb431..e001363 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -6551,12 +6551,6 @@
// /* LockWord */ lock_word = LockWord(monitor)
static_assert(sizeof(LockWord) == sizeof(int32_t),
"art::LockWord and int32_t have different sizes.");
- // /* uint32_t */ rb_state = lock_word.ReadBarrierState()
- __ shrl(temp_reg, Immediate(LockWord::kReadBarrierStateShift));
- __ andl(temp_reg, Immediate(LockWord::kReadBarrierStateMask));
- static_assert(
- LockWord::kReadBarrierStateMask == ReadBarrier::rb_ptr_mask_,
- "art::LockWord::kReadBarrierStateMask is not equal to art::ReadBarrier::rb_ptr_mask_.");
// Load fence to prevent load-load reordering.
// Note that this is a no-op, thanks to the x86-64 memory model.
@@ -6576,8 +6570,13 @@
// if (rb_state == ReadBarrier::gray_ptr_)
// ref = ReadBarrier::Mark(ref);
- __ cmpl(temp_reg, Immediate(ReadBarrier::gray_ptr_));
- __ j(kEqual, slow_path->GetEntryLabel());
+ // Given the numeric representation, it's enough to check the low bit of the
+ // rb_state. We do that by shifting the bit out of the lock word with SHR.
+ static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
+ static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+ __ shrl(temp_reg, Immediate(LockWord::kReadBarrierStateShift + 1));
+ __ j(kCarrySet, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
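
Both the x86 and x86-64 hunks above depend on the same encoding of the read barrier states. A minimal C++ sketch of the predicate being evaluated (constant names follow the diff):

    #include <cstdint>

    // With white_ptr_ == 0, gray_ptr_ == 1 and black_ptr_ == 2, gray is the
    // only state whose low rb_state bit is set. SHR by
    // (kReadBarrierStateShift + 1) drops that bit into the carry flag, which
    // the following j(kCarrySet, ...) consumes directly; this replaces the
    // old shift/and/compare sequence and the mask constant it needed.
    bool IsGray(uint32_t lock_word, uint32_t read_barrier_state_shift) {
      return ((lock_word >> read_barrier_state_shift) & 1u) != 0u;
    }
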
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 3429a8f..1a8eb58 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -27,9 +27,6 @@
class CompilerDriver;
class DexFile;
-// Temporary measure until we have caught up with the Java 7 definition of Math.round. b/26327751
-static constexpr bool kRoundIsPlusPointFive = false;
-
// Positive floating-point infinities.
static constexpr uint32_t kPositiveInfinityFloat = 0x7f800000U;
static constexpr uint64_t kPositiveInfinityDouble = UINT64_C(0x7ff0000000000000);
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index e7c40e6..e233672 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1160,8 +1160,10 @@
MacroAssembler* masm = GetVIXLAssembler();
LocationSummary* locations = invoke->GetLocations();
- Register str = XRegisterFrom(locations->InAt(0));
- Register arg = XRegisterFrom(locations->InAt(1));
+ Register str = InputRegisterAt(invoke, 0);
+ Register arg = InputRegisterAt(invoke, 1);
+ DCHECK(str.IsW());
+ DCHECK(arg.IsW());
Register out = OutputRegister(invoke);
Register temp0 = WRegisterFrom(locations->GetTemp(0));
@@ -1192,8 +1194,8 @@
__ Subs(out, str, arg);
__ B(&end, eq);
// Load lengths of this and argument strings.
- __ Ldr(temp0, MemOperand(str.X(), count_offset));
- __ Ldr(temp1, MemOperand(arg.X(), count_offset));
+ __ Ldr(temp0, HeapOperand(str, count_offset));
+ __ Ldr(temp1, HeapOperand(arg, count_offset));
// Return zero if both strings are empty.
__ Orr(out, temp0, temp1);
__ Cbz(out, &end);
@@ -1222,8 +1224,8 @@
// Loop to compare 4x16-bit characters at a time (ok because of string data alignment).
__ Bind(&loop);
- __ Ldr(temp4, MemOperand(str.X(), temp1));
- __ Ldr(temp0, MemOperand(arg.X(), temp1));
+ __ Ldr(temp4, MemOperand(str.X(), temp1.X()));
+ __ Ldr(temp0, MemOperand(arg.X(), temp1.X()));
__ Cmp(temp4, temp0);
__ B(ne, &find_char_diff);
__ Add(temp1, temp1, char_size * 4);
@@ -1242,14 +1244,14 @@
__ Clz(temp1, temp1);
// If the number of 16-bit chars remaining <= the index where the difference occurs (0-3), then
// the difference occurs outside the remaining string data, so just return length diff (out).
- __ Cmp(temp2, Operand(temp1, LSR, 4));
+ __ Cmp(temp2, Operand(temp1.W(), LSR, 4));
__ B(le, &end);
// Extract the characters and calculate the difference.
__ Bic(temp1, temp1, 0xf);
__ Lsr(temp0, temp0, temp1);
__ Lsr(temp4, temp4, temp1);
__ And(temp4, temp4, 0xffff);
- __ Sub(out, temp4, Operand(temp0, UXTH));
+ __ Sub(out, temp4.W(), Operand(temp0.W(), UXTH));
__ Bind(&end);
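
The compare loop above works on 64-bit words holding four UTF-16 characters each. A hedged C++ sketch of the quantity the find_char_diff path recovers; the emitted code arrives at the same lane index via Clz on values prepared outside the lines shown:

    #include <bit>
    #include <cstdint>

    // Blocks are four UTF-16 chars loaded little-endian into 64-bit values;
    // once they differ, the first mismatching character sits in the least
    // significant differing 16-bit lane of the XOR. Precondition: a != b.
    int FirstDiffCharIndex(uint64_t a, uint64_t b) {
      uint64_t x = a ^ b;               // nonzero iff the blocks differ
      return std::countr_zero(x) / 16;  // lane index 0..3
    }
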
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index dc409c9..22f4181 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -753,11 +753,6 @@
}
void IntrinsicLocationsBuilderX86::VisitMathRoundFloat(HInvoke* invoke) {
- // See intrinsics.h.
- if (!kRoundIsPlusPointFive) {
- return;
- }
-
// Do we have instruction support?
if (codegen_->GetInstructionSetFeatures().HasSSE4_1()) {
HInvokeStaticOrDirect* static_or_direct = invoke->AsInvokeStaticOrDirect();
@@ -795,7 +790,6 @@
}
XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
- Register constant_area = locations->InAt(1).AsRegister<Register>();
XmmRegister t1 = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
XmmRegister t2 = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
Register out = locations->Out().AsRegister<Register>();
@@ -810,10 +804,23 @@
__ movss(t2, in);
__ roundss(t1, in, Immediate(1));
__ subss(t2, t1);
- __ comiss(t2, codegen_->LiteralInt32Address(bit_cast<int32_t, float>(0.5f), constant_area));
- __ j(kBelow, &skip_incr);
- __ addss(t1, codegen_->LiteralInt32Address(bit_cast<int32_t, float>(1.0f), constant_area));
- __ Bind(&skip_incr);
+ if (locations->GetInputCount() == 2 && locations->InAt(1).IsValid()) {
+ // Direct constant area available.
+ Register constant_area = locations->InAt(1).AsRegister<Register>();
+ __ comiss(t2, codegen_->LiteralInt32Address(bit_cast<int32_t, float>(0.5f), constant_area));
+ __ j(kBelow, &skip_incr);
+ __ addss(t1, codegen_->LiteralInt32Address(bit_cast<int32_t, float>(1.0f), constant_area));
+ __ Bind(&skip_incr);
+ } else {
+ // No constant area: go through stack.
+ __ pushl(Immediate(bit_cast<int32_t, float>(0.5f)));
+ __ pushl(Immediate(bit_cast<int32_t, float>(1.0f)));
+ __ comiss(t2, Address(ESP, 4));
+ __ j(kBelow, &skip_incr);
+ __ addss(t1, Address(ESP, 0));
+ __ Bind(&skip_incr);
+ __ addl(ESP, Immediate(8));
+ }
// Final conversion to an integer. Unfortunately this also does not have a
// direct x86 instruction, since NaN should map to 0 and large positive
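
A plain C++ sketch of the rounding sequence both branches above emit (assuming, as the code does, that roundss with immediate 1 rounds toward negative infinity); the NaN-to-zero and saturation handling mentioned in the trailing comment is left to the final conversion:

    #include <cmath>
    #include <cstdint>

    int32_t RoundFloatCore(float in) {
      float t1 = std::floor(in);  // roundss $1
      float t2 = in - t1;         // subss: fractional part
      if (t2 >= 0.5f) {           // comiss vs 0.5f; kBelow skips the add
        t1 += 1.0f;               // addss
      }
      return static_cast<int32_t>(t1);  // real code also maps NaN to 0
    }

The new else branch simply materializes the two float constants on the stack with pushl and addresses them ESP-relative when no constant-area base register is available.
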
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 7dfbfb0..ab8b05c 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -598,10 +598,6 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathRoundFloat(HInvoke* invoke) {
- // See intrinsics.h.
- if (!kRoundIsPlusPointFive) {
- return;
- }
CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
}
@@ -646,10 +642,6 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathRoundDouble(HInvoke* invoke) {
- // See intrinsics.h.
- if (!kRoundIsPlusPointFive) {
- return;
- }
CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
}
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index aadc43f..d5cd59d 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -376,499 +376,6 @@
}
}
-static dwarf::Reg DWARFReg(Register reg) {
- return dwarf::Reg::ArmCore(static_cast<int>(reg));
-}
-
-static dwarf::Reg DWARFReg(SRegister reg) {
- return dwarf::Reg::ArmFp(static_cast<int>(reg));
-}
-
-constexpr size_t kFramePointerSize = static_cast<size_t>(kArmPointerSize);
-
-void ArmAssembler::BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) {
- CHECK_EQ(buffer_.Size(), 0U); // Nothing emitted yet
- CHECK_ALIGNED(frame_size, kStackAlignment);
- CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister());
-
- // Push callee saves and link register.
- RegList core_spill_mask = 1 << LR;
- uint32_t fp_spill_mask = 0;
- for (const ManagedRegister& reg : callee_save_regs) {
- if (reg.AsArm().IsCoreRegister()) {
- core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
- } else {
- fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
- }
- }
- PushList(core_spill_mask);
- cfi_.AdjustCFAOffset(POPCOUNT(core_spill_mask) * kFramePointerSize);
- cfi_.RelOffsetForMany(DWARFReg(Register(0)), 0, core_spill_mask, kFramePointerSize);
- if (fp_spill_mask != 0) {
- vpushs(SRegister(CTZ(fp_spill_mask)), POPCOUNT(fp_spill_mask));
- cfi_.AdjustCFAOffset(POPCOUNT(fp_spill_mask) * kFramePointerSize);
- cfi_.RelOffsetForMany(DWARFReg(SRegister(0)), 0, fp_spill_mask, kFramePointerSize);
- }
-
- // Increase frame to required size.
- int pushed_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
- CHECK_GT(frame_size, pushed_values * kFramePointerSize); // Must at least have space for Method*.
- IncreaseFrameSize(frame_size - pushed_values * kFramePointerSize); // handles CFI as well.
-
- // Write out Method*.
- StoreToOffset(kStoreWord, R0, SP, 0);
-
- // Write out entry spills.
- int32_t offset = frame_size + kFramePointerSize;
- for (size_t i = 0; i < entry_spills.size(); ++i) {
- ArmManagedRegister reg = entry_spills.at(i).AsArm();
- if (reg.IsNoRegister()) {
- // only increment stack offset.
- ManagedRegisterSpill spill = entry_spills.at(i);
- offset += spill.getSize();
- } else if (reg.IsCoreRegister()) {
- StoreToOffset(kStoreWord, reg.AsCoreRegister(), SP, offset);
- offset += 4;
- } else if (reg.IsSRegister()) {
- StoreSToOffset(reg.AsSRegister(), SP, offset);
- offset += 4;
- } else if (reg.IsDRegister()) {
- StoreDToOffset(reg.AsDRegister(), SP, offset);
- offset += 8;
- }
- }
-}
-
-void ArmAssembler::RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> callee_save_regs) {
- CHECK_ALIGNED(frame_size, kStackAlignment);
- cfi_.RememberState();
-
- // Compute callee saves to pop and PC.
- RegList core_spill_mask = 1 << PC;
- uint32_t fp_spill_mask = 0;
- for (const ManagedRegister& reg : callee_save_regs) {
- if (reg.AsArm().IsCoreRegister()) {
- core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
- } else {
- fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
- }
- }
-
- // Decrease frame to start of callee saves.
- int pop_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
- CHECK_GT(frame_size, pop_values * kFramePointerSize);
- DecreaseFrameSize(frame_size - (pop_values * kFramePointerSize)); // handles CFI as well.
-
- if (fp_spill_mask != 0) {
- vpops(SRegister(CTZ(fp_spill_mask)), POPCOUNT(fp_spill_mask));
- cfi_.AdjustCFAOffset(-kFramePointerSize * POPCOUNT(fp_spill_mask));
- cfi_.RestoreMany(DWARFReg(SRegister(0)), fp_spill_mask);
- }
-
- // Pop callee saves and PC.
- PopList(core_spill_mask);
-
- // The CFI should be restored for any code that follows the exit block.
- cfi_.RestoreState();
- cfi_.DefCFAOffset(frame_size);
-}
-
-void ArmAssembler::IncreaseFrameSize(size_t adjust) {
- AddConstant(SP, -adjust);
- cfi_.AdjustCFAOffset(adjust);
-}
-
-void ArmAssembler::DecreaseFrameSize(size_t adjust) {
- AddConstant(SP, adjust);
- cfi_.AdjustCFAOffset(-adjust);
-}
-
-void ArmAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
- ArmManagedRegister src = msrc.AsArm();
- if (src.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (src.IsCoreRegister()) {
- CHECK_EQ(4u, size);
- StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
- } else if (src.IsRegisterPair()) {
- CHECK_EQ(8u, size);
- StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
- StoreToOffset(kStoreWord, src.AsRegisterPairHigh(),
- SP, dest.Int32Value() + 4);
- } else if (src.IsSRegister()) {
- StoreSToOffset(src.AsSRegister(), SP, dest.Int32Value());
- } else {
- CHECK(src.IsDRegister()) << src;
- StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
- }
-}
-
-void ArmAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
- ArmManagedRegister src = msrc.AsArm();
- CHECK(src.IsCoreRegister()) << src;
- StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
-}
-
-void ArmAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
- ArmManagedRegister src = msrc.AsArm();
- CHECK(src.IsCoreRegister()) << src;
- StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
-}
-
-void ArmAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
- FrameOffset in_off, ManagedRegister mscratch) {
- ArmManagedRegister src = msrc.AsArm();
- ArmManagedRegister scratch = mscratch.AsArm();
- StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
-}
-
-void ArmAssembler::CopyRef(FrameOffset dest, FrameOffset src,
- ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
-}
-
-void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) {
- ArmManagedRegister dst = mdest.AsArm();
- CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst;
- LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
- base.AsArm().AsCoreRegister(), offs.Int32Value());
- if (unpoison_reference) {
- MaybeUnpoisonHeapReference(dst.AsCoreRegister());
- }
-}
-
-void ArmAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
- ArmManagedRegister dst = mdest.AsArm();
- CHECK(dst.IsCoreRegister()) << dst;
- LoadFromOffset(kLoadWord, dst.AsCoreRegister(), SP, src.Int32Value());
-}
-
-void ArmAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
- Offset offs) {
- ArmManagedRegister dst = mdest.AsArm();
- CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst;
- LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
- base.AsArm().AsCoreRegister(), offs.Int32Value());
-}
-
-void ArmAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
- ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- LoadImmediate(scratch.AsCoreRegister(), imm);
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
-}
-
-static void EmitLoad(ArmAssembler* assembler, ManagedRegister m_dst,
- Register src_register, int32_t src_offset, size_t size) {
- ArmManagedRegister dst = m_dst.AsArm();
- if (dst.IsNoRegister()) {
- CHECK_EQ(0u, size) << dst;
- } else if (dst.IsCoreRegister()) {
- CHECK_EQ(4u, size) << dst;
- assembler->LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
- } else if (dst.IsRegisterPair()) {
- CHECK_EQ(8u, size) << dst;
- assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset);
- assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4);
- } else if (dst.IsSRegister()) {
- assembler->LoadSFromOffset(dst.AsSRegister(), src_register, src_offset);
- } else {
- CHECK(dst.IsDRegister()) << dst;
- assembler->LoadDFromOffset(dst.AsDRegister(), src_register, src_offset);
- }
-}
-
-void ArmAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
- return EmitLoad(this, m_dst, SP, src.Int32Value(), size);
-}
-
-void ArmAssembler::LoadFromThread(ManagedRegister m_dst, ThreadOffset32 src, size_t size) {
- return EmitLoad(this, m_dst, TR, src.Int32Value(), size);
-}
-
-void ArmAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset32 offs) {
- ArmManagedRegister dst = m_dst.AsArm();
- CHECK(dst.IsCoreRegister()) << dst;
- LoadFromOffset(kLoadWord, dst.AsCoreRegister(), TR, offs.Int32Value());
-}
-
-void ArmAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
- ThreadOffset32 thr_offs,
- ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- TR, thr_offs.Int32Value());
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
- SP, fr_offs.Int32Value());
-}
-
-void ArmAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- SP, fr_offs.Int32Value());
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
- TR, thr_offs.Int32Value());
-}
-
-void ArmAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
- TR, thr_offs.Int32Value());
-}
-
-void ArmAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
- StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
-}
-
-void ArmAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "no sign extension necessary for arm";
-}
-
-void ArmAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "no zero extension necessary for arm";
-}
-
-void ArmAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t /*size*/) {
- ArmManagedRegister dst = m_dst.AsArm();
- ArmManagedRegister src = m_src.AsArm();
- if (!dst.Equals(src)) {
- if (dst.IsCoreRegister()) {
- CHECK(src.IsCoreRegister()) << src;
- mov(dst.AsCoreRegister(), ShifterOperand(src.AsCoreRegister()));
- } else if (dst.IsDRegister()) {
- CHECK(src.IsDRegister()) << src;
- vmovd(dst.AsDRegister(), src.AsDRegister());
- } else if (dst.IsSRegister()) {
- CHECK(src.IsSRegister()) << src;
- vmovs(dst.AsSRegister(), src.AsSRegister());
- } else {
- CHECK(dst.IsRegisterPair()) << dst;
- CHECK(src.IsRegisterPair()) << src;
- // Ensure that the first move doesn't clobber the input of the second.
- if (src.AsRegisterPairHigh() != dst.AsRegisterPairLow()) {
- mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
- mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
- } else {
- mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
- mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
- }
- }
- }
-}
-
-void ArmAssembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- CHECK(size == 4 || size == 8) << size;
- if (size == 4) {
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
- } else if (size == 8) {
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4);
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
- }
-}
-
-void ArmAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
- ManagedRegister mscratch, size_t size) {
- Register scratch = mscratch.AsArm().AsCoreRegister();
- CHECK_EQ(size, 4u);
- LoadFromOffset(kLoadWord, scratch, src_base.AsArm().AsCoreRegister(), src_offset.Int32Value());
- StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
-}
-
-void ArmAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
- ManagedRegister mscratch, size_t size) {
- Register scratch = mscratch.AsArm().AsCoreRegister();
- CHECK_EQ(size, 4u);
- LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
- StoreToOffset(kStoreWord, scratch, dest_base.AsArm().AsCoreRegister(), dest_offset.Int32Value());
-}
-
-void ArmAssembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
- ManagedRegister /*mscratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL);
-}
-
-void ArmAssembler::Copy(ManagedRegister dest, Offset dest_offset,
- ManagedRegister src, Offset src_offset,
- ManagedRegister mscratch, size_t size) {
- CHECK_EQ(size, 4u);
- Register scratch = mscratch.AsArm().AsCoreRegister();
- LoadFromOffset(kLoadWord, scratch, src.AsArm().AsCoreRegister(), src_offset.Int32Value());
- StoreToOffset(kStoreWord, scratch, dest.AsArm().AsCoreRegister(), dest_offset.Int32Value());
-}
-
-void ArmAssembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset /*src_offset*/,
- ManagedRegister /*scratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL);
-}
-
-void ArmAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
- FrameOffset handle_scope_offset,
- ManagedRegister min_reg, bool null_allowed) {
- ArmManagedRegister out_reg = mout_reg.AsArm();
- ArmManagedRegister in_reg = min_reg.AsArm();
- CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
- CHECK(out_reg.IsCoreRegister()) << out_reg;
- if (null_allowed) {
- // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
- // the address in the handle scope holding the reference.
- // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
- if (in_reg.IsNoRegister()) {
- LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
- SP, handle_scope_offset.Int32Value());
- in_reg = out_reg;
- }
- cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
- if (!out_reg.Equals(in_reg)) {
- it(EQ, kItElse);
- LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
- } else {
- it(NE);
- }
- AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
- } else {
- AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
- }
-}
-
-void ArmAssembler::CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handle_scope_offset,
- ManagedRegister mscratch,
- bool null_allowed) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- if (null_allowed) {
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
- handle_scope_offset.Int32Value());
- // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
- // the address in the handle scope holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
- cmp(scratch.AsCoreRegister(), ShifterOperand(0));
- it(NE);
- AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
- } else {
- AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
- }
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
-}
-
-void ArmAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
- ManagedRegister min_reg) {
- ArmManagedRegister out_reg = mout_reg.AsArm();
- ArmManagedRegister in_reg = min_reg.AsArm();
- CHECK(out_reg.IsCoreRegister()) << out_reg;
- CHECK(in_reg.IsCoreRegister()) << in_reg;
- Label null_arg;
- if (!out_reg.Equals(in_reg)) {
- LoadImmediate(out_reg.AsCoreRegister(), 0, EQ); // TODO: why EQ?
- }
- cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
- it(NE);
- LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
- in_reg.AsCoreRegister(), 0, NE);
-}
-
-void ArmAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references.
-}
-
-void ArmAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references.
-}
-
-void ArmAssembler::Call(ManagedRegister mbase, Offset offset,
- ManagedRegister mscratch) {
- ArmManagedRegister base = mbase.AsArm();
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(base.IsCoreRegister()) << base;
- CHECK(scratch.IsCoreRegister()) << scratch;
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- base.AsCoreRegister(), offset.Int32Value());
- blx(scratch.AsCoreRegister());
- // TODO: place reference map on call.
-}
-
-void ArmAssembler::Call(FrameOffset base, Offset offset,
- ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- // Call *(*(SP + base) + offset)
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- SP, base.Int32Value());
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- scratch.AsCoreRegister(), offset.Int32Value());
- blx(scratch.AsCoreRegister());
- // TODO: place reference map on call
-}
-
-void ArmAssembler::CallFromThread(ThreadOffset32 offset ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void ArmAssembler::GetCurrentThread(ManagedRegister tr) {
- mov(tr.AsArm().AsCoreRegister(), ShifterOperand(TR));
-}
-
-void ArmAssembler::GetCurrentThread(FrameOffset offset,
- ManagedRegister /*scratch*/) {
- StoreToOffset(kStoreWord, TR, SP, offset.Int32Value(), AL);
-}
-
-void ArmAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
- ArmManagedRegister scratch = mscratch.AsArm();
- ArmExceptionSlowPath* slow = new (GetArena()) ArmExceptionSlowPath(scratch, stack_adjust);
- buffer_.EnqueueSlowPath(slow);
- LoadFromOffset(kLoadWord,
- scratch.AsCoreRegister(),
- TR,
- Thread::ExceptionOffset<kArmPointerSize>().Int32Value());
- cmp(scratch.AsCoreRegister(), ShifterOperand(0));
- b(slow->Entry(), NE);
-}
-
-void ArmExceptionSlowPath::Emit(Assembler* sasm) {
- ArmAssembler* sp_asm = down_cast<ArmAssembler*>(sasm);
-#define __ sp_asm->
- __ Bind(&entry_);
- if (stack_adjust_ != 0) { // Fix up the frame.
- __ DecreaseFrameSize(stack_adjust_);
- }
- // Pass exception object as argument.
- // Don't care about preserving R0 as this call won't return.
- __ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
- // Set up call to Thread::Current()->pDeliverException.
- __ LoadFromOffset(kLoadWord,
- R12,
- TR,
- QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pDeliverException).Int32Value());
- __ blx(R12);
-#undef __
-}
-
-
static int LeadingZeros(uint32_t val) {
uint32_t alt;
int32_t n;
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index bb88e6f..ff0bbaf 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -435,19 +435,10 @@
// This is an abstract ARM assembler. Subclasses provide assemblers for the individual
// instruction sets (ARM32, Thumb2, etc.)
//
-class ArmAssembler : public Assembler, public JNIMacroAssembler<PointerSize::k32> {
+class ArmAssembler : public Assembler {
public:
virtual ~ArmAssembler() {}
- size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); }
- DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
- void FinalizeCode() OVERRIDE {
- Assembler::FinalizeCode();
- }
- void FinalizeInstructions(const MemoryRegion& region) {
- Assembler::FinalizeInstructions(region);
- }
-
// Is this assembler for the thumb instruction set?
virtual bool IsThumb() const = 0;
@@ -891,121 +882,6 @@
virtual void CompareAndBranchIfZero(Register r, Label* label) = 0;
virtual void CompareAndBranchIfNonZero(Register r, Label* label) = 0;
- //
- // Overridden common assembler high-level functionality
- //
-
- // Emit code that will create an activation on the stack
- void BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
-
- // Emit code that will remove an activation from the stack
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
- OVERRIDE;
-
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
-
- // Store routines
- void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
-
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
-
- void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
-
- void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
-
- void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
- ManagedRegister scratch) OVERRIDE;
-
- // Load routines
- void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
-
- void LoadFromThread(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
-
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
-
- void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
-
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
-
- void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
-
- // Copying routines
- void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
-
- void CopyRawPtrFromThread(FrameOffset fr_offs,
- ThreadOffset32 thr_offs,
- ManagedRegister scratch) OVERRIDE;
-
- void CopyRawPtrToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
- OVERRIDE;
-
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
-
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
-
- // Sign extension
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Zero extension
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Exploit fast access in managed code to Thread::Current()
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
-
- // Set up out_reg to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the handle scope entry to see if the value is
- // null.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
- ManagedRegister in_reg, bool null_allowed) OVERRIDE;
-
- // Set up out_off to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
- ManagedRegister scratch, bool null_allowed) OVERRIDE;
-
- // src holds a handle scope entry (Object**) load this into dst
- void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
-
- // Heap::VerifyObject on src. In some cases (such as a reference to this) we
- // know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
-
- // Call to address held at [base+offset]
- void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
-
- // Generate code to check if Thread::Current()->exception_ is non-null
- // and branch to a ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
-
static uint32_t ModifiedImmediate(uint32_t value);
static bool IsLowRegister(Register r) {
@@ -1083,18 +959,6 @@
ArenaVector<Label*> tracked_labels_;
};
-// Slowpath entered when Thread::Current()->_exception is non-null
-class ArmExceptionSlowPath FINAL : public SlowPath {
- public:
- ArmExceptionSlowPath(ArmManagedRegister scratch, size_t stack_adjust)
- : scratch_(scratch), stack_adjust_(stack_adjust) {
- }
- void Emit(Assembler *sp_asm) OVERRIDE;
- private:
- const ArmManagedRegister scratch_;
- const size_t stack_adjust_;
-};
-
} // namespace arm
} // namespace art
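
The hunk above detaches ArmAssembler from the JNIMacroAssembler<PointerSize::k32> interface; the JNI frame logic reappears in the new jni_macro_assembler_arm.cc below as a class that owns the ISA-specific assembler and forwards to it. A standalone sketch of the pattern (illustrative names, not ART's actual hierarchy):

    #include <cstddef>
    #include <memory>

    // Composition instead of inheritance: the macro assembler forwards the
    // few Assembler entry points it needs to an owned instance.
    class Assembler {
     public:
      virtual ~Assembler() {}
      virtual size_t CodeSize() const = 0;
    };

    class JNIMacroAssemblerSketch {
     public:
      explicit JNIMacroAssemblerSketch(std::unique_ptr<Assembler> impl)
          : asm_(std::move(impl)) {}
      size_t CodeSize() const { return asm_->CodeSize(); }  // forwarded
     private:
      std::unique_ptr<Assembler> asm_;
    };

This mirrors how ArmJNIMacroAssembler's constructor picks Arm32Assembler or Thumb2Assembler and how its CodeSize(), cfi() and FinalizeCode() simply delegate to asm_.
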
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index c95dfa8..6f9d5f3 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -1664,12 +1664,6 @@
}
-void Arm32Assembler::MemoryBarrier(ManagedRegister mscratch) {
- CHECK_EQ(mscratch.AsArm().AsCoreRegister(), R12);
- dmb(SY);
-}
-
-
void Arm32Assembler::dmb(DmbOptions flavor) {
int32_t encoding = 0xf57ff05f; // dmb
Emit(encoding | flavor);
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index 554dd23..044eaa1 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -316,8 +316,6 @@
void Emit(int32_t value);
void Bind(Label* label) OVERRIDE;
- void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
-
JumpTable* CreateJumpTable(std::vector<Label*>&& labels, Register base_reg) OVERRIDE;
void EmitJumpTableDispatch(JumpTable* jump_table, Register displacement_reg) OVERRIDE;
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 4be7aae..ee69698 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -3863,12 +3863,6 @@
}
-void Thumb2Assembler::MemoryBarrier(ManagedRegister mscratch) {
- CHECK_EQ(mscratch.AsArm().AsCoreRegister(), R12);
- dmb(SY);
-}
-
-
void Thumb2Assembler::dmb(DmbOptions flavor) {
int32_t encoding = 0xf3bf8f50; // dmb in T1 encoding.
Emit32(encoding | flavor);
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index 4ee23c0..1c1c98b 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -368,8 +368,6 @@
void Emit16(int16_t value); // Emit a 16 bit instruction in little endian format.
void Bind(Label* label) OVERRIDE;
- void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
-
// Force the assembler to generate 32 bit instructions.
void Force32Bit() {
force_32bit_ = true;
diff --git a/compiler/utils/arm/jni_macro_assembler_arm.cc b/compiler/utils/arm/jni_macro_assembler_arm.cc
new file mode 100644
index 0000000..c039816
--- /dev/null
+++ b/compiler/utils/arm/jni_macro_assembler_arm.cc
@@ -0,0 +1,612 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_macro_assembler_arm.h"
+
+#include <algorithm>
+
+#include "assembler_arm32.h"
+#include "assembler_thumb2.h"
+#include "base/arena_allocator.h"
+#include "base/bit_utils.h"
+#include "base/logging.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "offsets.h"
+#include "thread.h"
+
+namespace art {
+namespace arm {
+
+constexpr size_t kFramePointerSize = static_cast<size_t>(kArmPointerSize);
+
+// Slowpath entered when Thread::Current()->_exception is non-null
+class ArmExceptionSlowPath FINAL : public SlowPath {
+ public:
+ ArmExceptionSlowPath(ArmManagedRegister scratch, size_t stack_adjust)
+ : scratch_(scratch), stack_adjust_(stack_adjust) {
+ }
+ void Emit(Assembler *sp_asm) OVERRIDE;
+ private:
+ const ArmManagedRegister scratch_;
+ const size_t stack_adjust_;
+};
+
+ArmJNIMacroAssembler::ArmJNIMacroAssembler(ArenaAllocator* arena, InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ asm_.reset(new (arena) Arm32Assembler(arena));
+ break;
+
+ case kThumb2:
+ asm_.reset(new (arena) Thumb2Assembler(arena));
+ break;
+
+ default:
+ LOG(FATAL) << isa;
+ UNREACHABLE();
+ }
+}
+
+ArmJNIMacroAssembler::~ArmJNIMacroAssembler() {
+}
+
+size_t ArmJNIMacroAssembler::CodeSize() const {
+ return asm_->CodeSize();
+}
+
+DebugFrameOpCodeWriterForAssembler& ArmJNIMacroAssembler::cfi() {
+ return asm_->cfi();
+}
+
+void ArmJNIMacroAssembler::FinalizeCode() {
+ asm_->FinalizeCode();
+}
+
+void ArmJNIMacroAssembler::FinalizeInstructions(const MemoryRegion& region) {
+ asm_->FinalizeInstructions(region);
+}
+
+static dwarf::Reg DWARFReg(Register reg) {
+ return dwarf::Reg::ArmCore(static_cast<int>(reg));
+}
+
+static dwarf::Reg DWARFReg(SRegister reg) {
+ return dwarf::Reg::ArmFp(static_cast<int>(reg));
+}
+
+#define __ asm_->
+
+void ArmJNIMacroAssembler::BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) {
+ CHECK_EQ(CodeSize(), 0U); // Nothing emitted yet
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister());
+
+ // Push callee saves and link register.
+ RegList core_spill_mask = 1 << LR;
+ uint32_t fp_spill_mask = 0;
+ for (const ManagedRegister& reg : callee_save_regs) {
+ if (reg.AsArm().IsCoreRegister()) {
+ core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
+ } else {
+ fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
+ }
+ }
+ __ PushList(core_spill_mask);
+ cfi().AdjustCFAOffset(POPCOUNT(core_spill_mask) * kFramePointerSize);
+ cfi().RelOffsetForMany(DWARFReg(Register(0)), 0, core_spill_mask, kFramePointerSize);
+ if (fp_spill_mask != 0) {
+ __ vpushs(SRegister(CTZ(fp_spill_mask)), POPCOUNT(fp_spill_mask));
+ cfi().AdjustCFAOffset(POPCOUNT(fp_spill_mask) * kFramePointerSize);
+ cfi().RelOffsetForMany(DWARFReg(SRegister(0)), 0, fp_spill_mask, kFramePointerSize);
+ }
+
+ // Increase frame to required size.
+ int pushed_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
+ CHECK_GT(frame_size, pushed_values * kFramePointerSize); // Must at least have space for Method*.
+ IncreaseFrameSize(frame_size - pushed_values * kFramePointerSize); // handles CFI as well.
+
+ // Write out Method*.
+ __ StoreToOffset(kStoreWord, R0, SP, 0);
+
+ // Write out entry spills.
+ int32_t offset = frame_size + kFramePointerSize;
+ for (size_t i = 0; i < entry_spills.size(); ++i) {
+ ArmManagedRegister reg = entry_spills.at(i).AsArm();
+ if (reg.IsNoRegister()) {
+ // only increment stack offset.
+ ManagedRegisterSpill spill = entry_spills.at(i);
+ offset += spill.getSize();
+ } else if (reg.IsCoreRegister()) {
+ __ StoreToOffset(kStoreWord, reg.AsCoreRegister(), SP, offset);
+ offset += 4;
+ } else if (reg.IsSRegister()) {
+ __ StoreSToOffset(reg.AsSRegister(), SP, offset);
+ offset += 4;
+ } else if (reg.IsDRegister()) {
+ __ StoreDToOffset(reg.AsDRegister(), SP, offset);
+ offset += 8;
+ }
+ }
+}
+
+void ArmJNIMacroAssembler::RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ cfi().RememberState();
+
+ // Compute callee saves to pop and PC.
+ RegList core_spill_mask = 1 << PC;
+ uint32_t fp_spill_mask = 0;
+ for (const ManagedRegister& reg : callee_save_regs) {
+ if (reg.AsArm().IsCoreRegister()) {
+ core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
+ } else {
+ fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
+ }
+ }
+
+ // Decrease frame to start of callee saves.
+ int pop_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
+ CHECK_GT(frame_size, pop_values * kFramePointerSize);
+ DecreaseFrameSize(frame_size - (pop_values * kFramePointerSize)); // handles CFI as well.
+
+ if (fp_spill_mask != 0) {
+ __ vpops(SRegister(CTZ(fp_spill_mask)), POPCOUNT(fp_spill_mask));
+ cfi().AdjustCFAOffset(-kFramePointerSize * POPCOUNT(fp_spill_mask));
+ cfi().RestoreMany(DWARFReg(SRegister(0)), fp_spill_mask);
+ }
+
+ // Pop callee saves and PC.
+ __ PopList(core_spill_mask);
+
+ // The CFI should be restored for any code that follows the exit block.
+ cfi().RestoreState();
+ cfi().DefCFAOffset(frame_size);
+}
+
+void ArmJNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
+ __ AddConstant(SP, -adjust);
+ cfi().AdjustCFAOffset(adjust);
+}
+
+static void DecreaseFrameSizeImpl(ArmAssembler* assembler, size_t adjust) {
+ assembler->AddConstant(SP, adjust);
+ assembler->cfi().AdjustCFAOffset(-adjust);
+}
+
+void ArmJNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
+ DecreaseFrameSizeImpl(asm_.get(), adjust);
+}
+
+void ArmJNIMacroAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
+ ArmManagedRegister src = msrc.AsArm();
+ if (src.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (src.IsCoreRegister()) {
+ CHECK_EQ(4u, size);
+ __ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+ } else if (src.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ __ StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
+ __ StoreToOffset(kStoreWord, src.AsRegisterPairHigh(), SP, dest.Int32Value() + 4);
+ } else if (src.IsSRegister()) {
+ __ StoreSToOffset(src.AsSRegister(), SP, dest.Int32Value());
+ } else {
+ CHECK(src.IsDRegister()) << src;
+ __ StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
+ }
+}
+
+void ArmJNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+ ArmManagedRegister src = msrc.AsArm();
+ CHECK(src.IsCoreRegister()) << src;
+ __ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void ArmJNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+ ArmManagedRegister src = msrc.AsArm();
+ CHECK(src.IsCoreRegister()) << src;
+ __ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void ArmJNIMacroAssembler::StoreSpanning(FrameOffset dest,
+ ManagedRegister msrc,
+ FrameOffset in_off,
+ ManagedRegister mscratch) {
+ ArmManagedRegister src = msrc.AsArm();
+ ArmManagedRegister scratch = mscratch.AsArm();
+ __ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + sizeof(uint32_t));
+}
+
+void ArmJNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void ArmJNIMacroAssembler::LoadRef(ManagedRegister mdest,
+ ManagedRegister base,
+ MemberOffset offs,
+ bool unpoison_reference) {
+ ArmManagedRegister dst = mdest.AsArm();
+ CHECK(dst.IsCoreRegister() && base.AsArm().IsCoreRegister()) << dst;
+ __ LoadFromOffset(kLoadWord,
+ dst.AsCoreRegister(),
+ base.AsArm().AsCoreRegister(),
+ offs.Int32Value());
+ if (unpoison_reference) {
+ __ MaybeUnpoisonHeapReference(dst.AsCoreRegister());
+ }
+}
+
+void ArmJNIMacroAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
+ ArmManagedRegister dst = mdest.AsArm();
+ CHECK(dst.IsCoreRegister()) << dst;
+ __ LoadFromOffset(kLoadWord, dst.AsCoreRegister(), SP, src.Int32Value());
+}
+
+void ArmJNIMacroAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) {
+ ArmManagedRegister dst = mdest.AsArm();
+ CHECK(dst.IsCoreRegister() && base.AsArm().IsCoreRegister()) << dst;
+ __ LoadFromOffset(kLoadWord,
+ dst.AsCoreRegister(),
+ base.AsArm().AsCoreRegister(),
+ offs.Int32Value());
+}
+
+void ArmJNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest,
+ uint32_t imm,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ __ LoadImmediate(scratch.AsCoreRegister(), imm);
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+static void EmitLoad(ArmAssembler* assembler,
+ ManagedRegister m_dst,
+ Register src_register,
+ int32_t src_offset,
+ size_t size) {
+ ArmManagedRegister dst = m_dst.AsArm();
+ if (dst.IsNoRegister()) {
+ CHECK_EQ(0u, size) << dst;
+ } else if (dst.IsCoreRegister()) {
+ CHECK_EQ(4u, size) << dst;
+ assembler->LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
+ } else if (dst.IsRegisterPair()) {
+ CHECK_EQ(8u, size) << dst;
+ assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset);
+ assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4);
+ } else if (dst.IsSRegister()) {
+ assembler->LoadSFromOffset(dst.AsSRegister(), src_register, src_offset);
+ } else {
+ CHECK(dst.IsDRegister()) << dst;
+ assembler->LoadDFromOffset(dst.AsDRegister(), src_register, src_offset);
+ }
+}
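+// EmitLoad is shared by the SP-relative Load() and the TR-relative
+// LoadFromThread() below; only the base register differs.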
+
+void ArmJNIMacroAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
+ EmitLoad(asm_.get(), m_dst, SP, src.Int32Value(), size);
+}
+
+void ArmJNIMacroAssembler::LoadFromThread(ManagedRegister m_dst, ThreadOffset32 src, size_t size) {
+ EmitLoad(asm_.get(), m_dst, TR, src.Int32Value(), size);
+}
+
+void ArmJNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset32 offs) {
+ ArmManagedRegister dst = m_dst.AsArm();
+ CHECK(dst.IsCoreRegister()) << dst;
+ __ LoadFromOffset(kLoadWord, dst.AsCoreRegister(), TR, offs.Int32Value());
+}
+
+void ArmJNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), TR, thr_offs.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
+}
+
+void ArmJNIMacroAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, thr_offs.Int32Value());
+}
+
+void ArmJNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ __ AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, thr_offs.Int32Value());
+}
+
+void ArmJNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
+ __ StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
+}
+
+void ArmJNIMacroAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
+ UNIMPLEMENTED(FATAL) << "no sign extension necessary for arm";
+}
+
+void ArmJNIMacroAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
+ UNIMPLEMENTED(FATAL) << "no zero extension necessary for arm";
+}
+
+void ArmJNIMacroAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t /*size*/) {
+ ArmManagedRegister dst = m_dst.AsArm();
+ ArmManagedRegister src = m_src.AsArm();
+ if (!dst.Equals(src)) {
+ if (dst.IsCoreRegister()) {
+ CHECK(src.IsCoreRegister()) << src;
+ __ mov(dst.AsCoreRegister(), ShifterOperand(src.AsCoreRegister()));
+ } else if (dst.IsDRegister()) {
+ CHECK(src.IsDRegister()) << src;
+ __ vmovd(dst.AsDRegister(), src.AsDRegister());
+ } else if (dst.IsSRegister()) {
+ CHECK(src.IsSRegister()) << src;
+ __ vmovs(dst.AsSRegister(), src.AsSRegister());
+ } else {
+ CHECK(dst.IsRegisterPair()) << dst;
+ CHECK(src.IsRegisterPair()) << src;
+ // Ensure that the first move doesn't clobber the input of the second.
+ if (src.AsRegisterPairHigh() != dst.AsRegisterPairLow()) {
+ __ mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
+ __ mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
+ } else {
+ __ mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
+ __ mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
+ }
+ }
+ }
+}
+
+void ArmJNIMacroAssembler::Copy(FrameOffset dest,
+ FrameOffset src,
+ ManagedRegister mscratch,
+ size_t size) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+ } else if (size == 8) {
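+ // A 64-bit copy is performed as two independent 32-bit word copies through
+ // the single scratch register.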
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4);
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
+ }
+}
+
+void ArmJNIMacroAssembler::Copy(FrameOffset dest,
+ ManagedRegister src_base,
+ Offset src_offset,
+ ManagedRegister mscratch,
+ size_t size) {
+ Register scratch = mscratch.AsArm().AsCoreRegister();
+ CHECK_EQ(size, 4u);
+ __ LoadFromOffset(kLoadWord, scratch, src_base.AsArm().AsCoreRegister(), src_offset.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
+}
+
+void ArmJNIMacroAssembler::Copy(ManagedRegister dest_base,
+ Offset dest_offset,
+ FrameOffset src,
+ ManagedRegister mscratch,
+ size_t size) {
+ Register scratch = mscratch.AsArm().AsCoreRegister();
+ CHECK_EQ(size, 4u);
+ __ LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
+ __ StoreToOffset(kStoreWord,
+ scratch,
+ dest_base.AsArm().AsCoreRegister(),
+ dest_offset.Int32Value());
+}
+
+void ArmJNIMacroAssembler::Copy(FrameOffset /*dst*/,
+ FrameOffset /*src_base*/,
+ Offset /*src_offset*/,
+ ManagedRegister /*mscratch*/,
+ size_t /*size*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmJNIMacroAssembler::Copy(ManagedRegister dest,
+ Offset dest_offset,
+ ManagedRegister src,
+ Offset src_offset,
+ ManagedRegister mscratch,
+ size_t size) {
+ CHECK_EQ(size, 4u);
+ Register scratch = mscratch.AsArm().AsCoreRegister();
+ __ LoadFromOffset(kLoadWord, scratch, src.AsArm().AsCoreRegister(), src_offset.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch, dest.AsArm().AsCoreRegister(), dest_offset.Int32Value());
+}
+
+void ArmJNIMacroAssembler::Copy(FrameOffset /*dst*/,
+ Offset /*dest_offset*/,
+ FrameOffset /*src*/,
+ Offset /*src_offset*/,
+ ManagedRegister /*scratch*/,
+ size_t /*size*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmJNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
+ ManagedRegister min_reg,
+ bool null_allowed) {
+ ArmManagedRegister out_reg = mout_reg.AsArm();
+ ArmManagedRegister in_reg = min_reg.AsArm();
+ CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
+ CHECK(out_reg.IsCoreRegister()) << out_reg;
+ if (null_allowed) {
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+ if (in_reg.IsNoRegister()) {
+ __ LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
+ in_reg = out_reg;
+ }
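+ // A Thumb-2 IT block predicates the following instructions: when out_reg
+ // and in_reg differ we need both arms (EQ writes 0, NE computes the
+ // address); otherwise only the NE add below is predicated.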
+ __ cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
+ if (!out_reg.Equals(in_reg)) {
+ __ it(EQ, kItElse);
+ __ LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
+ } else {
+ __ it(NE);
+ }
+ __ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
+ } else {
+ __ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
+ }
+}
+
+void ArmJNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
+ ManagedRegister mscratch,
+ bool null_allowed) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ if (null_allowed) {
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
+ __ cmp(scratch.AsCoreRegister(), ShifterOperand(0));
+ __ it(NE);
+ __ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
+ } else {
+ __ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
+ }
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
+}
+
+void ArmJNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
+ ManagedRegister min_reg) {
+ ArmManagedRegister out_reg = mout_reg.AsArm();
+ ArmManagedRegister in_reg = min_reg.AsArm();
+ CHECK(out_reg.IsCoreRegister()) << out_reg;
+ CHECK(in_reg.IsCoreRegister()) << in_reg;
+ if (!out_reg.Equals(in_reg)) {
+ __ LoadImmediate(out_reg.AsCoreRegister(), 0, EQ); // TODO: why EQ?
+ }
+ __ cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
+ __ it(NE);
+ __ LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(), in_reg.AsCoreRegister(), 0, NE);
+}
+
+void ArmJNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references.
+}
+
+void ArmJNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references.
+}
+
+void ArmJNIMacroAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister mscratch) {
+ ArmManagedRegister base = mbase.AsArm();
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(base.IsCoreRegister()) << base;
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ __ LoadFromOffset(kLoadWord,
+ scratch.AsCoreRegister(),
+ base.AsCoreRegister(),
+ offset.Int32Value());
+ __ blx(scratch.AsCoreRegister());
+ // TODO: place reference map on call.
+}
+
+void ArmJNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ // Call *(*(SP + base) + offset)
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, base.Int32Value());
+ __ LoadFromOffset(kLoadWord,
+ scratch.AsCoreRegister(),
+ scratch.AsCoreRegister(),
+ offset.Int32Value());
+ __ blx(scratch.AsCoreRegister());
+ // TODO: place reference map on call.
+}
+
+void ArmJNIMacroAssembler::CallFromThread(ThreadOffset32 offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmJNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
+ __ mov(tr.AsArm().AsCoreRegister(), ShifterOperand(TR));
+}
+
+void ArmJNIMacroAssembler::GetCurrentThread(FrameOffset offset, ManagedRegister /*scratch*/) {
+ __ StoreToOffset(kStoreWord, TR, SP, offset.Int32Value(), AL);
+}
+
+void ArmJNIMacroAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ ArmExceptionSlowPath* slow = new (__ GetArena()) ArmExceptionSlowPath(scratch, stack_adjust);
+ __ GetBuffer()->EnqueueSlowPath(slow);
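+ // The slow path is only enqueued here; its code is emitted when the
+ // assembler buffer is finalized (see ArmExceptionSlowPath::Emit below).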
+ __ LoadFromOffset(kLoadWord,
+ scratch.AsCoreRegister(),
+ TR,
+ Thread::ExceptionOffset<kArmPointerSize>().Int32Value());
+ __ cmp(scratch.AsCoreRegister(), ShifterOperand(0));
+ __ b(slow->Entry(), NE);
+}
+
+#undef __
+
+void ArmExceptionSlowPath::Emit(Assembler* sasm) {
+ ArmAssembler* sp_asm = down_cast<ArmAssembler*>(sasm);
+#define __ sp_asm->
+ __ Bind(&entry_);
+ if (stack_adjust_ != 0) { // Fix up the frame.
+ DecreaseFrameSizeImpl(sp_asm, stack_adjust_);
+ }
+ // Pass exception object as argument.
+ // Don't care about preserving R0 as this call won't return.
+ __ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
+ // Set up call to Thread::Current()->pDeliverException.
+ __ LoadFromOffset(kLoadWord,
+ R12,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pDeliverException).Int32Value());
+ __ blx(R12);
+#undef __
+}
+
+void ArmJNIMacroAssembler::MemoryBarrier(ManagedRegister mscratch) {
+ CHECK_EQ(mscratch.AsArm().AsCoreRegister(), R12);
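+ // dmb itself needs no scratch register; the CHECK above only documents that
+ // R12 is the register reserved by the call site. SY requests a full-system
+ // barrier.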
+ asm_->dmb(SY);
+}
+
+} // namespace arm
+} // namespace art
diff --git a/compiler/utils/arm/jni_macro_assembler_arm.h b/compiler/utils/arm/jni_macro_assembler_arm.h
new file mode 100644
index 0000000..4471906
--- /dev/null
+++ b/compiler/utils/arm/jni_macro_assembler_arm.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARM_JNI_MACRO_ASSEMBLER_ARM_H_
+#define ART_COMPILER_UTILS_ARM_JNI_MACRO_ASSEMBLER_ARM_H_
+
+#include <memory>
+#include <type_traits>
+#include <vector>
+
+#include "arch/instruction_set.h"
+#include "base/enums.h"
+#include "base/macros.h"
+#include "utils/jni_macro_assembler.h"
+#include "offsets.h"
+
+namespace art {
+namespace arm {
+
+class ArmAssembler;
+
+class ArmJNIMacroAssembler : public JNIMacroAssembler<PointerSize::k32> {
+ public:
+ ArmJNIMacroAssembler(ArenaAllocator* arena, InstructionSet isa);
+ virtual ~ArmJNIMacroAssembler();
+
+ size_t CodeSize() const OVERRIDE;
+ DebugFrameOpCodeWriterForAssembler& cfi() OVERRIDE;
+ void FinalizeCode() OVERRIDE;
+ void FinalizeInstructions(const MemoryRegion& region) OVERRIDE;
+
+ //
+ // Overridden common assembler high-level functionality
+ //
+
+ // Emit code that will create an activation on the stack.
+ void BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+
+ // Emit code that will remove an activation from the stack.
+ void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
+ OVERRIDE;
+
+ void IncreaseFrameSize(size_t adjust) OVERRIDE;
+ void DecreaseFrameSize(size_t adjust) OVERRIDE;
+
+ // Store routines
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
+ void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
+
+ void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
+ ManagedRegister scratch) OVERRIDE;
+
+ // Load routines
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+
+ void LoadFromThread(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
+ bool unpoison_reference) OVERRIDE;
+
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
+
+ // Copying routines
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+
+ void CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void CopyRawPtrToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ OVERRIDE;
+
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
+ ManagedRegister scratch, size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+ ManagedRegister scratch, size_t size) OVERRIDE;
+
+ // Sign extension
+ void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Zero extension
+ void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Exploit fast access in managed code to Thread::Current().
+ void GetCurrentThread(ManagedRegister tr) OVERRIDE;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+
+ // Set up out_reg to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed. in_reg holds a possibly stale reference
+ // that can be used to avoid loading the handle scope entry to see if the value is
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
+
+ // Set up out_off to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed.
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister scratch, bool null_allowed) OVERRIDE;
+
+ // src holds a handle scope entry (Object**); load this into dst.
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+
+ // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+ // know that src may not be null.
+ void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
+ void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+
+ // Call to address held at [base+offset].
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+ // and branch to an ExceptionSlowPath if it is.
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+
+ void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
+
+ private:
+ std::unique_ptr<ArmAssembler> asm_;
+};
+
+} // namespace arm
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_ARM_JNI_MACRO_ASSEMBLER_ARM_H_
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 53685bf..22221e7 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -32,9 +32,6 @@
#endif
void Arm64Assembler::FinalizeCode() {
- for (const std::unique_ptr<Arm64Exception>& exception : exception_blocks_) {
- EmitExceptionPoll(exception.get());
- }
___ FinalizeCode();
}
@@ -52,254 +49,6 @@
region.CopyFrom(0, from);
}
-void Arm64Assembler::GetCurrentThread(ManagedRegister tr) {
- ___ Mov(reg_x(tr.AsArm64().AsXRegister()), reg_x(TR));
-}
-
-void Arm64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister /* scratch */) {
- StoreToOffset(TR, SP, offset.Int32Value());
-}
-
-// See Arm64 PCS Section 5.2.2.1.
-void Arm64Assembler::IncreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- AddConstant(SP, -adjust);
- cfi().AdjustCFAOffset(adjust);
-}
-
-// See Arm64 PCS Section 5.2.2.1.
-void Arm64Assembler::DecreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- AddConstant(SP, adjust);
- cfi().AdjustCFAOffset(-adjust);
-}
-
-void Arm64Assembler::AddConstant(XRegister rd, int32_t value, Condition cond) {
- AddConstant(rd, rd, value, cond);
-}
-
-void Arm64Assembler::AddConstant(XRegister rd, XRegister rn, int32_t value,
- Condition cond) {
- if ((cond == al) || (cond == nv)) {
- // VIXL macro-assembler handles all variants.
- ___ Add(reg_x(rd), reg_x(rn), value);
- } else {
- // temp = rd + value
- // rd = cond ? temp : rn
- UseScratchRegisterScope temps(&vixl_masm_);
- temps.Exclude(reg_x(rd), reg_x(rn));
- Register temp = temps.AcquireX();
- ___ Add(temp, reg_x(rn), value);
- ___ Csel(reg_x(rd), temp, reg_x(rd), cond);
- }
-}
-
-void Arm64Assembler::StoreWToOffset(StoreOperandType type, WRegister source,
- XRegister base, int32_t offset) {
- switch (type) {
- case kStoreByte:
- ___ Strb(reg_w(source), MEM_OP(reg_x(base), offset));
- break;
- case kStoreHalfword:
- ___ Strh(reg_w(source), MEM_OP(reg_x(base), offset));
- break;
- case kStoreWord:
- ___ Str(reg_w(source), MEM_OP(reg_x(base), offset));
- break;
- default:
- LOG(FATAL) << "UNREACHABLE";
- }
-}
-
-void Arm64Assembler::StoreToOffset(XRegister source, XRegister base, int32_t offset) {
- CHECK_NE(source, SP);
- ___ Str(reg_x(source), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::StoreSToOffset(SRegister source, XRegister base, int32_t offset) {
- ___ Str(reg_s(source), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::StoreDToOffset(DRegister source, XRegister base, int32_t offset) {
- ___ Str(reg_d(source), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size) {
- Arm64ManagedRegister src = m_src.AsArm64();
- if (src.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (src.IsWRegister()) {
- CHECK_EQ(4u, size);
- StoreWToOffset(kStoreWord, src.AsWRegister(), SP, offs.Int32Value());
- } else if (src.IsXRegister()) {
- CHECK_EQ(8u, size);
- StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
- } else if (src.IsSRegister()) {
- StoreSToOffset(src.AsSRegister(), SP, offs.Int32Value());
- } else {
- CHECK(src.IsDRegister()) << src;
- StoreDToOffset(src.AsDRegister(), SP, offs.Int32Value());
- }
-}
-
-void Arm64Assembler::StoreRef(FrameOffset offs, ManagedRegister m_src) {
- Arm64ManagedRegister src = m_src.AsArm64();
- CHECK(src.IsXRegister()) << src;
- StoreWToOffset(kStoreWord, src.AsOverlappingWRegister(), SP,
- offs.Int32Value());
-}
-
-void Arm64Assembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) {
- Arm64ManagedRegister src = m_src.AsArm64();
- CHECK(src.IsXRegister()) << src;
- StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
-}
-
-void Arm64Assembler::StoreImmediateToFrame(FrameOffset offs, uint32_t imm,
- ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- LoadImmediate(scratch.AsXRegister(), imm);
- StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP,
- offs.Int32Value());
-}
-
-void Arm64Assembler::StoreStackOffsetToThread(ThreadOffset64 tr_offs,
- FrameOffset fr_offs,
- ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- AddConstant(scratch.AsXRegister(), SP, fr_offs.Int32Value());
- StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
-}
-
-void Arm64Assembler::StoreStackPointerToThread(ThreadOffset64 tr_offs) {
- UseScratchRegisterScope temps(&vixl_masm_);
- Register temp = temps.AcquireX();
- ___ Mov(temp, reg_x(SP));
- ___ Str(temp, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
-}
-
-void Arm64Assembler::StoreSpanning(FrameOffset dest_off, ManagedRegister m_source,
- FrameOffset in_off, ManagedRegister m_scratch) {
- Arm64ManagedRegister source = m_source.AsArm64();
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- StoreToOffset(source.AsXRegister(), SP, dest_off.Int32Value());
- LoadFromOffset(scratch.AsXRegister(), SP, in_off.Int32Value());
- StoreToOffset(scratch.AsXRegister(), SP, dest_off.Int32Value() + 8);
-}
-
-// Load routines.
-void Arm64Assembler::LoadImmediate(XRegister dest, int32_t value,
- Condition cond) {
- if ((cond == al) || (cond == nv)) {
- ___ Mov(reg_x(dest), value);
- } else {
- // temp = value
- // rd = cond ? temp : rd
- if (value != 0) {
- UseScratchRegisterScope temps(&vixl_masm_);
- temps.Exclude(reg_x(dest));
- Register temp = temps.AcquireX();
- ___ Mov(temp, value);
- ___ Csel(reg_x(dest), temp, reg_x(dest), cond);
- } else {
- ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), cond);
- }
- }
-}
-
-void Arm64Assembler::LoadWFromOffset(LoadOperandType type, WRegister dest,
- XRegister base, int32_t offset) {
- switch (type) {
- case kLoadSignedByte:
- ___ Ldrsb(reg_w(dest), MEM_OP(reg_x(base), offset));
- break;
- case kLoadSignedHalfword:
- ___ Ldrsh(reg_w(dest), MEM_OP(reg_x(base), offset));
- break;
- case kLoadUnsignedByte:
- ___ Ldrb(reg_w(dest), MEM_OP(reg_x(base), offset));
- break;
- case kLoadUnsignedHalfword:
- ___ Ldrh(reg_w(dest), MEM_OP(reg_x(base), offset));
- break;
- case kLoadWord:
- ___ Ldr(reg_w(dest), MEM_OP(reg_x(base), offset));
- break;
- default:
- LOG(FATAL) << "UNREACHABLE";
- }
-}
-
-// Note: We can extend this member by adding load type info - see
-// sign extended A64 load variants.
-void Arm64Assembler::LoadFromOffset(XRegister dest, XRegister base,
- int32_t offset) {
- CHECK_NE(dest, SP);
- ___ Ldr(reg_x(dest), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::LoadSFromOffset(SRegister dest, XRegister base,
- int32_t offset) {
- ___ Ldr(reg_s(dest), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::LoadDFromOffset(DRegister dest, XRegister base,
- int32_t offset) {
- ___ Ldr(reg_d(dest), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::Load(Arm64ManagedRegister dest, XRegister base,
- int32_t offset, size_t size) {
- if (dest.IsNoRegister()) {
- CHECK_EQ(0u, size) << dest;
- } else if (dest.IsWRegister()) {
- CHECK_EQ(4u, size) << dest;
- ___ Ldr(reg_w(dest.AsWRegister()), MEM_OP(reg_x(base), offset));
- } else if (dest.IsXRegister()) {
- CHECK_NE(dest.AsXRegister(), SP) << dest;
- if (size == 4u) {
- ___ Ldr(reg_w(dest.AsOverlappingWRegister()), MEM_OP(reg_x(base), offset));
- } else {
- CHECK_EQ(8u, size) << dest;
- ___ Ldr(reg_x(dest.AsXRegister()), MEM_OP(reg_x(base), offset));
- }
- } else if (dest.IsSRegister()) {
- ___ Ldr(reg_s(dest.AsSRegister()), MEM_OP(reg_x(base), offset));
- } else {
- CHECK(dest.IsDRegister()) << dest;
- ___ Ldr(reg_d(dest.AsDRegister()), MEM_OP(reg_x(base), offset));
- }
-}
-
-void Arm64Assembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
- return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
-}
-
-void Arm64Assembler::LoadFromThread(ManagedRegister m_dst, ThreadOffset64 src, size_t size) {
- return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
-}
-
-void Arm64Assembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
- Arm64ManagedRegister dst = m_dst.AsArm64();
- CHECK(dst.IsXRegister()) << dst;
- LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), SP, offs.Int32Value());
-}
-
-void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base, MemberOffset offs,
- bool unpoison_reference) {
- Arm64ManagedRegister dst = m_dst.AsArm64();
- Arm64ManagedRegister base = m_base.AsArm64();
- CHECK(dst.IsXRegister() && base.IsXRegister());
- LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), base.AsXRegister(),
- offs.Int32Value());
- if (unpoison_reference) {
- WRegister ref_reg = dst.AsOverlappingWRegister();
- MaybeUnpoisonHeapReference(reg_w(ref_reg));
- }
-}
-
void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, Offset offs) {
Arm64ManagedRegister dst = m_dst.AsArm64();
Arm64ManagedRegister base = m_base.AsArm64();
@@ -310,209 +59,6 @@
___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
}
-void Arm64Assembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset64 offs) {
- Arm64ManagedRegister dst = m_dst.AsArm64();
- CHECK(dst.IsXRegister()) << dst;
- LoadFromOffset(dst.AsXRegister(), TR, offs.Int32Value());
-}
-
-// Copying routines.
-void Arm64Assembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t size) {
- Arm64ManagedRegister dst = m_dst.AsArm64();
- Arm64ManagedRegister src = m_src.AsArm64();
- if (!dst.Equals(src)) {
- if (dst.IsXRegister()) {
- if (size == 4) {
- CHECK(src.IsWRegister());
- ___ Mov(reg_w(dst.AsOverlappingWRegister()), reg_w(src.AsWRegister()));
- } else {
- if (src.IsXRegister()) {
- ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsXRegister()));
- } else {
- ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsOverlappingXRegister()));
- }
- }
- } else if (dst.IsWRegister()) {
- CHECK(src.IsWRegister()) << src;
- ___ Mov(reg_w(dst.AsWRegister()), reg_w(src.AsWRegister()));
- } else if (dst.IsSRegister()) {
- CHECK(src.IsSRegister()) << src;
- ___ Fmov(reg_s(dst.AsSRegister()), reg_s(src.AsSRegister()));
- } else {
- CHECK(dst.IsDRegister()) << dst;
- CHECK(src.IsDRegister()) << src;
- ___ Fmov(reg_d(dst.AsDRegister()), reg_d(src.AsDRegister()));
- }
- }
-}
-
-void Arm64Assembler::CopyRawPtrFromThread(FrameOffset fr_offs,
- ThreadOffset64 tr_offs,
- ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- LoadFromOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
- StoreToOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
-}
-
-void Arm64Assembler::CopyRawPtrToThread(ThreadOffset64 tr_offs,
- FrameOffset fr_offs,
- ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
- StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
-}
-
-void Arm64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
- ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(),
- SP, src.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(),
- SP, dest.Int32Value());
-}
-
-void Arm64Assembler::Copy(FrameOffset dest, FrameOffset src,
- ManagedRegister m_scratch, size_t size) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- CHECK(size == 4 || size == 8) << size;
- if (size == 4) {
- LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP, src.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP, dest.Int32Value());
- } else if (size == 8) {
- LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
- StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
- } else {
- UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
- }
-}
-
-void Arm64Assembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
- ManagedRegister m_scratch, size_t size) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- Arm64ManagedRegister base = src_base.AsArm64();
- CHECK(base.IsXRegister()) << base;
- CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
- CHECK(size == 4 || size == 8) << size;
- if (size == 4) {
- LoadWFromOffset(kLoadWord, scratch.AsWRegister(), base.AsXRegister(),
- src_offset.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value());
- } else if (size == 8) {
- LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), src_offset.Int32Value());
- StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
- } else {
- UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
- }
-}
-
-void Arm64Assembler::Copy(ManagedRegister m_dest_base, Offset dest_offs, FrameOffset src,
- ManagedRegister m_scratch, size_t size) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- Arm64ManagedRegister base = m_dest_base.AsArm64();
- CHECK(base.IsXRegister()) << base;
- CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
- CHECK(size == 4 || size == 8) << size;
- if (size == 4) {
- LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsWRegister(), base.AsXRegister(),
- dest_offs.Int32Value());
- } else if (size == 8) {
- LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
- StoreToOffset(scratch.AsXRegister(), base.AsXRegister(), dest_offs.Int32Value());
- } else {
- UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
- }
-}
-
-void Arm64Assembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
- ManagedRegister /*mscratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
-}
-
-void Arm64Assembler::Copy(ManagedRegister m_dest, Offset dest_offset,
- ManagedRegister m_src, Offset src_offset,
- ManagedRegister m_scratch, size_t size) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- Arm64ManagedRegister src = m_src.AsArm64();
- Arm64ManagedRegister dest = m_dest.AsArm64();
- CHECK(dest.IsXRegister()) << dest;
- CHECK(src.IsXRegister()) << src;
- CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
- CHECK(size == 4 || size == 8) << size;
- if (size == 4) {
- if (scratch.IsWRegister()) {
- LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsXRegister(),
- src_offset.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsXRegister(),
- dest_offset.Int32Value());
- } else {
- LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), src.AsXRegister(),
- src_offset.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), dest.AsXRegister(),
- dest_offset.Int32Value());
- }
- } else if (size == 8) {
- LoadFromOffset(scratch.AsXRegister(), src.AsXRegister(), src_offset.Int32Value());
- StoreToOffset(scratch.AsXRegister(), dest.AsXRegister(), dest_offset.Int32Value());
- } else {
- UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
- }
-}
-
-void Arm64Assembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/,
- FrameOffset /*src*/, Offset /*src_offset*/,
- ManagedRegister /*scratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
-}
-
-void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) {
- // TODO: Should we check that m_scratch is IP? - see arm.
- ___ Dmb(InnerShareable, BarrierAll);
-}
-
-void Arm64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
- Arm64ManagedRegister reg = mreg.AsArm64();
- CHECK(size == 1 || size == 2) << size;
- CHECK(reg.IsWRegister()) << reg;
- if (size == 1) {
- ___ Sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
- } else {
- ___ Sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
- }
-}
-
-void Arm64Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
- Arm64ManagedRegister reg = mreg.AsArm64();
- CHECK(size == 1 || size == 2) << size;
- CHECK(reg.IsWRegister()) << reg;
- if (size == 1) {
- ___ Uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
- } else {
- ___ Uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
- }
-}
-
-void Arm64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references.
-}
-
-void Arm64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references.
-}
-
-void Arm64Assembler::Call(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
- Arm64ManagedRegister base = m_base.AsArm64();
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(base.IsXRegister()) << base;
- CHECK(scratch.IsXRegister()) << scratch;
- LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), offs.Int32Value());
- ___ Blr(reg_x(scratch.AsXRegister()));
-}
-
void Arm64Assembler::JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
Arm64ManagedRegister base = m_base.AsArm64();
Arm64ManagedRegister scratch = m_scratch.AsArm64();
@@ -525,114 +71,6 @@
___ Br(reg_x(scratch.AsXRegister()));
}
-void Arm64Assembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- // Call *(*(SP + base) + offset)
- LoadFromOffset(scratch.AsXRegister(), SP, base.Int32Value());
- LoadFromOffset(scratch.AsXRegister(), scratch.AsXRegister(), offs.Int32Value());
- ___ Blr(reg_x(scratch.AsXRegister()));
-}
-
-void Arm64Assembler::CallFromThread(ThreadOffset64 offset ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
-}
-
-void Arm64Assembler::CreateHandleScopeEntry(
- ManagedRegister m_out_reg, FrameOffset handle_scope_offs, ManagedRegister m_in_reg,
- bool null_allowed) {
- Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
- Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
- // For now we only hold stale handle scope entries in x registers.
- CHECK(in_reg.IsNoRegister() || in_reg.IsXRegister()) << in_reg;
- CHECK(out_reg.IsXRegister()) << out_reg;
- if (null_allowed) {
- // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
- // the address in the handle scope holding the reference.
- // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
- if (in_reg.IsNoRegister()) {
- LoadWFromOffset(kLoadWord, out_reg.AsOverlappingWRegister(), SP,
- handle_scope_offs.Int32Value());
- in_reg = out_reg;
- }
- ___ Cmp(reg_w(in_reg.AsOverlappingWRegister()), 0);
- if (!out_reg.Equals(in_reg)) {
- LoadImmediate(out_reg.AsXRegister(), 0, eq);
- }
- AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), ne);
- } else {
- AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), al);
- }
-}
-
-void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset,
- ManagedRegister m_scratch, bool null_allowed) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- if (null_allowed) {
- LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP,
- handle_scope_offset.Int32Value());
- // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
- // the address in the handle scope holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
- ___ Cmp(reg_w(scratch.AsOverlappingWRegister()), 0);
- // Move this logic in add constants with flags.
- AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), ne);
- } else {
- AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), al);
- }
- StoreToOffset(scratch.AsXRegister(), SP, out_off.Int32Value());
-}
-
-void Arm64Assembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
- ManagedRegister m_in_reg) {
- Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
- Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
- CHECK(out_reg.IsXRegister()) << out_reg;
- CHECK(in_reg.IsXRegister()) << in_reg;
- vixl::aarch64::Label exit;
- if (!out_reg.Equals(in_reg)) {
- // FIXME: Who sets the flags here?
- LoadImmediate(out_reg.AsXRegister(), 0, eq);
- }
- ___ Cbz(reg_x(in_reg.AsXRegister()), &exit);
- LoadFromOffset(out_reg.AsXRegister(), in_reg.AsXRegister(), 0);
- ___ Bind(&exit);
-}
-
-void Arm64Assembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
- CHECK_ALIGNED(stack_adjust, kStackAlignment);
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- exception_blocks_.emplace_back(new Arm64Exception(scratch, stack_adjust));
- LoadFromOffset(scratch.AsXRegister(),
- TR,
- Thread::ExceptionOffset<kArm64PointerSize>().Int32Value());
- ___ Cbnz(reg_x(scratch.AsXRegister()), exception_blocks_.back()->Entry());
-}
-
-void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) {
- UseScratchRegisterScope temps(&vixl_masm_);
- temps.Exclude(reg_x(exception->scratch_.AsXRegister()));
- Register temp = temps.AcquireX();
-
- // Bind exception poll entry.
- ___ Bind(exception->Entry());
- if (exception->stack_adjust_ != 0) { // Fix up the frame.
- DecreaseFrameSize(exception->stack_adjust_);
- }
- // Pass exception object as argument.
- // Don't care about preserving X0 as this won't return.
- ___ Mov(reg_x(X0), reg_x(exception->scratch_.AsXRegister()));
- ___ Ldr(temp,
- MEM_OP(reg_x(TR),
- QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pDeliverException).Int32Value()));
-
- ___ Blr(temp);
- // Call should never return.
- ___ Brk();
-}
-
static inline dwarf::Reg DWARFReg(CPURegister reg) {
if (reg.IsFPRegister()) {
return dwarf::Reg::Arm64Fp(reg.GetCode());
@@ -696,105 +134,6 @@
DCHECK(registers.IsEmpty());
}
-void Arm64Assembler::BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) {
- // Setup VIXL CPURegList for callee-saves.
- CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
- CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
- for (auto r : callee_save_regs) {
- Arm64ManagedRegister reg = r.AsArm64();
- if (reg.IsXRegister()) {
- core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
- } else {
- DCHECK(reg.IsDRegister());
- fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
- }
- }
- size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
- size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();
-
- // Increase frame to required size.
- DCHECK_ALIGNED(frame_size, kStackAlignment);
- DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
- IncreaseFrameSize(frame_size);
-
- // Save callee-saves.
- SpillRegisters(core_reg_list, frame_size - core_reg_size);
- SpillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
-
- DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
-
- // Write ArtMethod*
- DCHECK(X0 == method_reg.AsArm64().AsXRegister());
- StoreToOffset(X0, SP, 0);
-
- // Write out entry spills
- int32_t offset = frame_size + static_cast<size_t>(kArm64PointerSize);
- for (size_t i = 0; i < entry_spills.size(); ++i) {
- Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
- if (reg.IsNoRegister()) {
- // only increment stack offset.
- ManagedRegisterSpill spill = entry_spills.at(i);
- offset += spill.getSize();
- } else if (reg.IsXRegister()) {
- StoreToOffset(reg.AsXRegister(), SP, offset);
- offset += 8;
- } else if (reg.IsWRegister()) {
- StoreWToOffset(kStoreWord, reg.AsWRegister(), SP, offset);
- offset += 4;
- } else if (reg.IsDRegister()) {
- StoreDToOffset(reg.AsDRegister(), SP, offset);
- offset += 8;
- } else if (reg.IsSRegister()) {
- StoreSToOffset(reg.AsSRegister(), SP, offset);
- offset += 4;
- }
- }
-}
-
-void Arm64Assembler::RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> callee_save_regs) {
- // Setup VIXL CPURegList for callee-saves.
- CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
- CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
- for (auto r : callee_save_regs) {
- Arm64ManagedRegister reg = r.AsArm64();
- if (reg.IsXRegister()) {
- core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
- } else {
- DCHECK(reg.IsDRegister());
- fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
- }
- }
- size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
- size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();
-
- // For now we only check that the size of the frame is large enough to hold spills and method
- // reference.
- DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
- DCHECK_ALIGNED(frame_size, kStackAlignment);
-
- DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
-
- cfi_.RememberState();
-
- // Restore callee-saves.
- UnspillRegisters(core_reg_list, frame_size - core_reg_size);
- UnspillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
-
- // Decrease frame size to start of callee saved regs.
- DecreaseFrameSize(frame_size);
-
- // Pop callee saved and return to LR.
- ___ Ret();
-
- // The CFI should be restored for any code that follows the exit block.
- cfi_.RestoreState();
- cfi_.DefCFAOffset(frame_size);
-}
-
void Arm64Assembler::PoisonHeapReference(Register reg) {
DCHECK(reg.IsW());
// reg = -reg.
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index d7084da..4e88e64 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -22,11 +22,9 @@
#include <vector>
#include "base/arena_containers.h"
-#include "base/enums.h"
#include "base/logging.h"
#include "utils/arm64/managed_register_arm64.h"
#include "utils/assembler.h"
-#include "utils/jni_macro_assembler.h"
#include "offsets.h"
// TODO: make vixl clean wrt -Wshadow, -Wunknown-pragmas, -Wmissing-noreturn
@@ -63,38 +61,14 @@
kStoreDWord
};
-class Arm64Exception {
- private:
- Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust)
- : scratch_(scratch), stack_adjust_(stack_adjust) {
- }
-
- vixl::aarch64::Label* Entry() { return &exception_entry_; }
-
- // Register used for passing Thread::Current()->exception_ .
- const Arm64ManagedRegister scratch_;
-
- // Stack adjust for ExceptionPool.
- const size_t stack_adjust_;
-
- vixl::aarch64::Label exception_entry_;
-
- friend class Arm64Assembler;
- DISALLOW_COPY_AND_ASSIGN(Arm64Exception);
-};
-
-class Arm64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k64> {
+class Arm64Assembler FINAL : public Assembler {
public:
- explicit Arm64Assembler(ArenaAllocator* arena)
- : Assembler(arena),
- exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {}
+ explicit Arm64Assembler(ArenaAllocator* arena) : Assembler(arena) {}
virtual ~Arm64Assembler() {}
vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
- DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
-
// Finalize the code.
void FinalizeCode() OVERRIDE;
@@ -105,110 +79,14 @@
// Copy instructions out of assembly buffer into the given region of memory.
void FinalizeInstructions(const MemoryRegion& region);
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs);
+
void SpillRegisters(vixl::aarch64::CPURegList registers, int offset);
void UnspillRegisters(vixl::aarch64::CPURegList registers, int offset);
- // Emit code that will create an activation on the stack.
- void BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
-
- // Emit code that will remove an activation from the stack.
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
- OVERRIDE;
-
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
-
- // Store routines.
- void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
- void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
- void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
- void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
- ManagedRegister scratch) OVERRIDE;
-
- // Load routines.
- void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
- void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
-
- // Copying routines.
- void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
- void CopyRawPtrFromThread(FrameOffset fr_offs,
- ThreadOffset64 thr_offs,
- ManagedRegister scratch) OVERRIDE;
- void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
- OVERRIDE;
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
- void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
- void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
- size_t size) OVERRIDE;
- void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
- void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
- void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
- void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
-
- // Sign extension.
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Zero extension.
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Exploit fast access in managed code to Thread::Current().
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
-
- // Set up out_reg to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the handle scope entry to see if the value is
- // null.
- void CreateHandleScopeEntry(ManagedRegister out_reg,
- FrameOffset handlescope_offset,
- ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
-
- // Set up out_off to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handlescope_offset,
- ManagedRegister scratch,
- bool null_allowed) OVERRIDE;
-
- // src holds a handle scope entry (Object**) load this into dst.
- void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
-
- // Heap::VerifyObject on src. In some cases (such as a reference to this) we
- // know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
-
- // Call to address held at [base+offset].
- void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
-
// Jump to address (not setting link register)
void JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch);
- // Generate code to check if Thread::Current()->exception_ is non-null
- // and branch to a ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
-
//
// Heap poisoning.
//
@@ -227,7 +105,6 @@
UNIMPLEMENTED(FATAL) << "Do not use Jump for ARM64";
}
- private:
static vixl::aarch64::Register reg_x(int code) {
CHECK(code < kNumberOfXRegisters) << code;
if (code == SP) {
@@ -256,37 +133,7 @@
return vixl::aarch64::FPRegister::GetSRegFromCode(code);
}
- // Emits Exception block.
- void EmitExceptionPoll(Arm64Exception *exception);
-
- void StoreWToOffset(StoreOperandType type, WRegister source,
- XRegister base, int32_t offset);
- void StoreToOffset(XRegister source, XRegister base, int32_t offset);
- void StoreSToOffset(SRegister source, XRegister base, int32_t offset);
- void StoreDToOffset(DRegister source, XRegister base, int32_t offset);
-
- void LoadImmediate(XRegister dest,
- int32_t value,
- vixl::aarch64::Condition cond = vixl::aarch64::al);
- void Load(Arm64ManagedRegister dst, XRegister src, int32_t src_offset, size_t size);
- void LoadWFromOffset(LoadOperandType type,
- WRegister dest,
- XRegister base,
- int32_t offset);
- void LoadFromOffset(XRegister dest, XRegister base, int32_t offset);
- void LoadSFromOffset(SRegister dest, XRegister base, int32_t offset);
- void LoadDFromOffset(DRegister dest, XRegister base, int32_t offset);
- void AddConstant(XRegister rd,
- int32_t value,
- vixl::aarch64::Condition cond = vixl::aarch64::al);
- void AddConstant(XRegister rd,
- XRegister rn,
- int32_t value,
- vixl::aarch64::Condition cond = vixl::aarch64::al);
-
- // List of exception blocks to generate at the end of the code cache.
- ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
-
+ private:
// VIXL assembler.
vixl::aarch64::MacroAssembler vixl_masm_;
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
new file mode 100644
index 0000000..dfdcd11
--- /dev/null
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -0,0 +1,754 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_macro_assembler_arm64.h"
+
+#include "base/logging.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "managed_register_arm64.h"
+#include "offsets.h"
+#include "thread.h"
+
+using namespace vixl::aarch64; // NOLINT(build/namespaces)
+
+namespace art {
+namespace arm64 {
+
+#ifdef ___
+#error "ARM64 Assembler macro already defined."
+#else
+#define ___ asm_.GetVIXLAssembler()->
+#endif
+
+#define reg_x(X) Arm64Assembler::reg_x(X)
+#define reg_w(W) Arm64Assembler::reg_w(W)
+#define reg_d(D) Arm64Assembler::reg_d(D)
+#define reg_s(S) Arm64Assembler::reg_s(S)
+
+Arm64JNIMacroAssembler::~Arm64JNIMacroAssembler() {
+}
+
+void Arm64JNIMacroAssembler::FinalizeCode() {
+ for (const std::unique_ptr<Arm64Exception>& exception : exception_blocks_) {
+ EmitExceptionPoll(exception.get());
+ }
+ ___ FinalizeCode();
+}
+
+void Arm64JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
+ ___ Mov(reg_x(tr.AsArm64().AsXRegister()), reg_x(TR));
+}
+
+void Arm64JNIMacroAssembler::GetCurrentThread(FrameOffset offset, ManagedRegister /* scratch */) {
+ StoreToOffset(TR, SP, offset.Int32Value());
+}
+
+// See Arm64 PCS Section 5.2.2.1.
+void Arm64JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ AddConstant(SP, -adjust);
+ cfi().AdjustCFAOffset(adjust);
+}
+
+// See Arm64 PCS Section 5.2.2.1.
+void Arm64JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ AddConstant(SP, adjust);
+ cfi().AdjustCFAOffset(-adjust);
+}
+
+void Arm64JNIMacroAssembler::AddConstant(XRegister rd, int32_t value, Condition cond) {
+ AddConstant(rd, rd, value, cond);
+}
+
+void Arm64JNIMacroAssembler::AddConstant(XRegister rd,
+ XRegister rn,
+ int32_t value,
+ Condition cond) {
+ if ((cond == al) || (cond == nv)) {
+ // VIXL macro-assembler handles all variants.
+ ___ Add(reg_x(rd), reg_x(rn), value);
+ } else {
+ // temp = rn + value
+ // rd = cond ? temp : rd
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ temps.Exclude(reg_x(rd), reg_x(rn));
+ Register temp = temps.AcquireX();
+ ___ Add(temp, reg_x(rn), value);
+ ___ Csel(reg_x(rd), temp, reg_x(rd), cond);
+ }
+}
+
+void Arm64JNIMacroAssembler::StoreWToOffset(StoreOperandType type,
+ WRegister source,
+ XRegister base,
+ int32_t offset) {
+ switch (type) {
+ case kStoreByte:
+ ___ Strb(reg_w(source), MEM_OP(reg_x(base), offset));
+ break;
+ case kStoreHalfword:
+ ___ Strh(reg_w(source), MEM_OP(reg_x(base), offset));
+ break;
+ case kStoreWord:
+ ___ Str(reg_w(source), MEM_OP(reg_x(base), offset));
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+}
+
+void Arm64JNIMacroAssembler::StoreToOffset(XRegister source, XRegister base, int32_t offset) {
+ CHECK_NE(source, SP);
+ ___ Str(reg_x(source), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::StoreSToOffset(SRegister source, XRegister base, int32_t offset) {
+ ___ Str(reg_s(source), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::StoreDToOffset(DRegister source, XRegister base, int32_t offset) {
+ ___ Str(reg_d(source), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size) {
+ Arm64ManagedRegister src = m_src.AsArm64();
+ if (src.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (src.IsWRegister()) {
+ CHECK_EQ(4u, size);
+ StoreWToOffset(kStoreWord, src.AsWRegister(), SP, offs.Int32Value());
+ } else if (src.IsXRegister()) {
+ CHECK_EQ(8u, size);
+ StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
+ } else if (src.IsSRegister()) {
+ StoreSToOffset(src.AsSRegister(), SP, offs.Int32Value());
+ } else {
+ CHECK(src.IsDRegister()) << src;
+ StoreDToOffset(src.AsDRegister(), SP, offs.Int32Value());
+ }
+}
+
+void Arm64JNIMacroAssembler::StoreRef(FrameOffset offs, ManagedRegister m_src) {
+ Arm64ManagedRegister src = m_src.AsArm64();
+ CHECK(src.IsXRegister()) << src;
+ StoreWToOffset(kStoreWord, src.AsOverlappingWRegister(), SP,
+ offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) {
+ Arm64ManagedRegister src = m_src.AsArm64();
+ CHECK(src.IsXRegister()) << src;
+ StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::StoreImmediateToFrame(FrameOffset offs,
+ uint32_t imm,
+ ManagedRegister m_scratch) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ LoadImmediate(scratch.AsXRegister(), imm);
+ StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP,
+ offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset64 tr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister m_scratch) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ AddConstant(scratch.AsXRegister(), SP, fr_offs.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 tr_offs) {
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ Register temp = temps.AcquireX();
+ ___ Mov(temp, reg_x(SP));
+ ___ Str(temp, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
+}
+
+void Arm64JNIMacroAssembler::StoreSpanning(FrameOffset dest_off,
+ ManagedRegister m_source,
+ FrameOffset in_off,
+ ManagedRegister m_scratch) {
+ Arm64ManagedRegister source = m_source.AsArm64();
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ StoreToOffset(source.AsXRegister(), SP, dest_off.Int32Value());
+ LoadFromOffset(scratch.AsXRegister(), SP, in_off.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), SP, dest_off.Int32Value() + 8);
+}
+
+// Load routines.
+void Arm64JNIMacroAssembler::LoadImmediate(XRegister dest, int32_t value, Condition cond) {
+ if ((cond == al) || (cond == nv)) {
+ ___ Mov(reg_x(dest), value);
+ } else {
+ // temp = value
+ // rd = cond ? temp : rd
+ if (value != 0) {
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ temps.Exclude(reg_x(dest));
+ Register temp = temps.AcquireX();
+ ___ Mov(temp, value);
+ ___ Csel(reg_x(dest), temp, reg_x(dest), cond);
+ } else {
+ ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), cond);
+ }
+ }
+}
+
+void Arm64JNIMacroAssembler::LoadWFromOffset(LoadOperandType type,
+ WRegister dest,
+ XRegister base,
+ int32_t offset) {
+ switch (type) {
+ case kLoadSignedByte:
+ ___ Ldrsb(reg_w(dest), MEM_OP(reg_x(base), offset));
+ break;
+ case kLoadSignedHalfword:
+ ___ Ldrsh(reg_w(dest), MEM_OP(reg_x(base), offset));
+ break;
+ case kLoadUnsignedByte:
+ ___ Ldrb(reg_w(dest), MEM_OP(reg_x(base), offset));
+ break;
+ case kLoadUnsignedHalfword:
+ ___ Ldrh(reg_w(dest), MEM_OP(reg_x(base), offset));
+ break;
+ case kLoadWord:
+ ___ Ldr(reg_w(dest), MEM_OP(reg_x(base), offset));
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+}
+
+// Note: This method could be extended with load type info - see the
+// sign-extended A64 load variants.
+void Arm64JNIMacroAssembler::LoadFromOffset(XRegister dest, XRegister base, int32_t offset) {
+ CHECK_NE(dest, SP);
+ ___ Ldr(reg_x(dest), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::LoadSFromOffset(SRegister dest, XRegister base, int32_t offset) {
+ ___ Ldr(reg_s(dest), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::LoadDFromOffset(DRegister dest, XRegister base, int32_t offset) {
+ ___ Ldr(reg_d(dest), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::Load(Arm64ManagedRegister dest,
+ XRegister base,
+ int32_t offset,
+ size_t size) {
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size) << dest;
+ } else if (dest.IsWRegister()) {
+ CHECK_EQ(4u, size) << dest;
+ ___ Ldr(reg_w(dest.AsWRegister()), MEM_OP(reg_x(base), offset));
+ } else if (dest.IsXRegister()) {
+ CHECK_NE(dest.AsXRegister(), SP) << dest;
+ if (size == 4u) {
+ ___ Ldr(reg_w(dest.AsOverlappingWRegister()), MEM_OP(reg_x(base), offset));
+ } else {
+ CHECK_EQ(8u, size) << dest;
+ ___ Ldr(reg_x(dest.AsXRegister()), MEM_OP(reg_x(base), offset));
+ }
+ } else if (dest.IsSRegister()) {
+ ___ Ldr(reg_s(dest.AsSRegister()), MEM_OP(reg_x(base), offset));
+ } else {
+ CHECK(dest.IsDRegister()) << dest;
+ ___ Ldr(reg_d(dest.AsDRegister()), MEM_OP(reg_x(base), offset));
+ }
+}
+
+void Arm64JNIMacroAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
+ return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
+}
+
+void Arm64JNIMacroAssembler::LoadFromThread(ManagedRegister m_dst,
+ ThreadOffset64 src,
+ size_t size) {
+ return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
+}
+
+void Arm64JNIMacroAssembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
+ Arm64ManagedRegister dst = m_dst.AsArm64();
+ CHECK(dst.IsXRegister()) << dst;
+ LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), SP, offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::LoadRef(ManagedRegister m_dst,
+ ManagedRegister m_base,
+ MemberOffset offs,
+ bool unpoison_reference) {
+ Arm64ManagedRegister dst = m_dst.AsArm64();
+ Arm64ManagedRegister base = m_base.AsArm64();
+ CHECK(dst.IsXRegister() && base.IsXRegister());
+ LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), base.AsXRegister(),
+ offs.Int32Value());
+ if (unpoison_reference) {
+ WRegister ref_reg = dst.AsOverlappingWRegister();
+ asm_.MaybeUnpoisonHeapReference(reg_w(ref_reg));
+ }
+}
+
+void Arm64JNIMacroAssembler::LoadRawPtr(ManagedRegister m_dst,
+ ManagedRegister m_base,
+ Offset offs) {
+ Arm64ManagedRegister dst = m_dst.AsArm64();
+ Arm64ManagedRegister base = m_base.AsArm64();
+ CHECK(dst.IsXRegister() && base.IsXRegister());
+ // Remove dst and base from the temp list - higher level API uses IP1, IP0.
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ temps.Exclude(reg_x(dst.AsXRegister()), reg_x(base.AsXRegister()));
+ ___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
+}
+
+void Arm64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset64 offs) {
+ Arm64ManagedRegister dst = m_dst.AsArm64();
+ CHECK(dst.IsXRegister()) << dst;
+ LoadFromOffset(dst.AsXRegister(), TR, offs.Int32Value());
+}
+
+// Copying routines.
+void Arm64JNIMacroAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t size) {
+ Arm64ManagedRegister dst = m_dst.AsArm64();
+ Arm64ManagedRegister src = m_src.AsArm64();
+ if (!dst.Equals(src)) {
+ if (dst.IsXRegister()) {
+ if (size == 4) {
+ CHECK(src.IsWRegister());
+ ___ Mov(reg_w(dst.AsOverlappingWRegister()), reg_w(src.AsWRegister()));
+ } else {
+ if (src.IsXRegister()) {
+ ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsXRegister()));
+ } else {
+ ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsOverlappingXRegister()));
+ }
+ }
+ } else if (dst.IsWRegister()) {
+ CHECK(src.IsWRegister()) << src;
+ ___ Mov(reg_w(dst.AsWRegister()), reg_w(src.AsWRegister()));
+ } else if (dst.IsSRegister()) {
+ CHECK(src.IsSRegister()) << src;
+ ___ Fmov(reg_s(dst.AsSRegister()), reg_s(src.AsSRegister()));
+ } else {
+ CHECK(dst.IsDRegister()) << dst;
+ CHECK(src.IsDRegister()) << src;
+ ___ Fmov(reg_d(dst.AsDRegister()), reg_d(src.AsDRegister()));
+ }
+ }
+}
+
+void Arm64JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset64 tr_offs,
+ ManagedRegister m_scratch) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ LoadFromOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset64 tr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister m_scratch) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister m_scratch) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(),
+ SP, src.Int32Value());
+ StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(),
+ SP, dest.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::Copy(FrameOffset dest,
+ FrameOffset src,
+ ManagedRegister m_scratch,
+ size_t size) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP, src.Int32Value());
+ StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP, dest.Int32Value());
+ } else if (size == 8) {
+ LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
+ } else {
+ UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
+ }
+}
+
+void Arm64JNIMacroAssembler::Copy(FrameOffset dest,
+ ManagedRegister src_base,
+ Offset src_offset,
+ ManagedRegister m_scratch,
+ size_t size) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ Arm64ManagedRegister base = src_base.AsArm64();
+ CHECK(base.IsXRegister()) << base;
+ CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ LoadWFromOffset(kLoadWord, scratch.AsWRegister(), base.AsXRegister(),
+ src_offset.Int32Value());
+ StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value());
+ } else if (size == 8) {
+ LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), src_offset.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
+ } else {
+ UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
+ }
+}
+
+void Arm64JNIMacroAssembler::Copy(ManagedRegister m_dest_base,
+ Offset dest_offs,
+ FrameOffset src,
+ ManagedRegister m_scratch,
+ size_t size) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ Arm64ManagedRegister base = m_dest_base.AsArm64();
+ CHECK(base.IsXRegister()) << base;
+ CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value());
+ StoreWToOffset(kStoreWord, scratch.AsWRegister(), base.AsXRegister(),
+ dest_offs.Int32Value());
+ } else if (size == 8) {
+ LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), base.AsXRegister(), dest_offs.Int32Value());
+ } else {
+ UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
+ }
+}
+
+void Arm64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
+ FrameOffset /*src_base*/,
+ Offset /*src_offset*/,
+ ManagedRegister /*mscratch*/,
+ size_t /*size*/) {
+ UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
+}
+
+void Arm64JNIMacroAssembler::Copy(ManagedRegister m_dest,
+ Offset dest_offset,
+ ManagedRegister m_src,
+ Offset src_offset,
+ ManagedRegister m_scratch,
+ size_t size) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ Arm64ManagedRegister src = m_src.AsArm64();
+ Arm64ManagedRegister dest = m_dest.AsArm64();
+ CHECK(dest.IsXRegister()) << dest;
+ CHECK(src.IsXRegister()) << src;
+ CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ if (scratch.IsWRegister()) {
+ LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsXRegister(),
+ src_offset.Int32Value());
+ StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsXRegister(),
+ dest_offset.Int32Value());
+ } else {
+ LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), src.AsXRegister(),
+ src_offset.Int32Value());
+ StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), dest.AsXRegister(),
+ dest_offset.Int32Value());
+ }
+ } else if (size == 8) {
+ LoadFromOffset(scratch.AsXRegister(), src.AsXRegister(), src_offset.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), dest.AsXRegister(), dest_offset.Int32Value());
+ } else {
+ UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
+ }
+}
+
+void Arm64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
+ Offset /*dest_offset*/,
+ FrameOffset /*src*/,
+ Offset /*src_offset*/,
+ ManagedRegister /*scratch*/,
+ size_t /*size*/) {
+ UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
+}
+
+void Arm64JNIMacroAssembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) {
+ // TODO: Should we check that m_scratch is IP? - see arm.
+ ___ Dmb(InnerShareable, BarrierAll);
+}
+
+void Arm64JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
+ Arm64ManagedRegister reg = mreg.AsArm64();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsWRegister()) << reg;
+ if (size == 1) {
+ ___ Sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ } else {
+ ___ Sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ }
+}
+
+void Arm64JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
+ Arm64ManagedRegister reg = mreg.AsArm64();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsWRegister()) << reg;
+ if (size == 1) {
+ ___ Uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ } else {
+ ___ Uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ }
+}
+
+void Arm64JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references.
+}
+
+void Arm64JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references.
+}
+
+void Arm64JNIMacroAssembler::Call(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
+ Arm64ManagedRegister base = m_base.AsArm64();
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(base.IsXRegister()) << base;
+ CHECK(scratch.IsXRegister()) << scratch;
+ LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), offs.Int32Value());
+ ___ Blr(reg_x(scratch.AsXRegister()));
+}
+
+void Arm64JNIMacroAssembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scratch) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ // Call *(*(SP + base) + offset)
+ LoadFromOffset(scratch.AsXRegister(), SP, base.Int32Value());
+ LoadFromOffset(scratch.AsXRegister(), scratch.AsXRegister(), offs.Int32Value());
+ ___ Blr(reg_x(scratch.AsXRegister()));
+}
+
+void Arm64JNIMacroAssembler::CallFromThread(ThreadOffset64 offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
+}
+
+void Arm64JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister m_out_reg,
+ FrameOffset handle_scope_offs,
+ ManagedRegister m_in_reg,
+ bool null_allowed) {
+ Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
+ Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
+ // For now we only hold stale handle scope entries in x registers.
+ CHECK(in_reg.IsNoRegister() || in_reg.IsXRegister()) << in_reg;
+ CHECK(out_reg.IsXRegister()) << out_reg;
+ if (null_allowed) {
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+ if (in_reg.IsNoRegister()) {
+ LoadWFromOffset(kLoadWord, out_reg.AsOverlappingWRegister(), SP,
+ handle_scope_offs.Int32Value());
+ in_reg = out_reg;
+ }
+ ___ Cmp(reg_w(in_reg.AsOverlappingWRegister()), 0);
+ if (!out_reg.Equals(in_reg)) {
+ LoadImmediate(out_reg.AsXRegister(), 0, eq);
+ }
+ AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), ne);
+ } else {
+ AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), al);
+ }
+}
+
+void Arm64JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
+ ManagedRegister m_scratch,
+ bool null_allowed) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ if (null_allowed) {
+ LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP,
+ handle_scope_offset.Int32Value());
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
+ ___ Cmp(reg_w(scratch.AsOverlappingWRegister()), 0);
+ // TODO: Move this logic into AddConstant variants that set flags.
+ AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), ne);
+ } else {
+ AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), al);
+ }
+ StoreToOffset(scratch.AsXRegister(), SP, out_off.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
+ ManagedRegister m_in_reg) {
+ Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
+ Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
+ CHECK(out_reg.IsXRegister()) << out_reg;
+ CHECK(in_reg.IsXRegister()) << in_reg;
+ vixl::aarch64::Label exit;
+ if (!out_reg.Equals(in_reg)) {
+ // FIXME: Who sets the flags here?
+ LoadImmediate(out_reg.AsXRegister(), 0, eq);
+ }
+ ___ Cbz(reg_x(in_reg.AsXRegister()), &exit);
+ LoadFromOffset(out_reg.AsXRegister(), in_reg.AsXRegister(), 0);
+ ___ Bind(&exit);
+}
+
+void Arm64JNIMacroAssembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
+ CHECK_ALIGNED(stack_adjust, kStackAlignment);
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ exception_blocks_.emplace_back(new Arm64Exception(scratch, stack_adjust));
+ LoadFromOffset(scratch.AsXRegister(),
+ TR,
+ Thread::ExceptionOffset<kArm64PointerSize>().Int32Value());
+ ___ Cbnz(reg_x(scratch.AsXRegister()), exception_blocks_.back()->Entry());
+}
+
+void Arm64JNIMacroAssembler::EmitExceptionPoll(Arm64Exception *exception) {
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ temps.Exclude(reg_x(exception->scratch_.AsXRegister()));
+ Register temp = temps.AcquireX();
+
+ // Bind exception poll entry.
+ ___ Bind(exception->Entry());
+ if (exception->stack_adjust_ != 0) { // Fix up the frame.
+ DecreaseFrameSize(exception->stack_adjust_);
+ }
+ // Pass exception object as argument.
+ // Don't care about preserving X0 as this won't return.
+ ___ Mov(reg_x(X0), reg_x(exception->scratch_.AsXRegister()));
+ ___ Ldr(temp,
+ MEM_OP(reg_x(TR),
+ QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pDeliverException).Int32Value()));
+
+ ___ Blr(temp);
+ // Call should never return.
+ ___ Brk();
+}
+
+void Arm64JNIMacroAssembler::BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) {
+ // Set up VIXL CPURegList for callee-saves.
+ CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
+ CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
+ for (auto r : callee_save_regs) {
+ Arm64ManagedRegister reg = r.AsArm64();
+ if (reg.IsXRegister()) {
+ core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
+ } else {
+ DCHECK(reg.IsDRegister());
+ fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
+ }
+ }
+ size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
+ size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();
+
+ // Increase frame to required size.
+ DCHECK_ALIGNED(frame_size, kStackAlignment);
+ DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
+ IncreaseFrameSize(frame_size);
+
+ // Save callee-saves.
+ asm_.SpillRegisters(core_reg_list, frame_size - core_reg_size);
+ asm_.SpillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
+
+ DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
+
+ // Write ArtMethod*
+ DCHECK(X0 == method_reg.AsArm64().AsXRegister());
+ StoreToOffset(X0, SP, 0);
+
+ // Write out entry spills
+ int32_t offset = frame_size + static_cast<size_t>(kArm64PointerSize);
+ for (size_t i = 0; i < entry_spills.size(); ++i) {
+ Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
+ if (reg.IsNoRegister()) {
+ // Only increment the stack offset.
+ ManagedRegisterSpill spill = entry_spills.at(i);
+ offset += spill.getSize();
+ } else if (reg.IsXRegister()) {
+ StoreToOffset(reg.AsXRegister(), SP, offset);
+ offset += 8;
+ } else if (reg.IsWRegister()) {
+ StoreWToOffset(kStoreWord, reg.AsWRegister(), SP, offset);
+ offset += 4;
+ } else if (reg.IsDRegister()) {
+ StoreDToOffset(reg.AsDRegister(), SP, offset);
+ offset += 8;
+ } else if (reg.IsSRegister()) {
+ StoreSToOffset(reg.AsSRegister(), SP, offset);
+ offset += 4;
+ }
+ }
+}
+
+void Arm64JNIMacroAssembler::RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs) {
+ // Set up VIXL CPURegList for callee-saves.
+ CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
+ CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
+ for (auto r : callee_save_regs) {
+ Arm64ManagedRegister reg = r.AsArm64();
+ if (reg.IsXRegister()) {
+ core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
+ } else {
+ DCHECK(reg.IsDRegister());
+ fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
+ }
+ }
+ size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
+ size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();
+
+ // For now we only check that the size of the frame is large enough to hold spills and method
+ // reference.
+ DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
+ DCHECK_ALIGNED(frame_size, kStackAlignment);
+
+ DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
+
+ cfi().RememberState();
+
+ // Restore callee-saves.
+ asm_.UnspillRegisters(core_reg_list, frame_size - core_reg_size);
+ asm_.UnspillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
+
+ // Decrease frame size to start of callee saved regs.
+ DecreaseFrameSize(frame_size);
+
+ // Pop callee saves and return to LR.
+ ___ Ret();
+
+ // The CFI should be restored for any code that follows the exit block.
+ cfi().RestoreState();
+ cfi().DefCFAOffset(frame_size);
+}
+
+#undef ___
+
+} // namespace arm64
+} // namespace art
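
Note on CreateHandleScopeEntry above: with null_allowed, the emitted Cmp plus
conditional LoadImmediate/AddConstant sequence reduces to a small branch-free
address computation. A minimal C++ sketch of that computation, assuming a zero
reference encodes null (hypothetical standalone function, not ART code):

    #include <cstdint>

    // out = 0 if the reference is null, otherwise the address of the
    // handle-scope slot in the current frame; that address is what the
    // JNI stub ultimately passes as a jobject.
    uintptr_t HandleScopeEntry(uintptr_t sp, int32_t slot_offset, uint32_t ref) {
      return (ref == 0u) ? 0u : sp + static_cast<uintptr_t>(slot_offset);
    }

The conditional overloads of LoadImmediate and AddConstant exist precisely so
this select can be emitted with Csel instead of a branch.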
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
new file mode 100644
index 0000000..79ee441
--- /dev/null
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARM64_JNI_MACRO_ASSEMBLER_ARM64_H_
+#define ART_COMPILER_UTILS_ARM64_JNI_MACRO_ASSEMBLER_ARM64_H_
+
+#include <stdint.h>
+#include <memory>
+#include <vector>
+
+#include "assembler_arm64.h"
+#include "base/arena_containers.h"
+#include "base/enums.h"
+#include "base/logging.h"
+#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
+#include "offsets.h"
+
+// TODO: make vixl clean wrt -Wshadow, -Wunknown-pragmas, -Wmissing-noreturn
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunknown-pragmas"
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wmissing-noreturn"
+#include "a64/macro-assembler-a64.h"
+#pragma GCC diagnostic pop
+
+namespace art {
+namespace arm64 {
+
+class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler, PointerSize::k64> {
+ public:
+ explicit Arm64JNIMacroAssembler(ArenaAllocator* arena)
+ : JNIMacroAssemblerFwd(arena),
+ exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {}
+
+ ~Arm64JNIMacroAssembler();
+
+ // Finalize the code.
+ void FinalizeCode() OVERRIDE;
+
+ // Emit code that will create an activation on the stack.
+ void BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+
+ // Emit code that will remove an activation from the stack.
+ void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
+ OVERRIDE;
+
+ void IncreaseFrameSize(size_t adjust) OVERRIDE;
+ void DecreaseFrameSize(size_t adjust) OVERRIDE;
+
+ // Store routines.
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
+ void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+ void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) OVERRIDE;
+ void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
+ void StoreSpanning(FrameOffset dest,
+ ManagedRegister src,
+ FrameOffset in_off,
+ ManagedRegister scratch) OVERRIDE;
+
+ // Load routines.
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+ void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void LoadRef(ManagedRegister dest,
+ ManagedRegister base,
+ MemberOffset offs,
+ bool unpoison_reference) OVERRIDE;
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
+
+ // Copying routines.
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+ void CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset64 thr_offs,
+ ManagedRegister scratch) OVERRIDE;
+ void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ OVERRIDE;
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+ void Copy(FrameOffset dest,
+ ManagedRegister src_base,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+ void Copy(ManagedRegister dest_base,
+ Offset dest_offset,
+ FrameOffset src,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+ void Copy(FrameOffset dest,
+ FrameOffset src_base,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+ void Copy(ManagedRegister dest,
+ Offset dest_offset,
+ ManagedRegister src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+ void Copy(FrameOffset dest,
+ Offset dest_offset,
+ FrameOffset src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+ void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
+
+ // Sign extension.
+ void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Zero extension.
+ void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Exploit fast access in managed code to Thread::Current().
+ void GetCurrentThread(ManagedRegister tr) OVERRIDE;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+
+ // Set up out_reg to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed. in_reg holds a possibly stale reference
+ // that can be used to avoid loading the handle scope entry to see if the value is
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg,
+ FrameOffset handlescope_offset,
+ ManagedRegister in_reg,
+ bool null_allowed) OVERRIDE;
+
+ // Set up out_off to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed.
+ void CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handlescope_offset,
+ ManagedRegister scratch,
+ bool null_allowed) OVERRIDE;
+
+ // src holds a handle scope entry (Object**); load this into dst.
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+
+ // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+ // know that src cannot be null.
+ void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
+ void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+
+ // Call to address held at [base+offset].
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+ // and branch to an ExceptionSlowPath if it is.
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+
+ private:
+ class Arm64Exception {
+ public:
+ Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust)
+ : scratch_(scratch), stack_adjust_(stack_adjust) {}
+
+ vixl::aarch64::Label* Entry() { return &exception_entry_; }
+
+ // Register used for passing Thread::Current()->exception_.
+ const Arm64ManagedRegister scratch_;
+
+ // Stack adjust for ExceptionPoll.
+ const size_t stack_adjust_;
+
+ vixl::aarch64::Label exception_entry_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Arm64Exception);
+ };
+
+ // Emits Exception block.
+ void EmitExceptionPoll(Arm64Exception *exception);
+
+ void StoreWToOffset(StoreOperandType type,
+ WRegister source,
+ XRegister base,
+ int32_t offset);
+ void StoreToOffset(XRegister source, XRegister base, int32_t offset);
+ void StoreSToOffset(SRegister source, XRegister base, int32_t offset);
+ void StoreDToOffset(DRegister source, XRegister base, int32_t offset);
+
+ void LoadImmediate(XRegister dest,
+ int32_t value,
+ vixl::aarch64::Condition cond = vixl::aarch64::al);
+ void Load(Arm64ManagedRegister dst, XRegister src, int32_t src_offset, size_t size);
+ void LoadWFromOffset(LoadOperandType type,
+ WRegister dest,
+ XRegister base,
+ int32_t offset);
+ void LoadFromOffset(XRegister dest, XRegister base, int32_t offset);
+ void LoadSFromOffset(SRegister dest, XRegister base, int32_t offset);
+ void LoadDFromOffset(DRegister dest, XRegister base, int32_t offset);
+ void AddConstant(XRegister rd,
+ int32_t value,
+ vixl::aarch64::Condition cond = vixl::aarch64::al);
+ void AddConstant(XRegister rd,
+ XRegister rn,
+ int32_t value,
+ vixl::aarch64::Condition cond = vixl::aarch64::al);
+
+ // List of exception blocks to generate at the end of the code cache.
+ ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
+};
+
+} // namespace arm64
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_ARM64_JNI_MACRO_ASSEMBLER_ARM64_H_
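
The Arm64Exception bookkeeping above is a standard deferred slow-path idiom:
each ExceptionPoll site emits a single Cbnz to a fresh label, and the
out-of-line delivery blocks are emitted in one batch from FinalizeCode(). A
minimal C++ sketch of the idiom with hypothetical stand-in types (not the
VIXL or ART API):

    #include <cstddef>
    #include <memory>
    #include <vector>

    struct Label {};  // stand-in for vixl::aarch64::Label

    class PollEmitter {
     public:
      // At each poll site: record a pending slow path and hand back its
      // label so the caller can branch to it. unique_ptr keeps the Label
      // address stable as the vector grows, as with exception_blocks_.
      Label* AddPoll(size_t stack_adjust) {
        polls_.push_back(std::make_unique<Poll>(Poll{{}, stack_adjust}));
        return &polls_.back()->entry;
      }
      // Once, at finalization: bind each label and emit its out-of-line
      // block (frame fix-up, then the exception-delivery call).
      void Finalize() {
        for (const std::unique_ptr<Poll>& p : polls_) {
          (void)p;  // Bind(&p->entry); undo p->stack_adjust; deliver.
        }
      }
     private:
      struct Poll {
        Label entry;
        size_t stack_adjust;
      };
      std::vector<std::unique_ptr<Poll>> polls_;
    };

Emitting the polls inline and the blocks at the end keeps the hot path to one
load plus one untaken branch per poll.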
diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc
index 1acc90c..797a98c 100644
--- a/compiler/utils/jni_macro_assembler.cc
+++ b/compiler/utils/jni_macro_assembler.cc
@@ -20,11 +20,10 @@
#include <vector>
#ifdef ART_ENABLE_CODEGEN_arm
-#include "arm/assembler_arm32.h"
-#include "arm/assembler_thumb2.h"
+#include "arm/jni_macro_assembler_arm.h"
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
-#include "arm64/assembler_arm64.h"
+#include "arm64/jni_macro_assembler_arm64.h"
#endif
#ifdef ART_ENABLE_CODEGEN_mips
#include "mips/assembler_mips.h"
@@ -58,9 +57,8 @@
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
- return MacroAsm32UniquePtr(new (arena) arm::Arm32Assembler(arena));
case kThumb2:
- return MacroAsm32UniquePtr(new (arena) arm::Thumb2Assembler(arena));
+ return MacroAsm32UniquePtr(new (arena) arm::ArmJNIMacroAssembler(arena, instruction_set));
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
@@ -90,7 +88,7 @@
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
- return MacroAsm64UniquePtr(new (arena) arm64::Arm64Assembler(arena));
+ return MacroAsm64UniquePtr(new (arena) arm64::Arm64JNIMacroAssembler(arena));
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
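
Sketched call-site effect of this hunk, assuming the Create() factory
signature suggested by the surrounding code (an ArenaAllocator* plus the
InstructionSet); kept in comments since it only compiles inside the ART tree:

    // Hypothetical caller; set-features and error handling elided.
    // std::unique_ptr<JNIMacroAssembler<PointerSize::k64>> masm =
    //     JNIMacroAssembler<PointerSize::k64>::Create(&arena, kArm64);
    // masm is now an arm64::Arm64JNIMacroAssembler rather than a bare
    // arm64::Arm64Assembler, matching the 32-bit kArm/kThumb2 path.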
diff --git a/compiler/utils/x86/constants_x86.h b/compiler/utils/x86/constants_x86.h
index 2dfb65c..0bc1560 100644
--- a/compiler/utils/x86/constants_x86.h
+++ b/compiler/utils/x86/constants_x86.h
@@ -97,6 +97,8 @@
kNotZero = kNotEqual,
kNegative = kSign,
kPositive = kNotSign,
+ kCarrySet = kBelow,
+ kCarryClear = kAboveEqual,
kUnordered = kParityEven
};
diff --git a/compiler/utils/x86_64/constants_x86_64.h b/compiler/utils/x86_64/constants_x86_64.h
index 37db6b1..cc508a1 100644
--- a/compiler/utils/x86_64/constants_x86_64.h
+++ b/compiler/utils/x86_64/constants_x86_64.h
@@ -106,6 +106,8 @@
kNotZero = kNotEqual,
kNegative = kSign,
kPositive = kNotSign,
+ kCarrySet = kBelow,
+ kCarryClear = kAboveEqual,
kUnordered = kParityEven
};
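
Both new alias pairs encode an x86 fact rather than a convenience: after an
unsigned CMP a, b, the carry flag is set exactly when a < b, so "carry set"
and "below" share one condition encoding, as do "carry clear" and "above or
equal". A minimal sketch, with the numeric encodings assumed from the x86
"tttn" condition table:

    // Assumed tttn encodings (Intel SDM Vol. 2, Appendix B):
    // below / carry-set = 0b0010, above-or-equal / carry-clear = 0b0011.
    enum Condition {
      kBelow = 2,
      kAboveEqual = 3,
      kCarrySet = kBelow,        // CF == 1 after unsigned compare, a < b
      kCarryClear = kAboveEqual  // CF == 0 after unsigned compare, a >= b
    };

    static_assert(kCarrySet == kBelow && kCarryClear == kAboveEqual,
                  "aliases must share their condition encodings");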
diff --git a/dex2oat/Android.mk b/dex2oat/Android.mk
index f5f02cd..37acef6 100644
--- a/dex2oat/Android.mk
+++ b/dex2oat/Android.mk
@@ -62,7 +62,6 @@
libnativebridge \
libnativeloader \
libsigchain_dummy \
- libvixl-arm64 \
liblog \
libz \
libbacktrace \
@@ -83,14 +82,14 @@
ifeq ($(ART_BUILD_HOST_NDEBUG),true)
$(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libcutils libart-compiler libsigchain libziparchive-host liblz4,art/compiler,host,ndebug,$(dex2oat_host_arch)))
ifeq ($(ART_BUILD_HOST_STATIC),true)
- $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libart libart-compiler libart $(DEX2OAT_STATIC_DEPENDENCIES),art/compiler,host,ndebug,$(dex2oat_host_arch),static))
+ $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libart libart-compiler libart libvixl-arm64 $(DEX2OAT_STATIC_DEPENDENCIES),art/compiler,host,ndebug,$(dex2oat_host_arch),static))
endif
endif
ifeq ($(ART_BUILD_HOST_DEBUG),true)
$(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libcutils libartd-compiler libsigchain libziparchive-host liblz4,art/compiler,host,debug,$(dex2oat_host_arch)))
ifeq ($(ART_BUILD_HOST_STATIC),true)
- $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libartd libartd-compiler libartd $(DEX2OAT_STATIC_DEPENDENCIES),art/compiler,host,debug,$(dex2oat_host_arch),static))
+ $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libartd libartd-compiler libartd libvixld-arm64 $(DEX2OAT_STATIC_DEPENDENCIES),art/compiler,host,debug,$(dex2oat_host_arch),static))
endif
endif
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index eb11f6d..cfcfe1c 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1576,7 +1576,7 @@
IsAppImage(),
image_classes_.release(),
compiled_classes_.release(),
- /* compiled_methods */ nullptr,
+ compiled_methods_.release(),
thread_count_,
dump_stats_,
dump_passes_,
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index 96c3267..2042934 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -1060,20 +1060,6 @@
fprintf(gOutFile, "}, %s", indexBuf.get());
break;
}
- case Instruction::k25x: { // op vC, {vD, vE, vF, vG} (B: count)
- u4 arg[Instruction::kMaxVarArgRegs25x];
- pDecInsn->GetAllArgs25x(arg);
- fprintf(gOutFile, " v%d, {", arg[0]);
- for (int i = 0, n = pDecInsn->VRegB(); i < n; i++) {
- if (i == 0) {
- fprintf(gOutFile, "v%d", arg[Instruction::kLambdaVirtualRegisterWidth + i]);
- } else {
- fprintf(gOutFile, ", v%d", arg[Instruction::kLambdaVirtualRegisterWidth + i]);
- }
- } // for
- fputc('}', gOutFile);
- break;
- }
case Instruction::k3rc: // op {vCCCC .. v(CCCC+AA-1)}, thing@BBBB
// NOT SUPPORTED:
// case Instruction::k3rms: // [opt] invoke-virtual+super/range
diff --git a/disassembler/Android.mk b/disassembler/Android.mk
index 778fe8e..db327fc 100644
--- a/disassembler/Android.mk
+++ b/disassembler/Android.mk
@@ -90,9 +90,9 @@
LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE)
# For disassembler_arm64.
ifeq ($$(art_ndebug_or_debug),debug)
- LOCAL_SHARED_LIBRARIES += libvixl-arm64
+ LOCAL_SHARED_LIBRARIES += libvixld-arm64
else
- LOCAL_SHARED_LIBRARIES += libvixl-arm64
+ LOCAL_SHARED_LIBRARIES += libvixl-arm64
endif
ifeq ($$(art_target_or_host),target)
include $(BUILD_SHARED_LIBRARY)
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index a0def61..77730b9 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -73,9 +73,9 @@
"kResolutionMethod",
"kImtConflictMethod",
"kImtUnimplementedMethod",
- "kCalleeSaveMethod",
- "kRefsOnlySaveMethod",
- "kRefsAndArgsSaveMethod",
+ "kSaveAllCalleeSavesMethod",
+ "kSaveRefsOnlyMethod",
+ "kSaveRefsAndArgsMethod",
"kSaveEverythingMethod",
};
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 7a37f60..2f8b113 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -109,11 +109,6 @@
jit/offline_profiling_info.cc \
jit/profiling_info.cc \
jit/profile_saver.cc \
- lambda/art_lambda_method.cc \
- lambda/box_table.cc \
- lambda/closure.cc \
- lambda/closure_builder.cc \
- lambda/leaking_allocator.cc \
jni_internal.cc \
jobject_comparator.cc \
linear_alloc.cc \
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index 6d80eb6..a857976 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -63,137 +63,97 @@
// Grab architecture specific constants.
namespace arm {
#include "arch/arm/asm_support_arm.h"
-static constexpr size_t kFrameSizeSaveAllCalleeSave = FRAME_SIZE_SAVE_ALL_CALLEE_SAVE;
-#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
-static constexpr size_t kFrameSizeRefsOnlyCalleeSave = FRAME_SIZE_REFS_ONLY_CALLEE_SAVE;
-#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
-static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
-#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-static constexpr size_t kFrameSizeSaveEverythingCalleeSave = FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE;
-#undef FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE
+static constexpr size_t kFrameSizeSaveAllCalleeSaves = FRAME_SIZE_SAVE_ALL_CALLEE_SAVES;
+#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVES
+static constexpr size_t kFrameSizeSaveRefsOnly = FRAME_SIZE_SAVE_REFS_ONLY;
+#undef FRAME_SIZE_SAVE_REFS_ONLY
+static constexpr size_t kFrameSizeSaveRefsAndArgs = FRAME_SIZE_SAVE_REFS_AND_ARGS;
+#undef FRAME_SIZE_SAVE_REFS_AND_ARGS
+static constexpr size_t kFrameSizeSaveEverything = FRAME_SIZE_SAVE_EVERYTHING;
+#undef FRAME_SIZE_SAVE_EVERYTHING
} // namespace arm
namespace arm64 {
#include "arch/arm64/asm_support_arm64.h"
-static constexpr size_t kFrameSizeSaveAllCalleeSave = FRAME_SIZE_SAVE_ALL_CALLEE_SAVE;
-#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
-static constexpr size_t kFrameSizeRefsOnlyCalleeSave = FRAME_SIZE_REFS_ONLY_CALLEE_SAVE;
-#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
-static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
-#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-static constexpr size_t kFrameSizeSaveEverythingCalleeSave = FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE;
-#undef FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE
+static constexpr size_t kFrameSizeSaveAllCalleeSaves = FRAME_SIZE_SAVE_ALL_CALLEE_SAVES;
+#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVES
+static constexpr size_t kFrameSizeSaveRefsOnly = FRAME_SIZE_SAVE_REFS_ONLY;
+#undef FRAME_SIZE_SAVE_REFS_ONLY
+static constexpr size_t kFrameSizeSaveRefsAndArgs = FRAME_SIZE_SAVE_REFS_AND_ARGS;
+#undef FRAME_SIZE_SAVE_REFS_AND_ARGS
+static constexpr size_t kFrameSizeSaveEverything = FRAME_SIZE_SAVE_EVERYTHING;
+#undef FRAME_SIZE_SAVE_EVERYTHING
} // namespace arm64
namespace mips {
#include "arch/mips/asm_support_mips.h"
-static constexpr size_t kFrameSizeSaveAllCalleeSave = FRAME_SIZE_SAVE_ALL_CALLEE_SAVE;
-#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
-static constexpr size_t kFrameSizeRefsOnlyCalleeSave = FRAME_SIZE_REFS_ONLY_CALLEE_SAVE;
-#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
-static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
-#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-static constexpr size_t kFrameSizeSaveEverythingCalleeSave = FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE;
-#undef FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE
+static constexpr size_t kFrameSizeSaveAllCalleeSaves = FRAME_SIZE_SAVE_ALL_CALLEE_SAVES;
+#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVES
+static constexpr size_t kFrameSizeSaveRefsOnly = FRAME_SIZE_SAVE_REFS_ONLY;
+#undef FRAME_SIZE_SAVE_REFS_ONLY
+static constexpr size_t kFrameSizeSaveRefsAndArgs = FRAME_SIZE_SAVE_REFS_AND_ARGS;
+#undef FRAME_SIZE_SAVE_REFS_AND_ARGS
+static constexpr size_t kFrameSizeSaveEverything = FRAME_SIZE_SAVE_EVERYTHING;
+#undef FRAME_SIZE_SAVE_EVERYTHING
} // namespace mips
namespace mips64 {
#include "arch/mips64/asm_support_mips64.h"
-static constexpr size_t kFrameSizeSaveAllCalleeSave = FRAME_SIZE_SAVE_ALL_CALLEE_SAVE;
-#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
-static constexpr size_t kFrameSizeRefsOnlyCalleeSave = FRAME_SIZE_REFS_ONLY_CALLEE_SAVE;
-#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
-static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
-#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-static constexpr size_t kFrameSizeSaveEverythingCalleeSave = FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE;
-#undef FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE
+static constexpr size_t kFrameSizeSaveAllCalleeSaves = FRAME_SIZE_SAVE_ALL_CALLEE_SAVES;
+#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVES
+static constexpr size_t kFrameSizeSaveRefsOnly = FRAME_SIZE_SAVE_REFS_ONLY;
+#undef FRAME_SIZE_SAVE_REFS_ONLY
+static constexpr size_t kFrameSizeSaveRefsAndArgs = FRAME_SIZE_SAVE_REFS_AND_ARGS;
+#undef FRAME_SIZE_SAVE_REFS_AND_ARGS
+static constexpr size_t kFrameSizeSaveEverything = FRAME_SIZE_SAVE_EVERYTHING;
+#undef FRAME_SIZE_SAVE_EVERYTHING
} // namespace mips64
namespace x86 {
#include "arch/x86/asm_support_x86.h"
-static constexpr size_t kFrameSizeSaveAllCalleeSave = FRAME_SIZE_SAVE_ALL_CALLEE_SAVE;
-#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
-static constexpr size_t kFrameSizeRefsOnlyCalleeSave = FRAME_SIZE_REFS_ONLY_CALLEE_SAVE;
-#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
-static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
-#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-static constexpr size_t kFrameSizeSaveEverythingCalleeSave = FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE;
-#undef FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE
+static constexpr size_t kFrameSizeSaveAllCalleeSaves = FRAME_SIZE_SAVE_ALL_CALLEE_SAVES;
+#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVES
+static constexpr size_t kFrameSizeSaveRefsOnly = FRAME_SIZE_SAVE_REFS_ONLY;
+#undef FRAME_SIZE_SAVE_REFS_ONLY
+static constexpr size_t kFrameSizeSaveRefsAndArgs = FRAME_SIZE_SAVE_REFS_AND_ARGS;
+#undef FRAME_SIZE_SAVE_REFS_AND_ARGS
+static constexpr size_t kFrameSizeSaveEverything = FRAME_SIZE_SAVE_EVERYTHING;
+#undef FRAME_SIZE_SAVE_EVERYTHING
} // namespace x86
namespace x86_64 {
#include "arch/x86_64/asm_support_x86_64.h"
-static constexpr size_t kFrameSizeSaveAllCalleeSave = FRAME_SIZE_SAVE_ALL_CALLEE_SAVE;
-#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
-static constexpr size_t kFrameSizeRefsOnlyCalleeSave = FRAME_SIZE_REFS_ONLY_CALLEE_SAVE;
-#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
-static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
-#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-static constexpr size_t kFrameSizeSaveEverythingCalleeSave = FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE;
-#undef FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE
+static constexpr size_t kFrameSizeSaveAllCalleeSaves = FRAME_SIZE_SAVE_ALL_CALLEE_SAVES;
+#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVES
+static constexpr size_t kFrameSizeSaveRefsOnly = FRAME_SIZE_SAVE_REFS_ONLY;
+#undef FRAME_SIZE_SAVE_REFS_ONLY
+static constexpr size_t kFrameSizeSaveRefsAndArgs = FRAME_SIZE_SAVE_REFS_AND_ARGS;
+#undef FRAME_SIZE_SAVE_REFS_AND_ARGS
+static constexpr size_t kFrameSizeSaveEverything = FRAME_SIZE_SAVE_EVERYTHING;
+#undef FRAME_SIZE_SAVE_EVERYTHING
} // namespace x86_64
// Check architecture specific constants are sound.
-TEST_F(ArchTest, ARM) {
- CheckFrameSize(InstructionSet::kArm, Runtime::kSaveAll, arm::kFrameSizeSaveAllCalleeSave);
- CheckFrameSize(InstructionSet::kArm, Runtime::kRefsOnly, arm::kFrameSizeRefsOnlyCalleeSave);
- CheckFrameSize(InstructionSet::kArm, Runtime::kRefsAndArgs, arm::kFrameSizeRefsAndArgsCalleeSave);
- CheckFrameSize(InstructionSet::kArm,
- Runtime::kSaveEverything,
- arm::kFrameSizeSaveEverythingCalleeSave);
-}
-
-
-TEST_F(ArchTest, ARM64) {
- CheckFrameSize(InstructionSet::kArm64, Runtime::kSaveAll, arm64::kFrameSizeSaveAllCalleeSave);
- CheckFrameSize(InstructionSet::kArm64, Runtime::kRefsOnly, arm64::kFrameSizeRefsOnlyCalleeSave);
- CheckFrameSize(InstructionSet::kArm64, Runtime::kRefsAndArgs,
- arm64::kFrameSizeRefsAndArgsCalleeSave);
- CheckFrameSize(InstructionSet::kArm64,
- Runtime::kSaveEverything,
- arm64::kFrameSizeSaveEverythingCalleeSave);
-}
-
-TEST_F(ArchTest, MIPS) {
- CheckFrameSize(InstructionSet::kMips, Runtime::kSaveAll, mips::kFrameSizeSaveAllCalleeSave);
- CheckFrameSize(InstructionSet::kMips, Runtime::kRefsOnly, mips::kFrameSizeRefsOnlyCalleeSave);
- CheckFrameSize(InstructionSet::kMips,
- Runtime::kRefsAndArgs,
- mips::kFrameSizeRefsAndArgsCalleeSave);
- CheckFrameSize(InstructionSet::kMips,
- Runtime::kSaveEverything,
- mips::kFrameSizeSaveEverythingCalleeSave);
-}
-
-TEST_F(ArchTest, MIPS64) {
- CheckFrameSize(InstructionSet::kMips64, Runtime::kSaveAll, mips64::kFrameSizeSaveAllCalleeSave);
- CheckFrameSize(InstructionSet::kMips64, Runtime::kRefsOnly, mips64::kFrameSizeRefsOnlyCalleeSave);
- CheckFrameSize(InstructionSet::kMips64,
- Runtime::kRefsAndArgs,
- mips64::kFrameSizeRefsAndArgsCalleeSave);
- CheckFrameSize(InstructionSet::kMips64,
- Runtime::kSaveEverything,
- mips64::kFrameSizeSaveEverythingCalleeSave);
-}
-
-TEST_F(ArchTest, X86) {
- CheckFrameSize(InstructionSet::kX86, Runtime::kSaveAll, x86::kFrameSizeSaveAllCalleeSave);
- CheckFrameSize(InstructionSet::kX86, Runtime::kRefsOnly, x86::kFrameSizeRefsOnlyCalleeSave);
- CheckFrameSize(InstructionSet::kX86, Runtime::kRefsAndArgs, x86::kFrameSizeRefsAndArgsCalleeSave);
- CheckFrameSize(InstructionSet::kX86,
- Runtime::kSaveEverything,
- x86::kFrameSizeSaveEverythingCalleeSave);
-}
-
-TEST_F(ArchTest, X86_64) {
- CheckFrameSize(InstructionSet::kX86_64, Runtime::kSaveAll, x86_64::kFrameSizeSaveAllCalleeSave);
- CheckFrameSize(InstructionSet::kX86_64, Runtime::kRefsOnly, x86_64::kFrameSizeRefsOnlyCalleeSave);
- CheckFrameSize(InstructionSet::kX86_64,
- Runtime::kRefsAndArgs,
- x86_64::kFrameSizeRefsAndArgsCalleeSave);
- CheckFrameSize(InstructionSet::kX86_64,
- Runtime::kSaveEverything,
- x86_64::kFrameSizeSaveEverythingCalleeSave);
-}
+#define TEST_ARCH(Arch, arch) \
+ TEST_F(ArchTest, Arch) { \
+ CheckFrameSize(InstructionSet::k##Arch, \
+ Runtime::kSaveAllCalleeSaves, \
+ arch::kFrameSizeSaveAllCalleeSaves); \
+ CheckFrameSize(InstructionSet::k##Arch, \
+ Runtime::kSaveRefsOnly, \
+ arch::kFrameSizeSaveRefsOnly); \
+ CheckFrameSize(InstructionSet::k##Arch, \
+ Runtime::kSaveRefsAndArgs, \
+ arch::kFrameSizeSaveRefsAndArgs); \
+ CheckFrameSize(InstructionSet::k##Arch, \
+ Runtime::kSaveEverything, \
+ arch::kFrameSizeSaveEverything); \
+ }
+TEST_ARCH(Arm, arm)
+TEST_ARCH(Arm64, arm64)
+TEST_ARCH(Mips, mips)
+TEST_ARCH(Mips64, mips64)
+TEST_ARCH(X86, x86)
+TEST_ARCH(X86_64, x86_64)
} // namespace art
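
For reference, each TEST_ARCH invocation expands to the equivalent of the
hand-written test it replaces; for TEST_ARCH(Arm, arm):

    TEST_F(ArchTest, Arm) {
      CheckFrameSize(InstructionSet::kArm,
                     Runtime::kSaveAllCalleeSaves,
                     arm::kFrameSizeSaveAllCalleeSaves);
      CheckFrameSize(InstructionSet::kArm,
                     Runtime::kSaveRefsOnly,
                     arm::kFrameSizeSaveRefsOnly);
      CheckFrameSize(InstructionSet::kArm,
                     Runtime::kSaveRefsAndArgs,
                     arm::kFrameSizeSaveRefsAndArgs);
      CheckFrameSize(InstructionSet::kArm,
                     Runtime::kSaveEverything,
                     arm::kFrameSizeSaveEverything);
    }

This is the same coverage as before, with the kSaveAll/kRefsOnly/kRefsAndArgs
enumerators renamed to kSaveAllCalleeSaves/kSaveRefsOnly/kSaveRefsAndArgs.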
diff --git a/runtime/arch/arm/asm_support_arm.h b/runtime/arch/arm/asm_support_arm.h
index 67f6f7a..c03bcae 100644
--- a/runtime/arch/arm/asm_support_arm.h
+++ b/runtime/arch/arm/asm_support_arm.h
@@ -19,10 +19,10 @@
#include "asm_support.h"
-#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 112
-#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 32
-#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 112
-#define FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE 192
+#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVES 112
+#define FRAME_SIZE_SAVE_REFS_ONLY 32
+#define FRAME_SIZE_SAVE_REFS_AND_ARGS 112
+#define FRAME_SIZE_SAVE_EVERYTHING 192
// Flag for enabling R4 optimization in arm runtime
// #define ARM_R4_SUSPEND_FLAG
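
The renamed ARM frame-size constants keep their old values; a quick
consistency sketch against the spill counts in the assembly below (9
callee-save GPR words, 16 float words, 3 words of padding plus Method* for
the save-all frame):

    static_assert(9 * 4 + 16 * 4 + 12 == 112,
                  "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES must match the spills");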
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 42418ad..3d0da80 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -42,30 +42,31 @@
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kSaveAll)
+ * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
*/
-.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME rTemp
+.macro SETUP_SAVE_ALL_CALLEE_SAVES_FRAME rTemp
SPILL_ALL_CALLEE_SAVE_GPRS @ 9 words (36 bytes) of callee saves.
vpush {s16-s31} @ 16 words (64 bytes) of floats.
.cfi_adjust_cfa_offset 64
sub sp, #12 @ 3 words of space, bottom word will hold Method*
.cfi_adjust_cfa_offset 12
RUNTIME_CURRENT1 \rTemp @ Load Runtime::Current into rTemp.
- ldr \rTemp, [\rTemp, #RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET] @ rTemp is kSaveAll Method*.
+ @ Load kSaveAllCalleeSaves Method* into rTemp.
+ ldr \rTemp, [\rTemp, #RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET]
str \rTemp, [sp, #0] @ Place Method* at bottom of stack.
str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 36 + 64 + 12)
-#error "SAVE_ALL_CALLEE_SAVE_FRAME(ARM) size not as expected."
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 36 + 64 + 12)
+#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(ARM) size not as expected."
#endif
.endm
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsOnly).
+ * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly).
*/
-.macro SETUP_REFS_ONLY_CALLEE_SAVE_FRAME rTemp
+.macro SETUP_SAVE_REFS_ONLY_FRAME rTemp
push {r5-r8, r10-r11, lr} @ 7 words of callee saves
.cfi_adjust_cfa_offset 28
.cfi_rel_offset r5, 0
@@ -78,17 +79,18 @@
sub sp, #4 @ bottom word will hold Method*
.cfi_adjust_cfa_offset 4
RUNTIME_CURRENT2 \rTemp @ Load Runtime::Current into rTemp.
- ldr \rTemp, [\rTemp, #RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET] @ rTemp is kRefsOnly Method*.
+ @ Load kSaveRefsOnly Method* into rTemp.
+ ldr \rTemp, [\rTemp, #RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET]
str \rTemp, [sp, #0] @ Place Method* at bottom of stack.
str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 28 + 4)
-#error "REFS_ONLY_CALLEE_SAVE_FRAME(ARM) size not as expected."
+#if (FRAME_SIZE_SAVE_REFS_ONLY != 28 + 4)
+#error "FRAME_SIZE_SAVE_REFS_ONLY(ARM) size not as expected."
#endif
.endm
-.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+.macro RESTORE_SAVE_REFS_ONLY_FRAME
add sp, #4 @ bottom word holds Method*
.cfi_adjust_cfa_offset -4
pop {r5-r8, r10-r11, lr} @ 7 words of callee saves
@@ -102,16 +104,16 @@
.cfi_adjust_cfa_offset -28
.endm
-.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+.macro RESTORE_SAVE_REFS_ONLY_FRAME_AND_RETURN
+ RESTORE_SAVE_REFS_ONLY_FRAME
bx lr @ return
.endm
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsAndArgs).
+ * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs).
*/
-.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
+.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
push {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves and args.
.cfi_adjust_cfa_offset 40
.cfi_rel_offset r1, 0
@@ -126,30 +128,30 @@
.cfi_rel_offset lr, 36
vpush {s0-s15} @ 16 words of float args.
.cfi_adjust_cfa_offset 64
- sub sp, #8 @ 2 words of space, bottom word will hold Method*
+ sub sp, #8 @ 2 words of space, alignment padding and Method*
.cfi_adjust_cfa_offset 8
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 40 + 64 + 8)
-#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(ARM) size not as expected."
+#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 40 + 64 + 8)
+#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(ARM) size not as expected."
#endif
.endm
-.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME rTemp
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
+.macro SETUP_SAVE_REFS_AND_ARGS_FRAME rTemp
+ SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
RUNTIME_CURRENT3 \rTemp @ Load Runtime::Current into rTemp.
- @ rTemp is kRefsAndArgs Method*.
- ldr \rTemp, [\rTemp, #RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET]
+ @ Load kSaveRefsAndArgs Method* into rTemp.
+ ldr \rTemp, [\rTemp, #RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET]
str \rTemp, [sp, #0] @ Place Method* at bottom of stack.
str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
.endm
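
(Editor's note, hedged:) In C++ terms, the tail of this macro performs roughly the following. Runtime::GetCalleeSaveMethod is ART's accessor for the callee_save_methods_ array; the spelling of the top_quick_frame field is an approximation of the slot behind THREAD_TOP_QUICK_FRAME_OFFSET.

    // Sketch of the RUNTIME_CURRENT3/ldr/str/str sequence above:
    ArtMethod* method =
        Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs);
    *reinterpret_cast<ArtMethod**>(sp) = method;  // Method* at frame bottom
    self->tlsPtr_.top_quick_frame = sp;           // publish for stack walking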
-.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_R0
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
- str r0, [sp, #0] @ Store ArtMethod* to bottom of stack.
+.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_R0
+ SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
+ str r0, [sp, #0] @ Store ArtMethod* to bottom of stack.
str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
.endm
-.macro RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME
add sp, #8 @ rewind sp
.cfi_adjust_cfa_offset -8
vpop {s0-s15}
@@ -172,7 +174,7 @@
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveEverything)
*/
-.macro SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME rTemp
+.macro SETUP_SAVE_EVERYTHING_FRAME rTemp
push {r0-r12, lr} @ 14 words of callee saves and args.
.cfi_adjust_cfa_offset 56
.cfi_rel_offset r0, 0
@@ -189,26 +191,26 @@
.cfi_rel_offset r11, 44
.cfi_rel_offset ip, 48
.cfi_rel_offset lr, 52
- vpush {s0-s31} @ 32 words of float args.
+ vpush {d0-d15} @ 32 words of float args.
.cfi_adjust_cfa_offset 128
sub sp, #8 @ 2 words of space, alignment padding and Method*
.cfi_adjust_cfa_offset 8
RUNTIME_CURRENT1 \rTemp @ Load Runtime::Current into rTemp.
- @ Load kSaveEverything Method* to rTemp.
- ldr \rTemp, [\rTemp, #RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET]
- str \rTemp, [sp, #0] @ Store kSaveEverything Method* to the bottom of the stack.
+ @ Load kSaveEverything Method* into rTemp.
+ ldr \rTemp, [\rTemp, #RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET]
+ str \rTemp, [sp, #0] @ Place Method* at bottom of stack.
str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE != 56 + 128 + 8)
-#error "SAVE_EVERYTHING_CALLEE_SAVE_FRAME(ARM) size not as expected."
+#if (FRAME_SIZE_SAVE_EVERYTHING != 56 + 128 + 8)
+#error "FRAME_SIZE_SAVE_EVERYTHING(ARM) size not as expected."
#endif
.endm
-.macro RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+.macro RESTORE_SAVE_EVERYTHING_FRAME
add sp, #8 @ rewind sp
.cfi_adjust_cfa_offset -8
- vpop {s0-s31}
+ vpop {d0-d15}
.cfi_adjust_cfa_offset -128
pop {r0-r12, lr} @ 14 words of callee saves
.cfi_restore r0
@@ -246,7 +248,7 @@
.macro DELIVER_PENDING_EXCEPTION
.fnend
.fnstart
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0 @ save callee saves for throw
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r0 @ save callee saves for throw
mov r0, r9 @ pass Thread::Current
b artDeliverPendingExceptionFromCode @ artDeliverPendingExceptionFromCode(Thread*)
.endm
@@ -254,7 +256,7 @@
.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0 @ save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r0 @ save all registers as basis for long jump context
mov r0, r9 @ pass Thread::Current
b \cxx_name @ \cxx_name(Thread*)
END \c_name
@@ -263,7 +265,7 @@
.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r1 @ save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r1 @ save all registers as basis for long jump context
mov r1, r9 @ pass Thread::Current
b \cxx_name @ \cxx_name(Thread*)
END \c_name
@@ -272,7 +274,7 @@
.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r2 @ save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r2 @ save all registers as basis for long jump context
mov r2, r9 @ pass Thread::Current
b \cxx_name @ \cxx_name(Thread*)
END \c_name
@@ -304,11 +306,11 @@
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves in case of GC
- ldr r1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
+ SETUP_SAVE_REFS_ONLY_FRAME r1 @ save callee saves in case of GC
+ ldr r1, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] @ pass referrer
mov r2, r9 @ pass Thread::Current
bl \entrypoint @ (uint32_t field_idx, const Method* referrer, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
\return
END \name
.endm
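
(Editor's note, hedged:) The referrer load above relies on a quick-ABI invariant: every quick frame keeps its ArtMethod* in the bottom word, so the word just past the frame we built (sp + FRAME_SIZE_SAVE_REFS_ONLY) is the bottom word of the caller's frame, i.e. the caller's method. A minimal sketch:

    // The caller's ArtMethod* (the "referrer") sits at the bottom of the
    // caller's frame, immediately above the SaveRefsOnly frame just built:
    ArtMethod* referrer = *reinterpret_cast<ArtMethod**>(
        reinterpret_cast<uintptr_t>(sp) + FRAME_SIZE_SAVE_REFS_ONLY);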
@@ -316,11 +318,11 @@
.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 @ save callee saves in case of GC
- ldr r2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
+ SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
+ ldr r2, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] @ pass referrer
mov r3, r9 @ pass Thread::Current
bl \entrypoint @ (field_idx, Object*, referrer, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
\return
END \name
.endm
@@ -328,14 +330,14 @@
.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r3 @ save callee saves in case of GC
- ldr r3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
+ SETUP_SAVE_REFS_ONLY_FRAME r3 @ save callee saves in case of GC
+ ldr r3, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] @ pass referrer
str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 16
bl \entrypoint @ (field_idx, Object*, new_val, referrer, Thread*)
add sp, #16 @ release out args
.cfi_adjust_cfa_offset -16
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here
+ RESTORE_SAVE_REFS_ONLY_FRAME @ TODO: we can clearly save an add here
\return
END \name
.endm
@@ -400,12 +402,12 @@
*/
.macro INVOKE_TRAMPOLINE_BODY cxx_name
.extern \cxx_name
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2 @ save callee saves in case allocation triggers GC
+ SETUP_SAVE_REFS_AND_ARGS_FRAME r2 @ save callee saves in case allocation triggers GC
mov r2, r9 @ pass Thread::Current
mov r3, sp
bl \cxx_name @ (method_idx, this, Thread*, SP)
mov r12, r1 @ save Method*->code_
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
cbz r0, 1f @ did we find the target? if not go to exception delivery
bx r12 @ tail call to target
1:
@@ -606,19 +608,19 @@
.Llock_strex_fail:
b .Lretry_lock @ retry
.Lslow_lock:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves in case we block
+ SETUP_SAVE_REFS_ONLY_FRAME r1 @ save callee saves in case we block
mov r1, r9 @ pass Thread::Current
bl artLockObjectFromCode @ (Object* obj, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_ZERO
DELIVER_PENDING_EXCEPTION
END art_quick_lock_object
ENTRY art_quick_lock_object_no_inline
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves in case we block
+ SETUP_SAVE_REFS_ONLY_FRAME r1 @ save callee saves in case we block
mov r1, r9 @ pass Thread::Current
bl artLockObjectFromCode @ (Object* obj, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_ZERO
DELIVER_PENDING_EXCEPTION
END art_quick_lock_object_no_inline
@@ -672,20 +674,20 @@
b .Lretry_unlock @ retry
.Lslow_unlock:
@ save callee saves in case exception allocation triggers GC
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1
+ SETUP_SAVE_REFS_ONLY_FRAME r1
mov r1, r9 @ pass Thread::Current
bl artUnlockObjectFromCode @ (Object* obj, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_ZERO
DELIVER_PENDING_EXCEPTION
END art_quick_unlock_object
ENTRY art_quick_unlock_object_no_inline
@ save callee saves in case exception allocation triggers GC
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1
+ SETUP_SAVE_REFS_ONLY_FRAME r1
mov r1, r9 @ pass Thread::Current
bl artUnlockObjectFromCode @ (Object* obj, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_ZERO
DELIVER_PENDING_EXCEPTION
END art_quick_unlock_object_no_inline
@@ -717,7 +719,7 @@
.cfi_restore r0
.cfi_restore r1
.cfi_restore lr
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r2 @ save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r2 @ save all registers as basis for long jump context
mov r2, r9 @ pass Thread::Current
b artThrowClassCastException @ (Class*, Class*, Thread*)
bkpt
@@ -859,7 +861,7 @@
.Lthrow_array_store_exception:
pop {r0-r2, lr}
/* No need to repeat restore cfi directives, the ones above apply here. */
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r3
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r3
mov r1, r2
mov r2, r9 @ pass Thread::Current
b artThrowArrayStoreException @ (Class*, Class*, Thread*)
@@ -870,10 +872,10 @@
.macro ONE_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME r1 @ save callee saves in case of GC
mov r1, r9 @ pass Thread::Current
bl \entrypoint @ (uint32_t type_idx, Method* method, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
\return
END \name
.endm
@@ -882,10 +884,10 @@
.macro TWO_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 @ save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
mov r2, r9 @ pass Thread::Current
bl \entrypoint @ (uint32_t type_idx, Method* method, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
\return
END \name
.endm
@@ -894,11 +896,11 @@
.macro THREE_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r3 @ save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME r3 @ save callee saves in case of GC
mov r3, r9 @ pass Thread::Current
@ (uint32_t type_idx, Method* method, int32_t component_count, Thread*)
bl \entrypoint
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
\return
END \name
.endm
@@ -907,13 +909,13 @@
.macro FOUR_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r12 @ save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME r12 @ save callee saves in case of GC
str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 16
bl \entrypoint
add sp, #16 @ strip the extra frame
.cfi_adjust_cfa_offset -16
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
\return
END \name
.endm
@@ -936,12 +938,12 @@
*/
.extern artGet64StaticFromCode
ENTRY art_quick_get64_static
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 @ save callee saves in case of GC
- ldr r1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
+ SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
+ ldr r1, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] @ pass referrer
mov r2, r9 @ pass Thread::Current
bl artGet64StaticFromCode @ (uint32_t field_idx, const Method* referrer, Thread*)
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
cbnz r2, 1f @ success if no exception pending
bx lr @ return on success
1:
@@ -962,12 +964,12 @@
*/
.extern artGet64InstanceFromCode
ENTRY art_quick_get64_instance
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 @ save callee saves in case of GC
- ldr r2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
+ SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
+ ldr r2, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] @ pass referrer
mov r3, r9 @ pass Thread::Current
bl artGet64InstanceFromCode @ (field_idx, Object*, referrer, Thread*)
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
cbnz r2, 1f @ success if no exception pending
bx lr @ return on success
1:
@@ -987,15 +989,15 @@
*/
.extern artSet64StaticFromCode
ENTRY art_quick_set64_static
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME r1 @ save callee saves in case of GC
@ r2:r3 contain the wide argument
- ldr r1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
+ ldr r1, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] @ pass referrer
str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 16
bl artSet64StaticFromCode @ (field_idx, referrer, new_val, Thread*)
add sp, #16 @ release out args
.cfi_adjust_cfa_offset -16
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here
+ RESTORE_SAVE_REFS_ONLY_FRAME @ TODO: we can clearly save an add here
RETURN_IF_RESULT_IS_ZERO
DELIVER_PENDING_EXCEPTION
END art_quick_set64_static
@@ -1012,9 +1014,9 @@
*/
.extern artSet64InstanceFromCode
ENTRY art_quick_set64_instance
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r12 @ save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME r12 @ save callee saves in case of GC
@ r2:r3 contain the wide argument
- ldr r12, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
+ ldr r12, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] @ pass referrer
str r9, [sp, #-12]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 12
str r12, [sp, #-4]! @ expand the frame and pass the referrer
@@ -1022,7 +1024,7 @@
bl artSet64InstanceFromCode @ (field_idx, Object*, new_val, Method* referrer, Thread*)
add sp, #16 @ release out args
.cfi_adjust_cfa_offset -16
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here
+ RESTORE_SAVE_REFS_ONLY_FRAME @ TODO: we can clearly save an add here
RETURN_IF_RESULT_IS_ZERO
DELIVER_PENDING_EXCEPTION
END art_quick_set64_instance
@@ -1138,10 +1140,10 @@
bx lr
.Lart_quick_alloc_object_rosalloc_slow_path:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 @ save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
mov r2, r9 @ pass Thread::Current
bl artAllocObjectFromCodeRosAlloc @ (uint32_t type_idx, Method* method, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_rosalloc
@@ -1223,10 +1225,10 @@
ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
.Lart_quick_alloc_object_tlab_slow_path:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 // Save callee saves in case of GC.
+ SETUP_SAVE_REFS_ONLY_FRAME r2 // Save callee saves in case of GC.
mov r2, r9 // Pass Thread::Current.
bl artAllocObjectFromCodeTLAB // (uint32_t type_idx, Method* method, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_tlab
@@ -1258,10 +1260,10 @@
pop {r0, r1, r3, lr}
b .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
.Lart_quick_alloc_object_region_tlab_slow_path:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 // Save callee saves in case of GC.
+ SETUP_SAVE_REFS_ONLY_FRAME r2 // Save callee saves in case of GC.
mov r2, r9 // Pass Thread::Current.
bl artAllocObjectFromCodeRegionTLAB // (uint32_t type_idx, Method* method, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_region_tlab
@@ -1278,18 +1280,18 @@
1:
mov rSUSPEND, #SUSPEND_CHECK_INTERVAL @ reset rSUSPEND to SUSPEND_CHECK_INTERVAL
#endif
- SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME r0 @ save everything for GC stack crawl
+ SETUP_SAVE_EVERYTHING_FRAME r0 @ save everything for GC stack crawl
mov r0, rSELF
bl artTestSuspendFromCode @ (Thread*)
- RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_EVERYTHING_FRAME
bx lr
END art_quick_test_suspend
ENTRY art_quick_implicit_suspend
mov r0, rSELF
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves for stack crawl
+ SETUP_SAVE_REFS_ONLY_FRAME r1 @ save callee saves for stack crawl
bl artTestSuspendFromCode @ (Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+ RESTORE_SAVE_REFS_ONLY_FRAME_AND_RETURN
END art_quick_implicit_suspend
/*
@@ -1299,15 +1301,15 @@
*/
.extern artQuickProxyInvokeHandler
ENTRY art_quick_proxy_invoke_handler
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_R0
+ SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_R0
mov r2, r9 @ pass Thread::Current
mov r3, sp @ pass SP
blx artQuickProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP)
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
// Tear down the callee-save frame. Skip arg registers.
- add sp, #(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
- .cfi_adjust_cfa_offset -(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ add sp, #(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
+ .cfi_adjust_cfa_offset -(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
+ RESTORE_SAVE_REFS_ONLY_FRAME
cbnz r2, 1f @ success if no exception is pending
vmov d0, r0, r1 @ store into fpr, for when it's a fpr return...
bx lr @ return on success
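
(Editor's note, hedged:) The "skip arg registers" teardown works because the two layouts share their top portion: the registers restored by RESTORE_SAVE_REFS_ONLY_FRAME (r5-r8, r10-r11, lr) sit at the top of both frames, so advancing sp by the size difference leaves something the smaller restore sequence can unwind — its bottom word, normally Method*, lands on a saved argument register and is simply discarded by the `add sp, #4`. With the ARM sizes checked earlier in this file:

    // Sketch: the arg area skipped by the proxy/JNI/interpreter teardown.
    // 112 (SaveRefsAndArgs) - 32 (SaveRefsOnly) == 80 bytes of args + FP args.
    static_assert(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY == 80,
                  "teardown assumes both frames end with the same callee saves");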
@@ -1350,17 +1352,17 @@
.extern artQuickResolutionTrampoline
ENTRY art_quick_resolution_trampoline
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2
+ SETUP_SAVE_REFS_AND_ARGS_FRAME r2
mov r2, r9 @ pass Thread::Current
mov r3, sp @ pass SP
blx artQuickResolutionTrampoline @ (Method* called, receiver, Thread*, SP)
cbz r0, 1f @ is code pointer null? goto exception
mov r12, r0
ldr r0, [sp, #0] @ load resolved method in r0
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
bx r12 @ tail-call into actual code
1:
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
DELIVER_PENDING_EXCEPTION
END art_quick_resolution_trampoline
@@ -1368,7 +1370,7 @@
* Called to do a generic JNI down-call
*/
ENTRY art_quick_generic_jni_trampoline
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_R0
+ SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_R0
// Save rSELF
mov r11, rSELF
@@ -1435,16 +1437,16 @@
.cfi_def_cfa_register sp
// Tear down the callee-save frame. Skip arg registers.
- add sp, #FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
- .cfi_adjust_cfa_offset -(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ add sp, #FRAME_SIZE_SAVE_REFS_AND_ARGS-FRAME_SIZE_SAVE_REFS_ONLY
+ .cfi_adjust_cfa_offset -(FRAME_SIZE_SAVE_REFS_AND_ARGS-FRAME_SIZE_SAVE_REFS_ONLY)
+ RESTORE_SAVE_REFS_ONLY_FRAME
// store into fpr, for when it's a fpr return...
vmov d0, r0, r1
bx lr // ret
// Undo the unwinding information from above since it doesn't apply below.
.cfi_def_cfa_register r10
- .cfi_adjust_cfa_offset FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+ .cfi_adjust_cfa_offset FRAME_SIZE_SAVE_REFS_AND_ARGS-FRAME_SIZE_SAVE_REFS_ONLY
.Lexception_in_native:
ldr sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET]
@@ -1455,15 +1457,15 @@
.extern artQuickToInterpreterBridge
ENTRY art_quick_to_interpreter_bridge
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r1
+ SETUP_SAVE_REFS_AND_ARGS_FRAME r1
mov r1, r9 @ pass Thread::Current
mov r2, sp @ pass SP
blx artQuickToInterpreterBridge @ (Method* method, Thread*, SP)
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
// Tear down the callee-save frame. Skip arg registers.
- add sp, #(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
- .cfi_adjust_cfa_offset -(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ add sp, #(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
+ .cfi_adjust_cfa_offset -(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
+ RESTORE_SAVE_REFS_ONLY_FRAME
cbnz r2, 1f @ success if no exception is pending
vmov d0, r0, r1 @ store into fpr, for when it's a fpr return...
bx lr @ return on success
@@ -1478,22 +1480,22 @@
.extern artInstrumentationMethodExitFromCode
ENTRY art_quick_instrumentation_entry
@ Make stack crawlable and clobber r2 and r3 (post saving)
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2
- @ preserve r0 (not normally an arg) knowing there is a spare slot in kRefsAndArgs.
+ SETUP_SAVE_REFS_AND_ARGS_FRAME r2
+ @ preserve r0 (not normally an arg) knowing there is a spare slot in kSaveRefsAndArgs.
str r0, [sp, #4]
mov r2, r9 @ pass Thread::Current
mov r3, lr @ pass LR
blx artInstrumentationMethodEntryFromCode @ (Method*, Object*, Thread*, LR)
mov r12, r0 @ r12 holds reference to code
ldr r0, [sp, #4] @ restore r0
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
blx r12 @ call method with lr set to art_quick_instrumentation_exit
@ Deliberate fall-through into art_quick_instrumentation_exit.
.type art_quick_instrumentation_exit, #function
.global art_quick_instrumentation_exit
art_quick_instrumentation_exit:
mov lr, #0 @ link register is to here, so clobber with 0 for later checks
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 @ set up frame knowing r2 and r3 must be dead on exit
+ SETUP_SAVE_REFS_ONLY_FRAME r2 @ set up frame knowing r2 and r3 must be dead on exit
mov r12, sp @ remember bottom of caller's frame
push {r0-r1} @ save return value
.cfi_adjust_cfa_offset 8
@@ -1532,7 +1534,7 @@
*/
.extern artDeoptimize
ENTRY art_quick_deoptimize
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r0
mov r0, r9 @ Set up args.
blx artDeoptimize @ artDeoptimize(Thread*)
END art_quick_deoptimize
@@ -1543,7 +1545,7 @@
*/
.extern artDeoptimizeFromCompiledCode
ENTRY art_quick_deoptimize_from_compiled_code
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r0
mov r0, r9 @ Set up args.
blx artDeoptimizeFromCompiledCode @ artDeoptimizeFromCompiledCode(Thread*)
END art_quick_deoptimize_from_compiled_code
diff --git a/runtime/arch/arm/quick_method_frame_info_arm.h b/runtime/arch/arm/quick_method_frame_info_arm.h
index c474d2e..4b23c77 100644
--- a/runtime/arch/arm/quick_method_frame_info_arm.h
+++ b/runtime/arch/arm/quick_method_frame_info_arm.h
@@ -55,15 +55,15 @@
constexpr uint32_t ArmCalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
return kArmCalleeSaveAlwaysSpills | kArmCalleeSaveRefSpills |
- (type == Runtime::kRefsAndArgs ? kArmCalleeSaveArgSpills : 0) |
- (type == Runtime::kSaveAll ? kArmCalleeSaveAllSpills : 0) |
+ (type == Runtime::kSaveRefsAndArgs ? kArmCalleeSaveArgSpills : 0) |
+ (type == Runtime::kSaveAllCalleeSaves ? kArmCalleeSaveAllSpills : 0) |
(type == Runtime::kSaveEverything ? kArmCalleeSaveEverythingSpills : 0);
}
constexpr uint32_t ArmCalleeSaveFpSpills(Runtime::CalleeSaveType type) {
return kArmCalleeSaveFpAlwaysSpills | kArmCalleeSaveFpRefSpills |
- (type == Runtime::kRefsAndArgs ? kArmCalleeSaveFpArgSpills: 0) |
- (type == Runtime::kSaveAll ? kArmCalleeSaveFpAllSpills : 0) |
+ (type == Runtime::kSaveRefsAndArgs ? kArmCalleeSaveFpArgSpills: 0) |
+ (type == Runtime::kSaveAllCalleeSaves ? kArmCalleeSaveFpAllSpills : 0) |
(type == Runtime::kSaveEverything ? kArmCalleeSaveFpEverythingSpills : 0);
}
diff --git a/runtime/arch/arm64/asm_support_arm64.h b/runtime/arch/arm64/asm_support_arm64.h
index 68d12e9..5e7b51d 100644
--- a/runtime/arch/arm64/asm_support_arm64.h
+++ b/runtime/arch/arm64/asm_support_arm64.h
@@ -19,9 +19,9 @@
#include "asm_support.h"
-#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 176
-#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 96
-#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 224
-#define FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE 512
+#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVES 176
+#define FRAME_SIZE_SAVE_REFS_ONLY 96
+#define FRAME_SIZE_SAVE_REFS_AND_ARGS 224
+#define FRAME_SIZE_SAVE_EVERYTHING 512
#endif // ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_H_
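
(Editor's note, hedged:) A C++ mirror of the renamed arm64 constants, for cross-checking against the assembly-side #if/#error tests in quick_entrypoints_arm64.S below — deliberately redundant, as both sides must agree:

    #include "asm_support_arm64.h"

    static_assert(FRAME_SIZE_SAVE_ALL_CALLEE_SAVES == 176, "matches .S check");
    static_assert(FRAME_SIZE_SAVE_REFS_ONLY == 96, "matches .S check");
    static_assert(FRAME_SIZE_SAVE_REFS_AND_ARGS == 224, "matches .S check");
    static_assert(FRAME_SIZE_SAVE_EVERYTHING == 512, "matches .S check");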
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 415bb71..35f5c56 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -21,25 +21,25 @@
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kSaveAll)
+ * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
*/
-.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+.macro SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
+ // art::Runtime** xIP0 = &art::Runtime::instance_
adrp xIP0, :got:_ZN3art7Runtime9instance_E
ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
// Our registers aren't intermixed - just spill in order.
- ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) .
+ ldr xIP0, [xIP0] // art::Runtime* xIP0 = art::Runtime::instance_;
- // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
- // Loads appropriate callee-save-method.
- ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET ]
+ // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveAllCalleeSaves];
+ ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET]
sub sp, sp, #176
.cfi_adjust_cfa_offset 176
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 176)
-#error "SAVE_ALL_CALLEE_SAVE_FRAME(ARM64) size not as expected."
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 176)
+#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(ARM64) size not as expected."
#endif
// Stack alignment filler [sp, #8].
@@ -74,7 +74,7 @@
.cfi_rel_offset x29, 160
.cfi_rel_offset x30, 168
- // Store ArtMethod* Runtime::callee_save_methods_[kRefsAndArgs].
+ // Store ArtMethod* Runtime::callee_save_methods_[kSaveAllCalleeSaves].
str xIP0, [sp]
// Place sp in Thread::Current()->top_quick_frame.
mov xIP0, sp
@@ -83,25 +83,25 @@
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsOnly).
+ * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly).
*/
-.macro SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+.macro SETUP_SAVE_REFS_ONLY_FRAME
+ // art::Runtime** xIP0 = &art::Runtime::instance_
adrp xIP0, :got:_ZN3art7Runtime9instance_E
ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
// Our registers aren't intermixed - just spill in order.
- ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) .
+ ldr xIP0, [xIP0] // art::Runtime* xIP0 = art::Runtime::instance_;
- // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefOnly] .
- // Loads appropriate callee-save-method.
- ldr xIP0, [xIP0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET ]
+ // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveRefsOnly];
+ ldr xIP0, [xIP0, RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET]
sub sp, sp, #96
.cfi_adjust_cfa_offset 96
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 96)
-#error "REFS_ONLY_CALLEE_SAVE_FRAME(ARM64) size not as expected."
+#if (FRAME_SIZE_SAVE_REFS_ONLY != 96)
+#error "FRAME_SIZE_SAVE_REFS_ONLY(ARM64) size not as expected."
#endif
// GP callee-saves.
@@ -126,7 +126,7 @@
.cfi_rel_offset x29, 80
.cfi_rel_offset x30, 88
- // Store ArtMethod* Runtime::callee_save_methods_[kRefsOnly].
+ // Store ArtMethod* Runtime::callee_save_methods_[kSaveRefsOnly].
stp xIP0, x20, [sp]
.cfi_rel_offset x20, 8
@@ -136,7 +136,7 @@
.endm
// TODO: Probably no need to restore registers preserved by aapcs64.
-.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+.macro RESTORE_SAVE_REFS_ONLY_FRAME
// Callee-saves.
ldr x20, [sp, #8]
.cfi_restore x20
@@ -165,24 +165,24 @@
.cfi_adjust_cfa_offset -96
.endm
-.macro POP_REFS_ONLY_CALLEE_SAVE_FRAME
+.macro POP_SAVE_REFS_ONLY_FRAME
add sp, sp, #96
.cfi_adjust_cfa_offset - 96
.endm
-.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+.macro RESTORE_SAVE_REFS_ONLY_FRAME_AND_RETURN
+ RESTORE_SAVE_REFS_ONLY_FRAME
ret
.endm
-.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
+.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL
sub sp, sp, #224
.cfi_adjust_cfa_offset 224
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 224)
-#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(ARM64) size not as expected."
+#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 224)
+#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(ARM64) size not as expected."
#endif
// Stack alignment filler [sp, #8].
@@ -235,30 +235,31 @@
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsAndArgs).
+ * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs).
*
* TODO This is probably too conservative - saving FP & LR.
*/
-.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+.macro SETUP_SAVE_REFS_AND_ARGS_FRAME
+ // art::Runtime** xIP0 = &art::Runtime::instance_
adrp xIP0, :got:_ZN3art7Runtime9instance_E
ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
// Our registers aren't intermixed - just spill in order.
- ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) .
+ ldr xIP0, [xIP0] // art::Runtime* xIP0 = art::Runtime::instance_;
- // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
- ldr xIP0, [xIP0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET ]
+ // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveRefsAndArgs];
+ ldr xIP0, [xIP0, RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET]
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
+ SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL
- str xIP0, [sp] // Store ArtMethod* Runtime::callee_save_methods_[kRefsAndArgs]
+ str xIP0, [sp] // Store ArtMethod* Runtime::callee_save_methods_[kSaveRefsAndArgs].
// Place sp in Thread::Current()->top_quick_frame.
mov xIP0, sp
str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
.endm
-.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_X0
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
+.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_X0
+ SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL
str x0, [sp, #0] // Store ArtMethod* to bottom of stack.
// Place sp in Thread::Current()->top_quick_frame.
mov xIP0, sp
@@ -266,7 +267,7 @@
.endm
// TODO: Probably no need to restore registers preserved by aapcs64.
-.macro RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME
// FP args.
ldp d0, d1, [sp, #16]
ldp d2, d3, [sp, #32]
@@ -320,32 +321,33 @@
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveEverything)
*/
-.macro SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+.macro SETUP_SAVE_EVERYTHING_FRAME
sub sp, sp, #512
.cfi_adjust_cfa_offset 512
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE != 512)
-#error "SAVE_EVERYTHING_CALLEE_SAVE_FRAME(ARM64) size not as expected."
+#if (FRAME_SIZE_SAVE_EVERYTHING != 512)
+#error "FRAME_SIZE_SAVE_EVERYTHING(ARM64) size not as expected."
#endif
// Save FP registers.
- stp d0, d1, [sp, #8]
- stp d2, d3, [sp, #24]
- stp d4, d5, [sp, #40]
- stp d6, d7, [sp, #56]
- stp d8, d9, [sp, #72]
- stp d10, d11, [sp, #88]
- stp d12, d13, [sp, #104]
- stp d14, d15, [sp, #120]
- stp d16, d17, [sp, #136]
- stp d18, d19, [sp, #152]
- stp d20, d21, [sp, #168]
- stp d22, d23, [sp, #184]
- stp d24, d25, [sp, #200]
- stp d26, d27, [sp, #216]
- stp d28, d29, [sp, #232]
- stp d30, d31, [sp, #248]
+ str d0, [sp, #8]
+ stp d1, d2, [sp, #16]
+ stp d3, d4, [sp, #32]
+ stp d5, d6, [sp, #48]
+ stp d7, d8, [sp, #64]
+ stp d9, d10, [sp, #80]
+ stp d11, d12, [sp, #96]
+ stp d13, d14, [sp, #112]
+ stp d15, d16, [sp, #128]
+ stp d17, d18, [sp, #144]
+ stp d19, d20, [sp, #160]
+ stp d21, d22, [sp, #176]
+ stp d23, d24, [sp, #192]
+ stp d25, d26, [sp, #208]
+ stp d27, d28, [sp, #224]
+ stp d29, d30, [sp, #240]
+ str d31, [sp, #256]
// Save core registers.
str x0, [sp, #264]
@@ -411,14 +413,14 @@
.cfi_rel_offset x29, 496
.cfi_rel_offset x30, 504
+ // art::Runtime** xIP0 = &art::Runtime::instance_
adrp xIP0, :got:_ZN3art7Runtime9instance_E
ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
- ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) .
+ ldr xIP0, [xIP0] // art::Runtime* xIP0 = art::Runtime::instance_;
- // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kSaveEverything] .
- // Loads appropriate callee-save-method.
- ldr xIP0, [xIP0, RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET ]
+ // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveEverything];
+ ldr xIP0, [xIP0, RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET]
// Store ArtMethod* Runtime::callee_save_methods_[kSaveEverything].
str xIP0, [sp]
@@ -427,24 +429,25 @@
str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
.endm
-.macro RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+.macro RESTORE_SAVE_EVERYTHING_FRAME
// Restore FP registers.
- ldp d0, d1, [sp, #8]
- ldp d2, d3, [sp, #24]
- ldp d4, d5, [sp, #40]
- ldp d6, d7, [sp, #56]
- ldp d8, d9, [sp, #72]
- ldp d10, d11, [sp, #88]
- ldp d12, d13, [sp, #104]
- ldp d14, d15, [sp, #120]
- ldp d16, d17, [sp, #136]
- ldp d18, d19, [sp, #152]
- ldp d20, d21, [sp, #168]
- ldp d22, d23, [sp, #184]
- ldp d24, d25, [sp, #200]
- ldp d26, d27, [sp, #216]
- ldp d28, d29, [sp, #232]
- ldp d30, d31, [sp, #248]
+ ldr d0, [sp, #8]
+ ldp d1, d2, [sp, #16]
+ ldp d3, d4, [sp, #32]
+ ldp d5, d6, [sp, #48]
+ ldp d7, d8, [sp, #64]
+ ldp d9, d10, [sp, #80]
+ ldp d11, d12, [sp, #96]
+ ldp d13, d14, [sp, #112]
+ ldp d15, d16, [sp, #128]
+ ldp d17, d18, [sp, #144]
+ ldp d19, d20, [sp, #160]
+ ldp d21, d22, [sp, #176]
+ ldp d23, d24, [sp, #192]
+ ldp d25, d26, [sp, #208]
+ ldp d27, d28, [sp, #224]
+ ldp d29, d30, [sp, #240]
+ ldr d31, [sp, #256]
// Restore core registers.
ldr x0, [sp, #264]
@@ -531,7 +534,7 @@
* exception is Thread::Current()->exception_
*/
.macro DELIVER_PENDING_EXCEPTION
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
mov x0, xSELF
// Point of no return.
@@ -566,7 +569,7 @@
.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
mov x0, xSELF // pass Thread::Current
b \cxx_name // \cxx_name(Thread*)
END \c_name
@@ -575,7 +578,7 @@
.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context.
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context.
mov x1, xSELF // pass Thread::Current.
b \cxx_name // \cxx_name(arg, Thread*).
brk 0
@@ -585,7 +588,7 @@
.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
mov x2, xSELF // pass Thread::Current
b \cxx_name // \cxx_name(arg1, arg2, Thread*)
brk 0
@@ -656,7 +659,7 @@
*/
.macro INVOKE_TRAMPOLINE_BODY cxx_name
.extern \cxx_name
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME // save callee saves in case allocation triggers GC
+ SETUP_SAVE_REFS_AND_ARGS_FRAME // save callee saves in case allocation triggers GC
// Helper signature is always
// (method_idx, *this_object, *caller_method, *self, sp)
@@ -664,7 +667,7 @@
mov x3, sp
bl \cxx_name // (method_idx, this, Thread*, SP)
mov xIP0, x1 // save Method*->code_
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
cbz x0, 1f // did we find the target? if not go to exception delivery
br xIP0 // tail call to target
1:
@@ -1315,18 +1318,18 @@
.Llock_stxr_fail:
b .Lretry_lock // retry
.Lslow_lock:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case we block
+ SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case we block
mov x1, xSELF // pass Thread::Current
bl artLockObjectFromCode // (Object* obj, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_lock_object
ENTRY art_quick_lock_object_no_inline
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case we block
+ SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case we block
mov x1, xSELF // pass Thread::Current
bl artLockObjectFromCode // (Object* obj, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_lock_object_no_inline
@@ -1381,18 +1384,18 @@
.Lunlock_stxr_fail:
b .Lretry_unlock // retry
.Lslow_unlock:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case exception allocation triggers GC
+ SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case exception allocation triggers GC
mov x1, xSELF // pass Thread::Current
bl artUnlockObjectFromCode // (Object* obj, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_unlock_object
ENTRY art_quick_unlock_object_no_inline
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case exception allocation triggers GC
+ SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case exception allocation triggers GC
mov x1, xSELF // pass Thread::Current
bl artUnlockObjectFromCode // (Object* obj, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_unlock_object_no_inline
@@ -1437,7 +1440,7 @@
.cfi_restore x1
.cfi_adjust_cfa_offset -32
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
mov x2, xSELF // pass Thread::Current
b artThrowClassCastException // (Class*, Class*, Thread*)
brk 0 // We should not return here...
@@ -1625,7 +1628,7 @@
.cfi_restore x1
.cfi_adjust_cfa_offset -32
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
mov x1, x2 // Pass value.
mov x2, xSELF // Pass Thread::Current.
b artThrowArrayStoreException // (Object*, Object*, Thread*).
@@ -1636,10 +1639,10 @@
.macro ONE_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
mov x1, xSELF // pass Thread::Current
bl \entrypoint // (uint32_t type_idx, Method* method, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
\return
END \name
.endm
@@ -1648,10 +1651,10 @@
.macro TWO_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
mov x2, xSELF // pass Thread::Current
bl \entrypoint // (uint32_t type_idx, Method* method, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
\return
END \name
.endm
@@ -1660,10 +1663,10 @@
.macro THREE_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
mov x3, xSELF // pass Thread::Current
bl \entrypoint
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
\return
END \name
.endm
@@ -1672,10 +1675,10 @@
.macro FOUR_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
mov x4, xSELF // pass Thread::Current
bl \entrypoint //
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
\return
DELIVER_PENDING_EXCEPTION
END \name
@@ -1685,11 +1688,11 @@
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
- ldr x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
+ SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
+ ldr x1, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] // Load referrer
mov x2, xSELF // pass Thread::Current
bl \entrypoint // (uint32_t type_idx, Method* method, Thread*, SP)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
\return
END \name
.endm
@@ -1697,11 +1700,11 @@
.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
- ldr x2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
+ SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
+ ldr x2, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] // Load referrer
mov x3, xSELF // pass Thread::Current
bl \entrypoint
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
\return
END \name
.endm
@@ -1709,11 +1712,11 @@
.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
- ldr x3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
+ SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
+ ldr x3, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] // Load referrer
mov x4, xSELF // pass Thread::Current
bl \entrypoint
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
\return
END \name
.endm
@@ -1771,12 +1774,12 @@
// This is separated out as the argument order is different.
.extern artSet64StaticFromCode
ENTRY art_quick_set64_static
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
- ldr x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
+ SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
+ ldr x1, [sp, #FRAME_SIZE_SAVE_REFS_ONLY] // Load referrer
// x2 contains the parameter
mov x3, xSELF // pass Thread::Current
bl artSet64StaticFromCode
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_set64_static
@@ -1788,7 +1791,20 @@
ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Generate the allocation entrypoints for each allocator.
-GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS
+// Comment out allocators that have arm64 specific asm.
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) implemented in asm
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab, RegionTLAB) implemented in asm
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
ENTRY art_quick_alloc_object_rosalloc
@@ -1888,13 +1904,80 @@
mov x0, x3 // Set the return value and return.
ret
.Lart_quick_alloc_object_rosalloc_slow_path:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
mov x2, xSELF // pass Thread::Current
bl artAllocObjectFromCodeRosAlloc // (uint32_t type_idx, Method* method, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_rosalloc
+
+// The common fast path code for art_quick_alloc_array_region_tlab.
+.macro ALLOC_ARRAY_TLAB_FAST_PATH slowPathLabel, xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
+ // Check null class
+ cbz \wClass, \slowPathLabel
+ ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED \slowPathLabel, \xClass, \wClass, \xCount, \wCount, \xTemp0, \wTemp0, \xTemp1, \wTemp1, \xTemp2, \wTemp2
+.endm
+
+// The common fast path code for art_quick_alloc_array_region_tlab.
+.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED slowPathLabel, xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
+ // Array classes are never finalizable or uninitialized, no need to check.
+ ldr \wTemp0, [\xClass, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET] // Load component type
+ UNPOISON_HEAP_REF \wTemp0
+ ldr \wTemp0, [\xTemp0, #MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET]
+ lsr \xTemp0, \xTemp0, #PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT // Component size shift is in high 16
+ // bits.
+ // xCount is holding a 32 bit value,
+ // it can not overflow.
+ lsl \xTemp1, \xCount, \xTemp0 // Calculate data size
+ // Add array data offset and alignment.
+ add \xTemp1, \xTemp1, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+#if MIRROR_LONG_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4
+#error Long array data offset must be 4 greater than int array data offset.
+#endif
+
+ add \xTemp0, \xTemp0, #1 // Add 4 to the size only if the
+ // component size shift is 3
+ // (for 64 bit alignment).
+ and \xTemp0, \xTemp0, #4
+ add \xTemp1, \xTemp1, \xTemp0
+ and \xTemp1, \xTemp1, #OBJECT_ALIGNMENT_MASK_TOGGLED64 // Apply alignment mask
+ // (addr + 7) & ~7. The mask must
+ // be 64 bits to keep high bits in
+ // case of overflow.
+ // Negative sized arrays are handled here since xCount holds a zero extended 32 bit value.
+ // Negative ints become large 64 bit unsigned ints which will always be larger than max signed
+ // 32 bit int. Since the max shift for arrays is 3, it can not become a negative 64 bit int.
+ cmp \xTemp1, #MIN_LARGE_OBJECT_THRESHOLD // Possibly a large object, go slow
+ bhs \slowPathLabel // path.
+
+ ldr \xTemp0, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Check tlab for space, note that
+ // we use (end - begin) to handle
+ // negative size arrays. It is
+ // assumed that a negative size will
+ // always be greater unsigned than
+ // region size.
+ ldr \xTemp2, [xSELF, #THREAD_LOCAL_END_OFFSET]
+ sub \xTemp2, \xTemp2, \xTemp0
+ cmp \xTemp1, \xTemp2
+ bhi \slowPathLabel
+ // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
+ // Move old thread_local_pos to x0
+ // for the return value.
+ mov x0, \xTemp0
+ add \xTemp0, \xTemp0, \xTemp1
+ str \xTemp0, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
+ ldr \xTemp0, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
+ add \xTemp0, \xTemp0, #1
+ str \xTemp0, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
+ POISON_HEAP_REF \wClass
+ str \wClass, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
+ str \wCount, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // Store the array length.
+ // Fence.
+ dmb ishst
+ ret
+.endm
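+
+// (Editor's note, hedged:) The fast path above compresses a fair amount of
+// reasoning into a few instructions. A C sketch follows; field names and the
+// large-object threshold approximate the THREAD_LOCAL_* / MIRROR_* offsets
+// used by the assembly — this is an illustration, not ART's C++ code.
+//
+//     struct Class  { Class* component_type_; uint32_t primitive_type_; };
+//     struct Array  { Class* klass_; int32_t length_; };
+//     struct Thread { uint8_t* tlab_pos_; uint8_t* tlab_end_; size_t tlab_objects_; };
+//
+//     constexpr uint64_t kIntArrayDataOffset = 12;   // assumed MIRROR_INT_ARRAY_DATA_OFFSET
+//     constexpr uint64_t kAlignMask = 7;             // OBJECT_ALIGNMENT_MASK
+//     constexpr uint64_t kMinLargeObject = 3 * 4096; // assumed MIN_LARGE_OBJECT_THRESHOLD
+//
+//     Array* AllocArrayTlabFastPath(Thread* self, Class* klass, uint32_t count) {
+//       // Component size shift lives in the high 16 bits of primitive_type_.
+//       uint64_t shift = klass->component_type_->primitive_type_ >> 16;
+//       // count is zero-extended: a negative int32 becomes a huge uint64 and is
+//       // rejected by the large-object test; max shift is 3, so no overflow.
+//       uint64_t size = (uint64_t{count} << shift) + kIntArrayDataOffset + kAlignMask;
+//       size += (shift + 1) & 4;   // +4 only when shift == 3 (long/double arrays)
+//       size &= ~kAlignMask;       // (x + 7) & ~7, kept in 64 bits
+//       if (size >= kMinLargeObject) return nullptr;                 // slow path
+//       // (end - pos) keeps the comparison safe for oversized requests too.
+//       if (size > uint64_t(self->tlab_end_ - self->tlab_pos_)) return nullptr;
+//       uint8_t* addr = self->tlab_pos_;
+//       self->tlab_pos_ = addr + size;   // bump the TLAB pointer
+//       ++self->tlab_objects_;
+//       Array* a = reinterpret_cast<Array*>(addr);
+//       a->klass_ = klass;               // real code also poisons the heap ref
+//       a->length_ = int32_t(count);
+//       // the asm issues 'dmb ishst' here before the object escapes
+//       return a;
+//     }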
+
// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
//
// x0: type_idx/return value, x1: ArtMethod*, x2: Class*, xSELF(x19): Thread::Current
@@ -1902,8 +1985,11 @@
// Need to preserve x0 and x1 to the slow path.
.macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel
cbz x2, \slowPathLabel // Check null class
- // Check class status.
- ldr w3, [x2, #MIRROR_CLASS_STATUS_OFFSET]
+ ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED \slowPathLabel
+.endm
+
+.macro ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED slowPathLabel
+ ldr w3, [x2, #MIRROR_CLASS_STATUS_OFFSET] // Check class status.
cmp x3, #MIRROR_CLASS_STATUS_INITIALIZED
bne \slowPathLabel
// Add a fake dependence from the
@@ -1916,6 +2002,10 @@
// a load-acquire for the status).
eor x3, x3, x3
add x2, x2, x3
+ ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED \slowPathLabel
+.endm
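+
+// (Editor's note, hedged:) The eor/add pair preserved above is worth spelling
+// out: XORing the freshly loaded status with itself always yields zero, but
+// the zero is data-dependent on the load, and adding it to the class pointer
+// makes later accesses through x2 address-dependent on the status load. ARM
+// orders dependent loads, so this approximates a load-acquire without a dmb.
+// A C-level rendering — only meaningful at the instruction level, since a
+// real compiler would fold it away:
+//
+//     uint32_t status = klass->status_;               // relaxed load
+//     uintptr_t dep = status ^ status;                // 0, but depends on 'status'
+//     klass = reinterpret_cast<Class*>(
+//         reinterpret_cast<uintptr_t>(klass) + dep);  // +0 with address dependency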
+
+.macro ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED slowPathLabel
// Check access flags has
// kAccClassIsFinalizable.
ldr w3, [x2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
@@ -1970,39 +2060,44 @@
ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
.Lart_quick_alloc_object_tlab_slow_path:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // Save callee saves in case of GC.
+ SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC.
mov x2, xSELF // Pass Thread::Current.
bl artAllocObjectFromCodeTLAB // (uint32_t type_idx, Method* method, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_tlab
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
-ENTRY art_quick_alloc_object_region_tlab
+// The common code for art_quick_alloc_object_*region_tlab
+.macro GENERATE_ALLOC_OBJECT_REGION_TLAB name, entrypoint, fast_path, is_resolved
+ENTRY \name
// Fast path region tlab allocation.
- // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
+ // x0: type_idx/resolved class/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
+ // If is_resolved is 1 then x0 is the resolved type, otherwise it is the index.
// x2-x7: free.
#if !defined(USE_READ_BARRIER)
mvn x0, xzr // Read barrier must be enabled here.
ret // Return -1.
#endif
+.if \is_resolved
+ mov x2, x0 // class is actually stored in x0 already
+.else
ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
// Load the class (x2)
ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
-
+.endif
// Most common case: GC is not marking.
ldr w3, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
- cbnz x3, .Lart_quick_alloc_object_region_tlab_marking
-.Lart_quick_alloc_object_region_tlab_do_allocation:
- ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
-.Lart_quick_alloc_object_region_tlab_marking:
+ cbnz x3, .Lmarking\name
+.Ldo_allocation\name:
+ \fast_path .Lslow_path\name
+.Lmarking\name:
// GC is marking, check the lock word of the class for the mark bit.
// If the class is null, go slow path. The check is required to read the lock word.
- cbz w2, .Lart_quick_alloc_object_region_tlab_slow_path
+ cbz w2, .Lslow_path\name
// Class is not null, check mark bit in lock word.
ldr w3, [x2, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
// If the bit is not zero, do the allocation.
- tbnz w3, #LOCK_WORD_MARK_BIT_SHIFT, .Lart_quick_alloc_object_region_tlab_do_allocation
+ tbnz w3, #LOCK_WORD_MARK_BIT_SHIFT, .Ldo_allocation\name
// The read barrier slow path. Mark
// the class.
stp x0, x1, [sp, #-32]! // Save registers (x0, x1, lr).
@@ -2013,32 +2108,97 @@
ldp x0, x1, [sp, #0] // Restore registers.
ldr xLR, [sp, #16]
add sp, sp, #32
- b .Lart_quick_alloc_object_region_tlab_do_allocation
-.Lart_quick_alloc_object_region_tlab_slow_path:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // Save callee saves in case of GC.
+ b .Ldo_allocation\name
+.Lslow_path\name:
+ SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC.
mov x2, xSELF // Pass Thread::Current.
- bl artAllocObjectFromCodeRegionTLAB // (uint32_t type_idx, Method* method, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ bl \entrypoint // (uint32_t type_idx, Method* method, Thread*)
+ RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_region_tlab
+END \name
+.endm
+
+GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_region_tlab, artAllocObjectFromCodeRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH, 0
+GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 1
+GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED, 1
+
+// The common code for art_quick_alloc_array_*region_tlab
+.macro GENERATE_ALLOC_ARRAY_REGION_TLAB name, entrypoint, fast_path, is_resolved
+ENTRY \name
+ // Fast path array allocation for region tlab allocation.
+ // x0: uint32_t type_idx
+ // x1: int32_t component_count
+ // x2: ArtMethod* method
+ // x3-x7: free.
+#if !defined(USE_READ_BARRIER)
+ mvn x0, xzr // Read barrier must be enabled here.
+ ret // Return -1.
+#endif
+.if \is_resolved
+ mov x3, x0
+ // If already resolved, class is stored in x0
+.else
+ ldr x3, [x2, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
+ // Load the class (x3)
+ ldr w3, [x3, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
+.endif
+ // Most common case: GC is not marking.
+ ldr w4, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
+ cbnz x4, .Lmarking\name
+.Ldo_allocation\name:
+ \fast_path .Lslow_path\name, x3, w3, x1, w1, x4, w4, x5, w5, x6, w6
+.Lmarking\name:
+ // GC is marking, check the lock word of the class for the mark bit.
+ // If the class is null, go slow path. The check is required to read the lock word.
+ cbz w3, .Lslow_path\name
+ // Class is not null, check mark bit in lock word.
+ ldr w4, [x3, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ // If the bit is not zero, do the allocation.
+ tbnz w4, #LOCK_WORD_MARK_BIT_SHIFT, .Ldo_allocation\name
+ // The read barrier slow path. Mark
+ // the class.
+ stp x0, x1, [sp, #-32]! // Save registers (x0, x1, x2, lr).
+ stp x2, xLR, [sp, #16]
+ mov x0, x3 // Pass the class as the first param.
+ bl artReadBarrierMark
+ mov x3, x0 // Get the (marked) class back.
+ ldp x2, xLR, [sp, #16]
+ ldp x0, x1, [sp], #32 // Restore registers.
+ b .Ldo_allocation\name
+.Lslow_path\name:
+ // x0: uint32_t type_idx / mirror::Class* klass (if resolved)
+ // x1: int32_t component_count
+ // x2: ArtMethod* method
+ // x3: Thread* self
+ SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
+ mov x3, xSELF // pass Thread::Current
+ bl \entrypoint
+ RESTORE_SAVE_REFS_ONLY_FRAME
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END \name
+.endm
+
+GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_region_tlab, artAllocArrayFromCodeRegionTLAB, ALLOC_ARRAY_TLAB_FAST_PATH, 0
+// TODO: art_quick_alloc_array_resolved_region_tlab seems to not get called. Investigate compiler.
+GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED, 1
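+
+// (Editor's note, hedged:) Both generators share the same read-barrier gate
+// before the fast path; a C sketch, with names approximating the offsets used
+// above and artReadBarrierMark's real signature taking mirror::Object*:
+//
+//     extern "C" Class* artReadBarrierMark(Class* klass);  // simplified
+//
+//     // Returns nullptr when the caller must take the slow path.
+//     Class* ReadBarrierGate(Thread* self, Class* klass) {
+//       if (!self->is_gc_marking) return klass;  // THREAD_IS_GC_MARKING_OFFSET
+//       if (klass == nullptr) return nullptr;    // need a valid lock word
+//       uint32_t lw = klass->lock_word;          // MIRROR_OBJECT_LOCK_WORD_OFFSET
+//       if (((lw >> kLockWordMarkBitShift) & 1) == 0) {
+//         klass = artReadBarrierMark(klass);     // mark; may return forwarded copy
+//       }
+//       return klass;                            // proceed to the TLAB fast path
+//     }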
/*
* Called by managed code when the thread has been asked to suspend.
*/
.extern artTestSuspendFromCode
ENTRY art_quick_test_suspend
- SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME // save callee saves for stack crawl
+ SETUP_SAVE_EVERYTHING_FRAME // save callee saves for stack crawl
mov x0, xSELF
bl artTestSuspendFromCode // (Thread*)
- RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_EVERYTHING_FRAME
ret
END art_quick_test_suspend
ENTRY art_quick_implicit_suspend
mov x0, xSELF
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves for stack crawl
+ SETUP_SAVE_REFS_ONLY_FRAME // save callee saves for stack crawl
bl artTestSuspendFromCode // (Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+ RESTORE_SAVE_REFS_ONLY_FRAME_AND_RETURN
END art_quick_implicit_suspend
/*
@@ -2048,17 +2208,17 @@
*/
.extern artQuickProxyInvokeHandler
ENTRY art_quick_proxy_invoke_handler
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_X0
+ SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_X0
mov x2, xSELF // pass Thread::Current
mov x3, sp // pass SP
bl artQuickProxyInvokeHandler // (Method* proxy method, receiver, Thread*, SP)
ldr x2, [xSELF, THREAD_EXCEPTION_OFFSET]
cbnz x2, .Lexception_in_proxy // success if no exception is pending
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME // Restore frame
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME // Restore frame
fmov d0, x0 // Store result in d0 in case it was float or double
ret // return on success
.Lexception_in_proxy:
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
DELIVER_PENDING_EXCEPTION
END art_quick_proxy_invoke_handler
@@ -2098,17 +2258,17 @@
END art_quick_imt_conflict_trampoline
ENTRY art_quick_resolution_trampoline
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_AND_ARGS_FRAME
mov x2, xSELF
mov x3, sp
bl artQuickResolutionTrampoline // (called, receiver, Thread*, SP)
cbz x0, 1f
mov xIP0, x0 // Remember returned code pointer in xIP0.
ldr x0, [sp, #0] // artQuickResolutionTrampoline puts called method in *SP.
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
br xIP0
1:
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
DELIVER_PENDING_EXCEPTION
END art_quick_resolution_trampoline
@@ -2168,7 +2328,7 @@
* Called to do a generic JNI down-call
*/
ENTRY art_quick_generic_jni_trampoline
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_X0
+ SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_X0
// Save SP , so we can have static CFI info.
mov x28, sp
@@ -2240,7 +2400,7 @@
.cfi_def_cfa_register sp
// Tear down the callee-save frame.
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
// store into fpr, for when it's a fpr return...
fmov d0, x0
@@ -2262,7 +2422,7 @@
* x1..x7, d0..d7 = arguments to that method.
*/
ENTRY art_quick_to_interpreter_bridge
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME // Set up frame and save arguments.
+ SETUP_SAVE_REFS_AND_ARGS_FRAME // Set up frame and save arguments.
// x0 will contain mirror::ArtMethod* method.
mov x1, xSELF // pass Thread::Current
@@ -2272,7 +2432,7 @@
// mirror::ArtMethod** sp)
bl artQuickToInterpreterBridge
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME // TODO: no need to restore arguments in this case.
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME // TODO: no need to restore arguments in this case.
fmov d0, x0
@@ -2285,7 +2445,7 @@
//
.extern artInstrumentationMethodEntryFromCode
ENTRY art_quick_instrumentation_entry
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_AND_ARGS_FRAME
mov x20, x0 // Preserve method reference in a callee-save.
@@ -2296,7 +2456,7 @@
mov xIP0, x0 // x0 = result of call.
mov x0, x20 // Reload method reference.
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME // Note: will restore xSELF
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME // Note: will restore xSELF
adr xLR, art_quick_instrumentation_exit
br xIP0 // Tail-call method with lr set to art_quick_instrumentation_exit.
END art_quick_instrumentation_entry
@@ -2305,7 +2465,7 @@
ENTRY art_quick_instrumentation_exit
mov xLR, #0 // Clobber LR for later checks.
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_ONLY_FRAME
// We need to save x0 and d0. We could use a callee-save from SETUP_SAVE_REFS_ONLY_FRAME, but then
// we would need to fully restore it. As there are a lot of callee-save registers, it seems
@@ -2328,7 +2488,7 @@
ldr x0, [sp], 16 // Restore integer result, and drop stack area.
.cfi_adjust_cfa_offset 16
- POP_REFS_ONLY_CALLEE_SAVE_FRAME
+ POP_SAVE_REFS_ONLY_FRAME
br xIP0 // Tail-call out.
END art_quick_instrumentation_exit
@@ -2339,7 +2499,7 @@
*/
.extern artDeoptimize
ENTRY art_quick_deoptimize
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
mov x0, xSELF // Pass thread.
bl artDeoptimize // artDeoptimize(Thread*)
brk 0
@@ -2351,7 +2511,7 @@
*/
.extern artDeoptimizeFromCompiledCode
ENTRY art_quick_deoptimize_from_compiled_code
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
mov x0, xSELF // Pass thread.
bl artDeoptimizeFromCompiledCode // artDeoptimizeFromCompiledCode(Thread*)
brk 0
diff --git a/runtime/arch/arm64/quick_method_frame_info_arm64.h b/runtime/arch/arm64/quick_method_frame_info_arm64.h
index 188e46e..36f283b 100644
--- a/runtime/arch/arm64/quick_method_frame_info_arm64.h
+++ b/runtime/arch/arm64/quick_method_frame_info_arm64.h
@@ -78,15 +78,15 @@
constexpr uint32_t Arm64CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
return kArm64CalleeSaveAlwaysSpills | kArm64CalleeSaveRefSpills |
- (type == Runtime::kRefsAndArgs ? kArm64CalleeSaveArgSpills : 0) |
- (type == Runtime::kSaveAll ? kArm64CalleeSaveAllSpills : 0) |
+ (type == Runtime::kSaveRefsAndArgs ? kArm64CalleeSaveArgSpills : 0) |
+ (type == Runtime::kSaveAllCalleeSaves ? kArm64CalleeSaveAllSpills : 0) |
(type == Runtime::kSaveEverything ? kArm64CalleeSaveEverythingSpills : 0);
}
constexpr uint32_t Arm64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
return kArm64CalleeSaveFpAlwaysSpills | kArm64CalleeSaveFpRefSpills |
- (type == Runtime::kRefsAndArgs ? kArm64CalleeSaveFpArgSpills: 0) |
- (type == Runtime::kSaveAll ? kArm64CalleeSaveFpAllSpills : 0) |
+ (type == Runtime::kSaveRefsAndArgs ? kArm64CalleeSaveFpArgSpills: 0) |
+ (type == Runtime::kSaveAllCalleeSaves ? kArm64CalleeSaveFpAllSpills : 0) |
(type == Runtime::kSaveEverything ? kArm64CalleeSaveFpEverythingSpills : 0);
}
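[Editor's note] The renamed spill-mask helpers above feed a frame-size computation of roughly the following shape. This is an editorial paraphrase with PopCount/RoundUp spelled out so the snippet is self-contained, not the verbatim ART helper:

    // One 8-byte slot per spilled core/FP register, plus one for the
    // ArtMethod*, rounded up to the 16-byte AArch64 stack alignment.
    constexpr unsigned PopCount(unsigned x) {
      return x == 0u ? 0u : (x & 1u) + PopCount(x >> 1);
    }
    constexpr unsigned long RoundUp(unsigned long v, unsigned long n) {
      return ((v + n - 1) / n) * n;
    }
    constexpr unsigned long Arm64FrameSize(unsigned core_spills, unsigned fp_spills) {
      return RoundUp((PopCount(core_spills) + PopCount(fp_spills) + 1u) * 8ul, 16ul);
    }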
diff --git a/runtime/arch/mips/asm_support_mips.h b/runtime/arch/mips/asm_support_mips.h
index 2ef45f5..135b074 100644
--- a/runtime/arch/mips/asm_support_mips.h
+++ b/runtime/arch/mips/asm_support_mips.h
@@ -19,9 +19,9 @@
#include "asm_support.h"
-#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 96
-#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 48
-#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 80
-#define FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE 256
+#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVES 96
+#define FRAME_SIZE_SAVE_REFS_ONLY 48
+#define FRAME_SIZE_SAVE_REFS_AND_ARGS 80
+#define FRAME_SIZE_SAVE_EVERYTHING 256
#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
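[Editor's note] The renamed FRAME_SIZE_* constants are consumed from assembly through the preprocessor guards in the .S files below ("Ugly compile-time check, but we only have the preprocessor"); on the C++ side the same consistency guard can be a static_assert. A sketch, assuming asm_support_mips.h is included:

    static_assert(FRAME_SIZE_SAVE_REFS_ONLY == 48,
                  "MIPS SaveRefsOnly frame size out of sync with the stubs");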
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 9b24128..3d393f6 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -30,19 +30,19 @@
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kSaveAll)
+ * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
* Callee-save: $s0-$s8 + $gp + $ra, 11 total + 1 word for Method*
* Clobbers $t0 and $sp
* Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
- * Reserves FRAME_SIZE_SAVE_ALL_CALLEE_SAVE + ARG_SLOT_SIZE bytes on the stack
+ * Reserves FRAME_SIZE_SAVE_ALL_CALLEE_SAVES + ARG_SLOT_SIZE bytes on the stack
*/
-.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+.macro SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
addiu $sp, $sp, -96
.cfi_adjust_cfa_offset 96
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 96)
-#error "SAVE_ALL_CALLEE_SAVE_FRAME(MIPS) size not as expected."
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 96)
+#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(MIPS) size not as expected."
#endif
sw $ra, 92($sp)
@@ -79,7 +79,7 @@
lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
lw $t0, 0($t0)
- lw $t0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET($t0)
+ lw $t0, RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET($t0)
sw $t0, 0($sp) # Place Method* at bottom of stack.
sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
addiu $sp, $sp, -ARG_SLOT_SIZE # reserve argument slots on the stack
@@ -88,20 +88,20 @@
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsOnly). Restoration assumes non-moving GC.
+ * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly). Restoration assumes non-moving GC.
* Does not include rSUSPEND or rSELF
* callee-save: $s2-$s8 + $gp + $ra, 9 total + 2 words padding + 1 word to hold Method*
* Clobbers $t0 and $sp
* Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
- * Reserves FRAME_SIZE_REFS_ONLY_CALLEE_SAVE + ARG_SLOT_SIZE bytes on the stack
+ * Reserves FRAME_SIZE_SAVE_REFS_ONLY + ARG_SLOT_SIZE bytes on the stack
*/
-.macro SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+.macro SETUP_SAVE_REFS_ONLY_FRAME
addiu $sp, $sp, -48
.cfi_adjust_cfa_offset 48
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 48)
-#error "REFS_ONLY_CALLEE_SAVE_FRAME(MIPS) size not as expected."
+#if (FRAME_SIZE_SAVE_REFS_ONLY != 48)
+#error "FRAME_SIZE_SAVE_REFS_ONLY(MIPS) size not as expected."
#endif
sw $ra, 44($sp)
@@ -126,14 +126,14 @@
lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
lw $t0, 0($t0)
- lw $t0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($t0)
+ lw $t0, RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET($t0)
sw $t0, 0($sp) # Place Method* at bottom of stack.
sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
addiu $sp, $sp, -ARG_SLOT_SIZE # reserve argument slots on the stack
.cfi_adjust_cfa_offset ARG_SLOT_SIZE
.endm
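[Editor's note] The three loads at the end of each SETUP_*_FRAME macro fetch Runtime::instance_ through the GOT and then the per-type callee-save ArtMethod* at the renamed RUNTIME_*_METHOD_OFFSET. In C++ terms the sequence amounts to the sketch below; the member name is illustrative, not the real Runtime layout:

    struct ArtMethod;
    struct Runtime {
      static Runtime* instance_;            // _ZN3art7Runtime9instance_E
      ArtMethod* save_refs_only_method_;    // at RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET
    };
    inline void PlaceMethodAtFrameBottom(void** sp) {
      *sp = Runtime::instance_->save_refs_only_method_;  // sw $t0, 0($sp)
    }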
-.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+.macro RESTORE_SAVE_REFS_ONLY_FRAME
addiu $sp, $sp, ARG_SLOT_SIZE # remove argument slots on the stack
.cfi_adjust_cfa_offset -ARG_SLOT_SIZE
lw $ra, 44($sp)
@@ -158,24 +158,24 @@
.cfi_adjust_cfa_offset -48
.endm
-.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+.macro RESTORE_SAVE_REFS_ONLY_FRAME_AND_RETURN
+ RESTORE_SAVE_REFS_ONLY_FRAME
jalr $zero, $ra
nop
.endm
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsAndArgs).
+ * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs).
* callee-save: $a1-$a3, $s2-$s8 + $gp + $ra, 12 total + 3 words padding + method*
*/
-.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
+.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
addiu $sp, $sp, -80
.cfi_adjust_cfa_offset 80
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 80)
-#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(MIPS) size not as expected."
+#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 80)
+#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(MIPS) size not as expected."
#endif
sw $ra, 76($sp)
@@ -209,17 +209,17 @@
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes non-moving GC.
+ * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs). Restoration assumes non-moving GC.
* callee-save: $a1-$a3, $f12-$f15, $s2-$s8 + $gp + $ra, 12 total + 3 words padding + method*
* Clobbers $t0 and $sp
* Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
- * Reserves FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE + ARG_SLOT_SIZE bytes on the stack
+ * Reserves FRAME_SIZE_SAVE_REFS_AND_ARGS + ARG_SLOT_SIZE bytes on the stack
*/
-.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
+.macro SETUP_SAVE_REFS_AND_ARGS_FRAME
+ SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
lw $t0, 0($t0)
- lw $t0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET($t0)
+ lw $t0, RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET($t0)
sw $t0, 0($sp) # Place Method* at bottom of stack.
sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
addiu $sp, $sp, -ARG_SLOT_SIZE # reserve argument slots on the stack
@@ -228,22 +228,22 @@
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes non-moving GC.
+ * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs). Restoration assumes non-moving GC.
* callee-save: $a1-$a3, $f12-$f15, $s2-$s8 + $gp + $ra, 12 total + 3 words padding + method*
* Clobbers $sp
* Use $a0 as the Method* and loads it into bottom of stack.
* Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
- * Reserves FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE + ARG_SLOT_SIZE bytes on the stack
+ * Reserves FRAME_SIZE_SAVE_REFS_AND_ARGS + ARG_SLOT_SIZE bytes on the stack
*/
-.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_A0
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
+.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
+ SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
sw $a0, 0($sp) # Place Method* at bottom of stack.
sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
addiu $sp, $sp, -ARG_SLOT_SIZE # reserve argument slots on the stack
.cfi_adjust_cfa_offset ARG_SLOT_SIZE
.endm
-.macro RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME
addiu $sp, $sp, ARG_SLOT_SIZE # remove argument slots on the stack
.cfi_adjust_cfa_offset -ARG_SLOT_SIZE
lw $ra, 76($sp)
@@ -283,16 +283,16 @@
* 28 (GPR) + 32 (FPR) + 3 words for padding and 1 word for Method*
* Clobbers $t0 and $t1.
* Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
- * Reserves FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE + ARG_SLOT_SIZE bytes on the stack.
+ * Reserves FRAME_SIZE_SAVE_EVERYTHING + ARG_SLOT_SIZE bytes on the stack.
* This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP.
*/
-.macro SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+.macro SETUP_SAVE_EVERYTHING_FRAME
addiu $sp, $sp, -256
.cfi_adjust_cfa_offset 256
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE != 256)
-#error "SAVE_EVERYTHING_CALLEE_SAVE_FRAME(MIPS) size not as expected."
+#if (FRAME_SIZE_SAVE_EVERYTHING != 256)
+#error "FRAME_SIZE_SAVE_EVERYTHING(MIPS) size not as expected."
#endif
sw $ra, 252($sp)
@@ -381,14 +381,14 @@
lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
lw $t0, 0($t0)
- lw $t0, RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET($t0)
+ lw $t0, RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET($t0)
sw $t0, 0($sp) # Place Method* at bottom of stack.
sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
addiu $sp, $sp, -ARG_SLOT_SIZE # reserve argument slots on the stack
.cfi_adjust_cfa_offset ARG_SLOT_SIZE
.endm
-.macro RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+.macro RESTORE_SAVE_EVERYTHING_FRAME
addiu $sp, $sp, ARG_SLOT_SIZE # remove argument slots on the stack
.cfi_adjust_cfa_offset -ARG_SLOT_SIZE
@@ -478,7 +478,7 @@
* exception is Thread::Current()->exception_
*/
.macro DELIVER_PENDING_EXCEPTION
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME # save callee saves for throw
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME # save callee saves for throw
la $t9, artDeliverPendingExceptionFromCode
jalr $zero, $t9 # artDeliverPendingExceptionFromCode(Thread*)
move $a0, rSELF # pass Thread::Current
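[Editor's note] DELIVER_PENDING_EXCEPTION tail-calls into the runtime and never returns to the stub. A declaration-only sketch of the entrypoint it targets (stand-in Thread type):

    struct Thread;
    extern "C" {
    // Reads Thread::Current()->exception_ and unwinds to the catch handler.
    [[noreturn]] void artDeliverPendingExceptionFromCode(Thread* self);
    }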
@@ -486,7 +486,7 @@
.macro RETURN_IF_NO_EXCEPTION
lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
bnez $t0, 1f # success if no exception is pending
nop
jalr $zero, $ra
@@ -496,7 +496,7 @@
.endm
.macro RETURN_IF_ZERO
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
bnez $v0, 1f # success?
nop
jalr $zero, $ra # return on success
@@ -506,7 +506,7 @@
.endm
.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
beqz $v0, 1f # success?
nop
jalr $zero, $ra # return on success
@@ -686,7 +686,7 @@
* the bottom of the thread. On entry a0 holds Throwable*
*/
ENTRY art_quick_deliver_exception
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
la $t9, artDeliverExceptionFromCode
jalr $zero, $t9 # artDeliverExceptionFromCode(Throwable*, Thread*)
move $a1, rSELF # pass Thread::Current
@@ -697,7 +697,7 @@
*/
.extern artThrowNullPointerExceptionFromCode
ENTRY art_quick_throw_null_pointer_exception
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
la $t9, artThrowNullPointerExceptionFromCode
jalr $zero, $t9 # artThrowNullPointerExceptionFromCode(Thread*)
move $a0, rSELF # pass Thread::Current
@@ -709,7 +709,7 @@
*/
.extern artThrowNullPointerExceptionFromSignal
ENTRY art_quick_throw_null_pointer_exception_from_signal
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
la $t9, artThrowNullPointerExceptionFromSignal
jalr $zero, $t9 # artThrowNullPointerExceptionFromSignal(uintptr_t, Thread*)
move $a1, rSELF # pass Thread::Current
@@ -720,7 +720,7 @@
*/
.extern artThrowDivZeroFromCode
ENTRY art_quick_throw_div_zero
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
la $t9, artThrowDivZeroFromCode
jalr $zero, $t9 # artThrowDivZeroFromCode(Thread*)
move $a0, rSELF # pass Thread::Current
@@ -731,7 +731,7 @@
*/
.extern artThrowArrayBoundsFromCode
ENTRY art_quick_throw_array_bounds
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
la $t9, artThrowArrayBoundsFromCode
jalr $zero, $t9 # artThrowArrayBoundsFromCode(index, limit, Thread*)
move $a2, rSELF # pass Thread::Current
@@ -743,7 +743,7 @@
*/
.extern artThrowStringBoundsFromCode
ENTRY art_quick_throw_string_bounds
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
la $t9, artThrowStringBoundsFromCode
jalr $zero, $t9 # artThrowStringBoundsFromCode(index, limit, Thread*)
move $a2, rSELF # pass Thread::Current
@@ -754,7 +754,7 @@
*/
.extern artThrowStackOverflowFromCode
ENTRY art_quick_throw_stack_overflow
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
la $t9, artThrowStackOverflowFromCode
jalr $zero, $t9 # artThrowStackOverflowFromCode(Thread*)
move $a0, rSELF # pass Thread::Current
@@ -765,7 +765,7 @@
*/
.extern artThrowNoSuchMethodFromCode
ENTRY art_quick_throw_no_such_method
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
la $t9, artThrowNoSuchMethodFromCode
jalr $zero, $t9 # artThrowNoSuchMethodFromCode(method_idx, Thread*)
move $a1, rSELF # pass Thread::Current
@@ -788,13 +788,13 @@
*/
.macro INVOKE_TRAMPOLINE_BODY cxx_name
.extern \cxx_name
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME # save callee saves in case allocation triggers GC
+ SETUP_SAVE_REFS_AND_ARGS_FRAME # save callee saves in case allocation triggers GC
move $a2, rSELF # pass Thread::Current
la $t9, \cxx_name
jalr $t9 # (method_idx, this, Thread*, $sp)
addiu $a3, $sp, ARG_SLOT_SIZE # pass $sp (remove arg slots)
move $a0, $v0 # save target Method*
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
beqz $v0, 1f
move $t9, $v1 # save $v0->code_
jalr $zero, $t9
@@ -1105,11 +1105,11 @@
*/
.extern artHandleFillArrayDataFromCode
ENTRY art_quick_handle_fill_data
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
+ lw $a2, 0($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC
la $t9, artHandleFillArrayDataFromCode
- jalr $t9 # (payload offset, Array*, method, Thread*)
- move $a3, rSELF # pass Thread::Current
+ jalr $t9 # (payload offset, Array*, method, Thread*)
+ move $a3, rSELF # pass Thread::Current
RETURN_IF_ZERO
END art_quick_handle_fill_data
@@ -1120,7 +1120,7 @@
ENTRY art_quick_lock_object
beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set
nop
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case we block
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case we block
la $t9, artLockObjectFromCode
jalr $t9 # (Object* obj, Thread*)
move $a1, rSELF # pass Thread::Current
@@ -1130,7 +1130,7 @@
ENTRY art_quick_lock_object_no_inline
beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set
nop
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case we block
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case we block
la $t9, artLockObjectFromCode
jalr $t9 # (Object* obj, Thread*)
move $a1, rSELF # pass Thread::Current
@@ -1144,7 +1144,7 @@
ENTRY art_quick_unlock_object
beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set
nop
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC
la $t9, artUnlockObjectFromCode
jalr $t9 # (Object* obj, Thread*)
move $a1, rSELF # pass Thread::Current
@@ -1154,7 +1154,7 @@
ENTRY art_quick_unlock_object_no_inline
beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set
nop
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC
la $t9, artUnlockObjectFromCode
jalr $t9 # (Object* obj, Thread*)
move $a1, rSELF # pass Thread::Current
@@ -1190,7 +1190,7 @@
lw $a0, 0($sp)
addiu $sp, $sp, 32
.cfi_adjust_cfa_offset -32
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
la $t9, artThrowClassCastException
jalr $zero, $t9 # artThrowClassCastException (Class*, Class*, Thread*)
move $a2, rSELF # pass Thread::Current
@@ -1343,7 +1343,7 @@
.cfi_adjust_cfa_offset -32
bnez $v0, .Ldo_aput
nop
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
move $a1, $a2
la $t9, artThrowArrayStoreException
jalr $zero, $t9 # artThrowArrayStoreException(Class*, Class*, Thread*)
@@ -1356,7 +1356,7 @@
.extern artGetBooleanStaticFromCode
ENTRY art_quick_get_boolean_static
lw $a1, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artGetBooleanStaticFromCode
jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
@@ -1368,7 +1368,7 @@
.extern artGetByteStaticFromCode
ENTRY art_quick_get_byte_static
lw $a1, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artGetByteStaticFromCode
jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
@@ -1381,7 +1381,7 @@
.extern artGetCharStaticFromCode
ENTRY art_quick_get_char_static
lw $a1, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artGetCharStaticFromCode
jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
@@ -1393,7 +1393,7 @@
.extern artGetShortStaticFromCode
ENTRY art_quick_get_short_static
lw $a1, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artGetShortStaticFromCode
jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
@@ -1406,7 +1406,7 @@
.extern artGet32StaticFromCode
ENTRY art_quick_get32_static
lw $a1, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artGet32StaticFromCode
jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
@@ -1419,7 +1419,7 @@
.extern artGet64StaticFromCode
ENTRY art_quick_get64_static
lw $a1, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artGet64StaticFromCode
jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
@@ -1432,7 +1432,7 @@
.extern artGetObjStaticFromCode
ENTRY art_quick_get_obj_static
lw $a1, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artGetObjStaticFromCode
jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
@@ -1445,7 +1445,7 @@
.extern artGetBooleanInstanceFromCode
ENTRY art_quick_get_boolean_instance
lw $a2, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artGetBooleanInstanceFromCode
jalr $t9 # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
@@ -1457,7 +1457,7 @@
.extern artGetByteInstanceFromCode
ENTRY art_quick_get_byte_instance
lw $a2, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artGetByteInstanceFromCode
jalr $t9 # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
@@ -1470,7 +1470,7 @@
.extern artGetCharInstanceFromCode
ENTRY art_quick_get_char_instance
lw $a2, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artGetCharInstanceFromCode
jalr $t9 # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
@@ -1482,7 +1482,7 @@
.extern artGetShortInstanceFromCode
ENTRY art_quick_get_short_instance
lw $a2, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artGetShortInstanceFromCode
jalr $t9 # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
@@ -1495,7 +1495,7 @@
.extern artGet32InstanceFromCode
ENTRY art_quick_get32_instance
lw $a2, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artGet32InstanceFromCode
jalr $t9 # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
@@ -1508,7 +1508,7 @@
.extern artGet64InstanceFromCode
ENTRY art_quick_get64_instance
lw $a2, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artGet64InstanceFromCode
jalr $t9 # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
@@ -1521,7 +1521,7 @@
.extern artGetObjInstanceFromCode
ENTRY art_quick_get_obj_instance
lw $a2, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artGetObjInstanceFromCode
jalr $t9 # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
@@ -1534,7 +1534,7 @@
.extern artSet8StaticFromCode
ENTRY art_quick_set8_static
lw $a2, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artSet8StaticFromCode
jalr $t9 # (field_idx, new_val, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
@@ -1547,7 +1547,7 @@
.extern artSet16StaticFromCode
ENTRY art_quick_set16_static
lw $a2, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artSet16StaticFromCode
jalr $t9 # (field_idx, new_val, referrer, Thread*, $sp)
move $a3, rSELF # pass Thread::Current
@@ -1560,7 +1560,7 @@
.extern artSet32StaticFromCode
ENTRY art_quick_set32_static
lw $a2, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artSet32StaticFromCode
jalr $t9 # (field_idx, new_val, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
@@ -1574,7 +1574,7 @@
ENTRY art_quick_set64_static
lw $a1, 0($sp) # pass referrer's Method*
# 64 bit new_val is in a2:a3 pair
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artSet64StaticFromCode
jalr $t9 # (field_idx, referrer, new_val, Thread*)
sw rSELF, 16($sp) # pass Thread::Current
@@ -1587,7 +1587,7 @@
.extern artSetObjStaticFromCode
ENTRY art_quick_set_obj_static
lw $a2, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artSetObjStaticFromCode
jalr $t9 # (field_idx, new_val, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
@@ -1600,7 +1600,7 @@
.extern artSet8InstanceFromCode
ENTRY art_quick_set8_instance
lw $a3, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artSet8InstanceFromCode
jalr $t9 # (field_idx, Object*, new_val, referrer, Thread*)
sw rSELF, 16($sp) # pass Thread::Current
@@ -1613,7 +1613,7 @@
.extern artSet16InstanceFromCode
ENTRY art_quick_set16_instance
lw $a3, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artSet16InstanceFromCode
jalr $t9 # (field_idx, Object*, new_val, referrer, Thread*)
sw rSELF, 16($sp) # pass Thread::Current
@@ -1626,7 +1626,7 @@
.extern artSet32InstanceFromCode
ENTRY art_quick_set32_instance
lw $a3, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artSet32InstanceFromCode
jalr $t9 # (field_idx, Object*, new_val, referrer, Thread*)
sw rSELF, 16($sp) # pass Thread::Current
@@ -1640,7 +1640,7 @@
ENTRY art_quick_set64_instance
lw $t1, 0($sp) # load referrer's Method*
# 64 bit new_val is in a2:a3 pair
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
sw rSELF, 20($sp) # pass Thread::Current
la $t9, artSet64InstanceFromCode
jalr $t9 # (field_idx, Object*, new_val, referrer, Thread*)
@@ -1654,7 +1654,7 @@
.extern artSetObjInstanceFromCode
ENTRY art_quick_set_obj_instance
lw $a3, 0($sp) # pass referrer's Method*
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, artSetObjInstanceFromCode
jalr $t9 # (field_idx, Object*, new_val, referrer, Thread*)
sw rSELF, 16($sp) # pass Thread::Current
@@ -1665,7 +1665,7 @@
.macro ONE_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, \entrypoint
jalr $t9
move $a1, rSELF # pass Thread::Current
@@ -1676,7 +1676,7 @@
.macro TWO_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, \entrypoint
jalr $t9
move $a2, rSELF # pass Thread::Current
@@ -1687,7 +1687,7 @@
.macro THREE_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, \entrypoint
jalr $t9
move $a3, rSELF # pass Thread::Current
@@ -1698,7 +1698,7 @@
.macro FOUR_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
la $t9, \entrypoint
jalr $t9
sw rSELF, 16($sp) # pass Thread::Current
@@ -1809,7 +1809,7 @@
.Lart_quick_alloc_object_rosalloc_slow_path:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_ONLY_FRAME
la $t9, artAllocObjectFromCodeRosAlloc
jalr $t9
move $a2, $s1 # Pass self as argument.
@@ -1856,11 +1856,11 @@
jalr $zero, $ra
nop
1:
- SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME # save everything for stack crawl
+ SETUP_SAVE_EVERYTHING_FRAME # save everything for stack crawl
la $t9, artTestSuspendFromCode
jalr $t9 # (Thread*)
move $a0, rSELF
- RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_EVERYTHING_FRAME
jalr $zero, $ra
nop
END art_quick_test_suspend
@@ -1871,13 +1871,13 @@
*/
.extern artQuickProxyInvokeHandler
ENTRY art_quick_proxy_invoke_handler
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_A0
+ SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
move $a2, rSELF # pass Thread::Current
la $t9, artQuickProxyInvokeHandler
jalr $t9 # (Method* proxy method, receiver, Thread*, SP)
addiu $a3, $sp, ARG_SLOT_SIZE # pass $sp (remove arg slots)
lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
bnez $t0, 1f
# don't care if $v0 and/or $v1 are modified when the exception branch is taken
MTD $v0, $v1, $f0, $f1 # move float value to return value
@@ -1928,26 +1928,26 @@
.extern artQuickResolutionTrampoline
ENTRY art_quick_resolution_trampoline
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_AND_ARGS_FRAME
move $a2, rSELF # pass Thread::Current
la $t9, artQuickResolutionTrampoline
jalr $t9 # (Method* called, receiver, Thread*, SP)
addiu $a3, $sp, ARG_SLOT_SIZE # pass $sp (remove arg slots)
beqz $v0, 1f
lw $a0, ARG_SLOT_SIZE($sp) # load resolved method to $a0
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
move $t9, $v0 # code pointer must be in $t9 to generate the global pointer
jalr $zero, $t9 # tail call to method
nop
1:
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
DELIVER_PENDING_EXCEPTION
END art_quick_resolution_trampoline
.extern artQuickGenericJniTrampoline
.extern artQuickGenericJniEndTrampoline
ENTRY art_quick_generic_jni_trampoline
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_A0
+ SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
move $s8, $sp # save $sp to $s8
move $s3, $gp # save $gp to $s3
@@ -1994,7 +1994,7 @@
move $sp, $s8 # tear down the alloca
# tear down the callee-save frame
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
MTD $v0, $v1, $f0, $f1 # move float value to return value
jalr $zero, $ra
@@ -2008,13 +2008,13 @@
.extern artQuickToInterpreterBridge
ENTRY art_quick_to_interpreter_bridge
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_AND_ARGS_FRAME
move $a1, rSELF # pass Thread::Current
la $t9, artQuickToInterpreterBridge
jalr $t9 # (Method* method, Thread*, SP)
addiu $a2, $sp, ARG_SLOT_SIZE # pass $sp (remove arg slots)
lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
bnez $t0, 1f
# don't care if $v0 and/or $v1 are modified when the exception branch is taken
MTD $v0, $v1, $f0, $f1 # move float value to return value
@@ -2030,7 +2030,7 @@
.extern artInstrumentationMethodEntryFromCode
.extern artInstrumentationMethodExitFromCode
ENTRY art_quick_instrumentation_entry
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_AND_ARGS_FRAME
sw $a0, 28($sp) # save arg0 in free arg slot
move $a3, $ra # pass $ra
la $t9, artInstrumentationMethodEntryFromCode
@@ -2038,7 +2038,7 @@
move $a2, rSELF # pass Thread::Current
move $t9, $v0 # $t9 holds reference to code
lw $a0, 28($sp) # restore arg0 from free arg slot
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
jalr $t9 # call method
nop
END art_quick_instrumentation_entry
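[Editor's note] The instrumentation entry stub passes (method, receiver, Thread::Current(), $ra) and tail-calls the code pointer the runtime hands back in $v0. A declaration-only sketch; the exact C++ parameter types are an assumption inferred from the register comments above:

    #include <cstdint>
    struct Thread;
    struct ArtMethod;
    namespace mirror { struct Object; }
    extern "C" const void* artInstrumentationMethodEntryFromCode(
        ArtMethod* method, mirror::Object* this_object, Thread* self, uintptr_t lr);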
@@ -2050,7 +2050,7 @@
.cpload $t9
move $ra, $zero # the link register points here, so clobber it with 0 for later checks
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_ONLY_FRAME
addiu $sp, $sp, -16 # allocate temp storage on the stack
.cfi_adjust_cfa_offset 16
sw $v0, ARG_SLOT_SIZE+12($sp)
@@ -2071,8 +2071,8 @@
lw $v1, ARG_SLOT_SIZE+8($sp)
l.d $f0, ARG_SLOT_SIZE($sp)
jalr $zero, $t9 # return
- addiu $sp, $sp, ARG_SLOT_SIZE+FRAME_SIZE_REFS_ONLY_CALLEE_SAVE+16 # restore stack
- .cfi_adjust_cfa_offset -(ARG_SLOT_SIZE+FRAME_SIZE_REFS_ONLY_CALLEE_SAVE+16)
+ addiu $sp, $sp, ARG_SLOT_SIZE+FRAME_SIZE_SAVE_REFS_ONLY+16 # restore stack
+ .cfi_adjust_cfa_offset -(ARG_SLOT_SIZE+FRAME_SIZE_SAVE_REFS_ONLY+16)
END art_quick_instrumentation_exit
/*
@@ -2081,7 +2081,7 @@
*/
.extern artDeoptimize
ENTRY art_quick_deoptimize
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
la $t9, artDeoptimize
jalr $t9 # artDeoptimize(Thread*)
# Returns caller method's frame size.
@@ -2094,7 +2094,7 @@
*/
.extern artDeoptimizeFromCompiledCode
ENTRY art_quick_deoptimize_from_compiled_code
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
la $t9, artDeoptimizeFromCompiledCode
jalr $t9 # artDeoptimizeFromCompiledCode(Thread*)
# Returns caller method's frame size.
diff --git a/runtime/arch/mips/quick_method_frame_info_mips.h b/runtime/arch/mips/quick_method_frame_info_mips.h
index 170513d..90e7b20 100644
--- a/runtime/arch/mips/quick_method_frame_info_mips.h
+++ b/runtime/arch/mips/quick_method_frame_info_mips.h
@@ -61,15 +61,15 @@
constexpr uint32_t MipsCalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
return kMipsCalleeSaveAlwaysSpills | kMipsCalleeSaveRefSpills |
- (type == Runtime::kRefsAndArgs ? kMipsCalleeSaveArgSpills : 0) |
- (type == Runtime::kSaveAll ? kMipsCalleeSaveAllSpills : 0) |
+ (type == Runtime::kSaveRefsAndArgs ? kMipsCalleeSaveArgSpills : 0) |
+ (type == Runtime::kSaveAllCalleeSaves ? kMipsCalleeSaveAllSpills : 0) |
(type == Runtime::kSaveEverything ? kMipsCalleeSaveEverythingSpills : 0);
}
constexpr uint32_t MipsCalleeSaveFPSpills(Runtime::CalleeSaveType type) {
return kMipsCalleeSaveFpAlwaysSpills | kMipsCalleeSaveFpRefSpills |
- (type == Runtime::kRefsAndArgs ? kMipsCalleeSaveFpArgSpills : 0) |
- (type == Runtime::kSaveAll ? kMipsCalleeSaveAllFPSpills : 0) |
+ (type == Runtime::kSaveRefsAndArgs ? kMipsCalleeSaveFpArgSpills : 0) |
+ (type == Runtime::kSaveAllCalleeSaves ? kMipsCalleeSaveAllFPSpills : 0) |
(type == Runtime::kSaveEverything ? kMipsCalleeSaveFpEverythingSpills : 0);
}
diff --git a/runtime/arch/mips64/asm_support_mips64.h b/runtime/arch/mips64/asm_support_mips64.h
index 2c16c25..9063d20 100644
--- a/runtime/arch/mips64/asm_support_mips64.h
+++ b/runtime/arch/mips64/asm_support_mips64.h
@@ -20,12 +20,12 @@
#include "asm_support.h"
// 64 ($f24-$f31) + 64 ($s0-$s7) + 8 ($gp) + 8 ($s8) + 8 ($ra) + 1x8 bytes padding
-#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 160
+#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVES 160
// 48 ($s2-$s7) + 8 ($gp) + 8 ($s8) + 8 ($ra) + 1x8 bytes padding
-#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 80
+#define FRAME_SIZE_SAVE_REFS_ONLY 80
// $f12-$f19, $a1-$a7, $s2-$s7 + $gp + $s8 + $ra, 16 total + 1x8 bytes padding + method*
-#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 208
+#define FRAME_SIZE_SAVE_REFS_AND_ARGS 208
// $f0-$f31, $at, $v0-$v1, $a0-$a7, $t0-$t3, $s0-$s7, $t8-$t9, $gp, $s8, $ra + padding + method*
-#define FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE 496
+#define FRAME_SIZE_SAVE_EVERYTHING 496
#endif // ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_H_
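[Editor's note] The byte counts in the comments above do add up to the defined sizes; a self-checking editorial restatement of the kSaveAllCalleeSaves case:

    static_assert(8 * 8    // $f24-$f31
                + 8 * 8    // $s0-$s7
                + 3 * 8    // $gp, $s8, $ra
                + 1 * 8    // padding
                == 160, "matches FRAME_SIZE_SAVE_ALL_CALLEE_SAVES above");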
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 3469de2..9774eb9 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -41,16 +41,16 @@
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kSaveAll)
+ * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
* callee-save: padding + $f24-$f31 + $s0-$s7 + $gp + $ra + $s8 = 19 total + 1x8 bytes padding
*/
-.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+.macro SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
daddiu $sp, $sp, -160
.cfi_adjust_cfa_offset 160
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 160)
-#error "SAVE_ALL_CALLEE_SAVE_FRAME(MIPS64) size not as expected."
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 160)
+#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(MIPS64) size not as expected."
#endif
sd $ra, 152($sp)
@@ -89,25 +89,25 @@
# load appropriate callee-save-method
ld $t1, %got(_ZN3art7Runtime9instance_E)($gp)
ld $t1, 0($t1)
- ld $t1, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET($t1)
+ ld $t1, RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET($t1)
sd $t1, 0($sp) # Place ArtMethod* at bottom of stack.
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
.endm
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsOnly). Restoration assumes
+ * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly). Restoration assumes
* non-moving GC.
* Does not include rSUSPEND or rSELF
* callee-save: padding + $s2-$s7 + $gp + $ra + $s8 = 9 total + 1x8 bytes padding
*/
-.macro SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+.macro SETUP_SAVE_REFS_ONLY_FRAME
daddiu $sp, $sp, -80
.cfi_adjust_cfa_offset 80
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 80)
-#error "REFS_ONLY_CALLEE_SAVE_FRAME(MIPS64) size not as expected."
+#if (FRAME_SIZE_SAVE_REFS_ONLY != 80)
+#error "FRAME_SIZE_SAVE_REFS_ONLY(MIPS64) size not as expected."
#endif
sd $ra, 72($sp)
@@ -131,12 +131,12 @@
# load appropriate callee-save-method
ld $t1, %got(_ZN3art7Runtime9instance_E)($gp)
ld $t1, 0($t1)
- ld $t1, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($t1)
+ ld $t1, RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET($t1)
sd $t1, 0($sp) # Place Method* at bottom of stack.
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
.endm
-.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+.macro RESTORE_SAVE_REFS_ONLY_FRAME
ld $ra, 72($sp)
.cfi_restore 31
ld $s8, 64($sp)
@@ -160,7 +160,7 @@
.cpreturn
.endm
-.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+.macro RESTORE_SAVE_REFS_ONLY_FRAME_AND_RETURN
ld $ra, 72($sp)
.cfi_restore 31
ld $s8, 64($sp)
@@ -186,15 +186,15 @@
.endm
// This assumes the top part of these stack frame types is identical.
-#define REFS_AND_ARGS_MINUS_REFS_SIZE (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
+#define REFS_AND_ARGS_MINUS_REFS_SIZE (FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
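[Editor's note] The define works because the two frame types share their top layout (as the comment above says), so the argument-only slice is just the size difference: 208 - 80 = 128 bytes on MIPS64. A restatement, assuming asm_support_mips64.h is included:

    static_assert(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY == 128,
                  "args-only portion of the SaveRefsAndArgs frame");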
-.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
+.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL
daddiu $sp, $sp, -208
.cfi_adjust_cfa_offset 208
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 208)
-#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(MIPS64) size not as expected."
+#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 208)
+#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(MIPS64) size not as expected."
#endif
sd $ra, 200($sp) # = kQuickCalleeSaveFrame_RefAndArgs_LrOffset
@@ -244,27 +244,27 @@
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes
+ * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs). Restoration assumes
* non-moving GC.
* callee-save: padding + $f12-$f19 + $a1-$a7 + $s2-$s7 + $gp + $ra + $s8 = 24 total + 1 word padding + Method*
*/
-.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
+.macro SETUP_SAVE_REFS_AND_ARGS_FRAME
+ SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL
# load appropriate callee-save-method
ld $t1, %got(_ZN3art7Runtime9instance_E)($gp)
ld $t1, 0($t1)
- ld $t1, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET($t1)
+ ld $t1, RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET($t1)
sd $t1, 0($sp) # Place Method* at bottom of stack.
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
.endm
-.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_A0
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
+.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
+ SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL
sd $a0, 0($sp) # Place Method* at bottom of stack.
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
.endm
-.macro RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME
ld $ra, 200($sp)
.cfi_restore 31
ld $s8, 192($sp)
@@ -320,13 +320,13 @@
* $f0-$f31; 28 (GPR) + 32 (FPR) + 1x8 bytes padding + method*
* This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP.
*/
-.macro SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+.macro SETUP_SAVE_EVERYTHING_FRAME
daddiu $sp, $sp, -496
.cfi_adjust_cfa_offset 496
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE != 496)
-#error "SAVE_EVERYTHING_CALLEE_SAVE_FRAME(MIPS64) size not as expected."
+#if (FRAME_SIZE_SAVE_EVERYTHING != 496)
+#error "FRAME_SIZE_SAVE_EVERYTHING(MIPS64) size not as expected."
#endif
// Save core registers.
@@ -430,13 +430,13 @@
# load appropriate callee-save-method
ld $t1, %got(_ZN3art7Runtime9instance_E)($gp)
ld $t1, 0($t1)
- ld $t1, RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET($t1)
+ ld $t1, RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET($t1)
sd $t1, 0($sp) # Place ArtMethod* at bottom of stack.
# Place sp in Thread::Current()->top_quick_frame.
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
.endm
-.macro RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+.macro RESTORE_SAVE_EVERYTHING_FRAME
// Restore FP registers.
l.d $f31, 264($sp)
l.d $f30, 256($sp)
@@ -542,7 +542,7 @@
*/
.macro DELIVER_PENDING_EXCEPTION
SETUP_GP
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME # save callee saves for throw
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME # save callee saves for throw
dla $t9, artDeliverPendingExceptionFromCode
jalr $zero, $t9 # artDeliverPendingExceptionFromCode(Thread*)
move $a0, rSELF # pass Thread::Current
@@ -550,7 +550,7 @@
.macro RETURN_IF_NO_EXCEPTION
ld $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
bne $t0, $zero, 1f # success if no exception is pending
nop
jalr $zero, $ra
@@ -560,7 +560,7 @@
.endm
.macro RETURN_IF_ZERO
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
bne $v0, $zero, 1f # success?
nop
jalr $zero, $ra # return on success
@@ -570,7 +570,7 @@
.endm
.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
beq $v0, $zero, 1f # success?
nop
jalr $zero, $ra # return on success
@@ -796,7 +796,7 @@
* the bottom of the thread. On entry a0 holds Throwable*
*/
ENTRY art_quick_deliver_exception
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
dla $t9, artDeliverExceptionFromCode
jalr $zero, $t9 # artDeliverExceptionFromCode(Throwable*, Thread*)
move $a1, rSELF # pass Thread::Current
@@ -808,7 +808,7 @@
.extern artThrowNullPointerExceptionFromCode
ENTRY art_quick_throw_null_pointer_exception
.Lart_quick_throw_null_pointer_exception_gp_set:
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
dla $t9, artThrowNullPointerExceptionFromCode
jalr $zero, $t9 # artThrowNullPointerExceptionFromCode(Thread*)
move $a0, rSELF # pass Thread::Current
@@ -819,7 +819,7 @@
*/
.extern artThrowNullPointerExceptionFromSignal
ENTRY art_quick_throw_null_pointer_exception_from_signal
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
dla $t9, artThrowNullPointerExceptionFromSignal
jalr $zero, $t9 # artThrowNullPointerExceptionFromSignal(uintptr_t, Thread*)
move $a1, rSELF # pass Thread::Current
@@ -830,7 +830,7 @@
*/
.extern artThrowDivZeroFromCode
ENTRY art_quick_throw_div_zero
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
dla $t9, artThrowDivZeroFromCode
jalr $zero, $t9 # artThrowDivZeroFromCode(Thread*)
move $a0, rSELF # pass Thread::Current
@@ -843,7 +843,7 @@
.extern artThrowArrayBoundsFromCode
ENTRY art_quick_throw_array_bounds
.Lart_quick_throw_array_bounds_gp_set:
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
dla $t9, artThrowArrayBoundsFromCode
jalr $zero, $t9 # artThrowArrayBoundsFromCode(index, limit, Thread*)
move $a2, rSELF # pass Thread::Current
@@ -856,7 +856,7 @@
.extern artThrowStringBoundsFromCode
ENTRY art_quick_throw_string_bounds
.Lart_quick_throw_string_bounds_gp_set:
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
dla $t9, artThrowStringBoundsFromCode
jalr $zero, $t9 # artThrowStringBoundsFromCode(index, limit, Thread*)
move $a2, rSELF # pass Thread::Current
@@ -867,7 +867,7 @@
*/
.extern artThrowStackOverflowFromCode
ENTRY art_quick_throw_stack_overflow
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
dla $t9, artThrowStackOverflowFromCode
jalr $zero, $t9 # artThrowStackOverflowFromCode(Thread*)
move $a0, rSELF # pass Thread::Current
@@ -878,7 +878,7 @@
*/
.extern artThrowNoSuchMethodFromCode
ENTRY art_quick_throw_no_such_method
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
dla $t9, artThrowNoSuchMethodFromCode
jalr $zero, $t9 # artThrowNoSuchMethodFromCode(method_idx, Thread*)
move $a1, rSELF # pass Thread::Current
@@ -902,13 +902,13 @@
*/
.macro INVOKE_TRAMPOLINE_BODY cxx_name
.extern \cxx_name
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME # save callee saves in case allocation triggers GC
+ SETUP_SAVE_REFS_AND_ARGS_FRAME # save callee saves in case allocation triggers GC
move $a2, rSELF # pass Thread::Current
jal \cxx_name # (method_idx, this, Thread*, $sp)
move $a3, $sp # pass $sp
move $a0, $v0 # save target Method*
move $t9, $v1 # save $v0->code_
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
beq $v0, $zero, 1f
nop
jalr $zero, $t9
@@ -1197,8 +1197,8 @@
*/
.extern artHandleFillArrayDataFromCode
ENTRY art_quick_handle_fill_data
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
- ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC
+ ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artHandleFillArrayDataFromCode # (payload offset, Array*, method, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1211,7 +1211,7 @@
ENTRY art_quick_lock_object
beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
nop
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case we block
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case we block
jal artLockObjectFromCode # (Object* obj, Thread*)
move $a1, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1220,7 +1220,7 @@
ENTRY art_quick_lock_object_no_inline
beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
nop
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case we block
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case we block
jal artLockObjectFromCode # (Object* obj, Thread*)
move $a1, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1233,7 +1233,7 @@
ENTRY art_quick_unlock_object
beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
nop
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC
jal artUnlockObjectFromCode # (Object* obj, Thread*)
move $a1, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1242,7 +1242,7 @@
ENTRY art_quick_unlock_object_no_inline
beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
nop
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC
jal artUnlockObjectFromCode # (Object* obj, Thread*)
move $a1, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1275,7 +1275,7 @@
daddiu $sp, $sp, 32
.cfi_adjust_cfa_offset -32
SETUP_GP
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
dla $t9, artThrowClassCastException
jalr $zero, $t9 # artThrowClassCastException (Class*, Class*, Thread*)
move $a2, rSELF # pass Thread::Current
@@ -1423,7 +1423,7 @@
SETUP_GP
bne $v0, $zero, .Ldo_aput
nop
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
move $a1, $a2
dla $t9, artThrowArrayStoreException
jalr $zero, $t9 # artThrowArrayStoreException(Class*, Class*, Thread*)
@@ -1435,8 +1435,8 @@
*/
.extern artGetBooleanStaticFromCode
ENTRY art_quick_get_boolean_static
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artGetBooleanStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1447,8 +1447,8 @@
*/
.extern artGetByteStaticFromCode
ENTRY art_quick_get_byte_static
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artGetByteStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1459,8 +1459,8 @@
*/
.extern artGetCharStaticFromCode
ENTRY art_quick_get_char_static
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artGetCharStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1471,8 +1471,8 @@
*/
.extern artGetShortStaticFromCode
ENTRY art_quick_get_short_static
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artGetShortStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1483,8 +1483,8 @@
*/
.extern artGet32StaticFromCode
ENTRY art_quick_get32_static
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artGet32StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1495,8 +1495,8 @@
*/
.extern artGet64StaticFromCode
ENTRY art_quick_get64_static
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artGet64StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1507,8 +1507,8 @@
*/
.extern artGetObjStaticFromCode
ENTRY art_quick_get_obj_static
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artGetObjStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1519,8 +1519,8 @@
*/
.extern artGetBooleanInstanceFromCode
ENTRY art_quick_get_boolean_instance
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artGetBooleanInstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1531,8 +1531,8 @@
*/
.extern artGetByteInstanceFromCode
ENTRY art_quick_get_byte_instance
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artGetByteInstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1543,8 +1543,8 @@
*/
.extern artGetCharInstanceFromCode
ENTRY art_quick_get_char_instance
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artGetCharInstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1555,8 +1555,8 @@
*/
.extern artGetShortInstanceFromCode
ENTRY art_quick_get_short_instance
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artGetShortInstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1567,8 +1567,8 @@
*/
.extern artGet32InstanceFromCode
ENTRY art_quick_get32_instance
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artGet32InstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1579,8 +1579,8 @@
*/
.extern artGet64InstanceFromCode
ENTRY art_quick_get64_instance
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artGet64InstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1591,8 +1591,8 @@
*/
.extern artGetObjInstanceFromCode
ENTRY art_quick_get_obj_instance
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artGetObjInstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1603,8 +1603,8 @@
*/
.extern artSet8StaticFromCode
ENTRY art_quick_set8_static
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artSet8StaticFromCode # (field_idx, new_val, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1615,8 +1615,8 @@
*/
.extern artSet16StaticFromCode
ENTRY art_quick_set16_static
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artSet16StaticFromCode # (field_idx, new_val, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1627,8 +1627,8 @@
*/
.extern artSet32StaticFromCode
ENTRY art_quick_set32_static
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artSet32StaticFromCode # (field_idx, new_val, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1639,9 +1639,9 @@
*/
.extern artSet64StaticFromCode
ENTRY art_quick_set64_static
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
# a2 contains the new val
- ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artSet64StaticFromCode # (field_idx, referrer, new_val, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1652,8 +1652,8 @@
*/
.extern artSetObjStaticFromCode
ENTRY art_quick_set_obj_static
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artSetObjStaticFromCode # (field_idx, new_val, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1664,8 +1664,8 @@
*/
.extern artSet8InstanceFromCode
ENTRY art_quick_set8_instance
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a3, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artSet8InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
move $a4, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1676,8 +1676,8 @@
*/
.extern artSet16InstanceFromCode
ENTRY art_quick_set16_instance
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a3, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artSet16InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
move $a4, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1688,8 +1688,8 @@
*/
.extern artSet32InstanceFromCode
ENTRY art_quick_set32_instance
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a3, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artSet32InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
move $a4, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1700,8 +1700,8 @@
*/
.extern artSet64InstanceFromCode
ENTRY art_quick_set64_instance
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a3, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artSet64InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
move $a4, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1712,8 +1712,8 @@
*/
.extern artSetObjInstanceFromCode
ENTRY art_quick_set_obj_instance
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- ld $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ ld $a3, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
jal artSetObjInstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
move $a4, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1723,7 +1723,7 @@
.macro ONE_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
jal \entrypoint
move $a1, rSELF # pass Thread::Current
\return
@@ -1734,7 +1734,7 @@
.macro TWO_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
jal \entrypoint
move $a2, rSELF # pass Thread::Current
\return
@@ -1744,7 +1744,7 @@
.macro THREE_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
jal \entrypoint
move $a3, rSELF # pass Thread::Current
\return
@@ -1754,7 +1754,7 @@
.macro FOUR_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
jal \entrypoint
move $a4, rSELF # pass Thread::Current
\return
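The four *_ARG_DOWNCALL shapes above differ only in which argument register receives Thread::Current(); each builds a kSaveRefsOnly frame, calls the entrypoint, and finishes through the supplied \return macro. A minimal C++ sketch of that contract, using simplified stand-in types (the real Thread and the art* helpers have richer signatures):

#include <cstdint>

// Stand-in for runtime/thread.h's Thread; only the pieces the sketch needs.
struct Thread {
  void* exception = nullptr;
  static Thread* Current() { static Thread self; return &self; }
};

// Hypothetical entrypoint standing in for any helper reached through
// ONE_ARG_DOWNCALL; the name is illustrative, not a real ART symbol.
static void* artExampleEntrypoint(uint32_t arg0, Thread* self) {
  (void)arg0; (void)self;
  return nullptr;
}

void* OneArgDowncall(uint32_t arg0) {
  // SETUP_SAVE_REFS_ONLY_FRAME: spill the callee saves and push the
  // kSaveRefsOnly ArtMethod* so a GC during the call can walk this frame.
  Thread* self = Thread::Current();             // move $a1, rSELF
  void* result = artExampleEntrypoint(arg0, self);
  // The \return macro (e.g. RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER)
  // restores the frame and checks for a pending exception:
  if (result == nullptr && self->exception != nullptr) {
    // DELIVER_PENDING_EXCEPTION long-jumps; the sketch just falls through.
  }
  return result;
}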
@@ -1856,7 +1856,7 @@
.cpreturn # Restore gp from t8 in branch delay slot.
.Lart_quick_alloc_object_rosalloc_slow_path:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_ONLY_FRAME
jal artAllocObjectFromCodeRosAlloc
move $a2 ,$s1 # Pass self as argument.
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
@@ -1902,10 +1902,10 @@
jalr $zero, $ra
nop
1:
- SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME # save everything for stack crawl
+ SETUP_SAVE_EVERYTHING_FRAME # save everything for stack crawl
jal artTestSuspendFromCode # (Thread*)
move $a0, rSELF
- RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_EVERYTHING_FRAME
jalr $zero, $ra
nop
END art_quick_test_suspend
@@ -1916,13 +1916,13 @@
*/
.extern artQuickProxyInvokeHandler
ENTRY art_quick_proxy_invoke_handler
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_A0
+ SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
move $a2, rSELF # pass Thread::Current
jal artQuickProxyInvokeHandler # (Method* proxy method, receiver, Thread*, SP)
move $a3, $sp # pass $sp
ld $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
daddiu $sp, $sp, REFS_AND_ARGS_MINUS_REFS_SIZE # skip a0-a7 and f12-f19
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
bne $t0, $zero, 1f
dmtc1 $v0, $f0 # place return value to FP return value
jalr $zero, $ra
@@ -1971,26 +1971,26 @@
.extern artQuickResolutionTrampoline
ENTRY art_quick_resolution_trampoline
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_AND_ARGS_FRAME
move $a2, rSELF # pass Thread::Current
jal artQuickResolutionTrampoline # (Method* called, receiver, Thread*, SP)
move $a3, $sp # pass $sp
beq $v0, $zero, 1f
ld $a0, 0($sp) # load resolved method in $a0
# artQuickResolutionTrampoline puts resolved method in *SP
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
move $t9, $v0 # code pointer must be in $t9 to generate the global pointer
jalr $zero, $t9 # tail call to method
nop
1:
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
DELIVER_PENDING_EXCEPTION
END art_quick_resolution_trampoline
.extern artQuickGenericJniTrampoline
.extern artQuickGenericJniEndTrampoline
ENTRY art_quick_generic_jni_trampoline
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_A0
+ SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
move $s8, $sp # save $sp
# prepare for call to artQuickGenericJniTrampoline(Thread*, SP)
@@ -2040,7 +2040,7 @@
move $sp, $s8 # tear down the alloca
# tear down the callee-save frame
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
jalr $zero, $ra
dmtc1 $v0, $f0 # place return value to FP return value
@@ -2053,13 +2053,13 @@
.extern artQuickToInterpreterBridge
ENTRY art_quick_to_interpreter_bridge
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_AND_ARGS_FRAME
move $a1, rSELF # pass Thread::Current
jal artQuickToInterpreterBridge # (Method* method, Thread*, SP)
move $a2, $sp # pass $sp
ld $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
daddiu $sp, $sp, REFS_AND_ARGS_MINUS_REFS_SIZE # skip a0-a7 and f12-f19
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
bne $t0, $zero, 1f
dmtc1 $v0, $f0 # place return value to FP return value
jalr $zero, $ra
@@ -2074,7 +2074,7 @@
.extern artInstrumentationMethodEntryFromCode
.extern artInstrumentationMethodExitFromCode
ENTRY art_quick_instrumentation_entry
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_AND_ARGS_FRAME
daddiu $sp, $sp, -16 # space for saving arg0
.cfi_adjust_cfa_offset 16
sd $a0, 0($sp) # save arg0
@@ -2085,7 +2085,7 @@
ld $a0, 0($sp) # restore arg0
daddiu $sp, $sp, 16 # remove args
.cfi_adjust_cfa_offset -16
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
jalr $t9 # call method
nop
END art_quick_instrumentation_entry
@@ -2095,7 +2095,7 @@
.cfi_startproc
SETUP_GP
move $ra, $zero # link register points here, so clobber it with 0 for later checks
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_ONLY_FRAME
move $t0, $sp # remember bottom of caller's frame
daddiu $sp, $sp, -16 # save return values and set up args
.cfi_adjust_cfa_offset 16
@@ -2115,8 +2115,9 @@
ld $v0, 0($sp) # restore return values
l.d $f0, 8($sp)
jalr $zero, $t9 # return
- daddiu $sp, $sp, 16+FRAME_SIZE_REFS_ONLY_CALLEE_SAVE # 16 bytes of saved values + ref_only callee save frame
- .cfi_adjust_cfa_offset -(16+FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
+ # restore stack, 16 bytes of saved values + ref_only callee save frame
+ daddiu $sp, $sp, 16+FRAME_SIZE_SAVE_REFS_ONLY
+ .cfi_adjust_cfa_offset -(16+FRAME_SIZE_SAVE_REFS_ONLY)
END art_quick_instrumentation_exit
/*
@@ -2126,7 +2127,7 @@
.extern artDeoptimize
.extern artEnterInterpreterFromDeoptimize
ENTRY art_quick_deoptimize
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
jal artDeoptimize # artDeoptimize(Thread*, SP)
# Returns caller method's frame size.
move $a0, rSELF # pass Thread::Current
@@ -2138,7 +2139,7 @@
*/
.extern artDeoptimizeFromCompiledCode
ENTRY art_quick_deoptimize_from_compiled_code
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
jal artDeoptimizeFromCompiledCode # artDeoptimizeFromCompiledCode(Thread*, SP)
# Returns caller method's frame size.
move $a0, rSELF # pass Thread::Current
diff --git a/runtime/arch/mips64/quick_method_frame_info_mips64.h b/runtime/arch/mips64/quick_method_frame_info_mips64.h
index d52945f..397776e 100644
--- a/runtime/arch/mips64/quick_method_frame_info_mips64.h
+++ b/runtime/arch/mips64/quick_method_frame_info_mips64.h
@@ -71,15 +71,15 @@
constexpr uint32_t Mips64CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
return kMips64CalleeSaveAlwaysSpills | kMips64CalleeSaveRefSpills |
- (type == Runtime::kRefsAndArgs ? kMips64CalleeSaveArgSpills : 0) |
- (type == Runtime::kSaveAll ? kMips64CalleeSaveAllSpills : 0) |
+ (type == Runtime::kSaveRefsAndArgs ? kMips64CalleeSaveArgSpills : 0) |
+ (type == Runtime::kSaveAllCalleeSaves ? kMips64CalleeSaveAllSpills : 0) |
(type == Runtime::kSaveEverything ? kMips64CalleeSaveEverythingSpills : 0);
}
constexpr uint32_t Mips64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
return kMips64CalleeSaveFpRefSpills |
- (type == Runtime::kRefsAndArgs ? kMips64CalleeSaveFpArgSpills: 0) |
- (type == Runtime::kSaveAll ? kMips64CalleeSaveFpAllSpills : 0) |
+ (type == Runtime::kSaveRefsAndArgs ? kMips64CalleeSaveFpArgSpills: 0) |
+ (type == Runtime::kSaveAllCalleeSaves ? kMips64CalleeSaveFpAllSpills : 0) |
(type == Runtime::kSaveEverything ? kMips64CalleeSaveFpEverythingSpills : 0);
}
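The two helpers above build each frame's core and FP spill masks by OR-ing always-saved sets with per-type extras; only the type being created contributes its additional registers. A self-contained sketch of the pattern with hypothetical mask values (the real bit assignments live in quick_method_frame_info_mips64.h):

#include <cstdint>

// Illustrative enum mirroring the renamed Runtime::CalleeSaveType values
// used in this diff (kSaveRefsOnly is implied by the new macro names).
enum class CalleeSaveType {
  kSaveAllCalleeSaves, kSaveRefsOnly, kSaveRefsAndArgs, kSaveEverything
};

// Hypothetical one-bit-per-register masks, not the real MIPS64 values.
constexpr uint32_t kAlwaysSpills     = 0x80000000u;  // e.g. RA
constexpr uint32_t kRefSpills        = 0x00ff0000u;
constexpr uint32_t kArgSpills        = 0x0000ff00u;
constexpr uint32_t kAllSpills        = 0x000000f0u;
constexpr uint32_t kEverythingSpills = 0x0000000fu;

constexpr uint32_t CoreSpills(CalleeSaveType type) {
  return kAlwaysSpills | kRefSpills |
      (type == CalleeSaveType::kSaveRefsAndArgs    ? kArgSpills        : 0u) |
      (type == CalleeSaveType::kSaveAllCalleeSaves ? kAllSpills        : 0u) |
      (type == CalleeSaveType::kSaveEverything     ? kEverythingSpills : 0u);
}

// kSaveRefsOnly gets only the base sets; each other type adds its extras.
static_assert(CoreSpills(CalleeSaveType::kSaveRefsOnly) ==
              (kAlwaysSpills | kRefSpills), "refs-only adds nothing extra");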
diff --git a/runtime/arch/quick_alloc_entrypoints.S b/runtime/arch/quick_alloc_entrypoints.S
index 290769b..fa86bf4 100644
--- a/runtime/arch/quick_alloc_entrypoints.S
+++ b/runtime/arch/quick_alloc_entrypoints.S
@@ -87,6 +87,27 @@
ONE_ARG_DOWNCALL art_quick_alloc_string_from_string ## c_suffix, artAllocStringFromStringFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS
+GENERATE_ALLOC_ENTRYPOINTS_FOR_REGION_TLAB_ALLOCATOR
+.endm
+
+.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_REGION_TLAB_ALLOCATOR
+// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
+.endm
+
+.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc, DlMalloc)
@@ -219,20 +240,6 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_instrumented, RegionInstrumented)
-// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
-
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab_instrumented, RegionTLABInstrumented)
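The reshuffle above is mechanical: the _region_tlab group moves out of GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR into its own macro so an architecture can first emit a hand-written art_quick_alloc_object_region_tlab fast path and then expand the remaining stubs. The `## c_suffix` pasting that forms each symbol name, sketched with the C++ preprocessor (declarations only; the .S macros emit real assembly stubs):

#include <cstdint>

// Mirrors the .S naming scheme: stem ## allocator-suffix.
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(c_suffix) \
  extern "C" void* art_quick_alloc_object_resolved##c_suffix(uint32_t type_idx);

GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab)
// Expands to:
//   extern "C" void* art_quick_alloc_object_resolved_region_tlab(uint32_t);

The real .S macros also take a cxx_suffix ("RegionTLAB"), pasted onto the C++ helper each stub calls (artAllocObjectFromCode ## cxx_suffix), which the sketch omits.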
diff --git a/runtime/arch/x86/asm_support_x86.h b/runtime/arch/x86/asm_support_x86.h
index ba5fd99..2bba08d 100644
--- a/runtime/arch/x86/asm_support_x86.h
+++ b/runtime/arch/x86/asm_support_x86.h
@@ -19,9 +19,9 @@
#include "asm_support.h"
-#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 32
-#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 32
-#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE (32 + 32)
-#define FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE (48 + 64)
+#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVES 32
+#define FRAME_SIZE_SAVE_REFS_ONLY 32
+#define FRAME_SIZE_SAVE_REFS_AND_ARGS (32 + 32)
+#define FRAME_SIZE_SAVE_EVERYTHING (48 + 64)
#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
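The renamed constants keep their former values; the preprocessor checks later in quick_entrypoints_x86.S spell out the per-push sums ("Last +4" being the caller's implicit return-address push). Restated as ordinary static_asserts, with the breakdown comments reflecting that assumed layout:

// FRAME_SIZE_SAVE_ALL_CALLEE_SAVES / FRAME_SIZE_SAVE_REFS_ONLY:
// 3 callee-save pushes + (12-byte padding + Method*) + return address.
static_assert(32 == 3 * 4 + 16 + 4, "x86 save-all / refs-only frame");
// FRAME_SIZE_SAVE_REFS_AND_ARGS:
// 7 GPR pushes + 4 XMM argument registers + return address.
static_assert(32 + 32 == 7 * 4 + 4 * 8 + 4, "x86 refs-and-args frame");
// FRAME_SIZE_SAVE_EVERYTHING:
// 7 GPRs + 8 XMMs + padding + Method* + return address.
static_assert(48 + 64 == 7 * 4 + 8 * 8 + 12 + 4 + 4, "x86 save-everything frame");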
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 68ba0cf..2e9682e 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -22,9 +22,9 @@
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kSaveAll)
+ * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
*/
-MACRO2(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME, got_reg, temp_reg)
+MACRO2(SETUP_SAVE_ALL_CALLEE_SAVES_FRAME, got_reg, temp_reg)
PUSH edi // Save callee saves (ebx is saved/restored by the upcall)
PUSH esi
PUSH ebp
@@ -35,22 +35,22 @@
movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg)
movl (REG_VAR(temp_reg)), REG_VAR(temp_reg)
// Push save all callee-save method.
- pushl RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg))
+ pushl RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET(REG_VAR(temp_reg))
CFI_ADJUST_CFA_OFFSET(4)
// Store esp as the top quick frame.
movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
// Ugly compile-time check, but we only have the preprocessor.
// Last +4: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 3*4 + 16 + 4)
-#error "SAVE_ALL_CALLEE_SAVE_FRAME(X86) size not as expected."
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 3*4 + 16 + 4)
+#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(X86) size not as expected."
#endif
END_MACRO
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsOnly)
+ * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly)
*/
-MACRO2(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME, got_reg, temp_reg)
+MACRO2(SETUP_SAVE_REFS_ONLY_FRAME, got_reg, temp_reg)
PUSH edi // Save callee saves (ebx is saved/restored by the upcall)
PUSH esi
PUSH ebp
@@ -61,24 +61,24 @@
movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg)
movl (REG_VAR(temp_reg)), REG_VAR(temp_reg)
// Push save refs-only callee-save method.
- pushl RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg))
+ pushl RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET(REG_VAR(temp_reg))
CFI_ADJUST_CFA_OFFSET(4)
// Store esp as the top quick frame.
movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
// Ugly compile-time check, but we only have the preprocessor.
// Last +4: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 3*4 + 16 + 4)
-#error "REFS_ONLY_CALLEE_SAVE_FRAME(X86) size not as expected."
+#if (FRAME_SIZE_SAVE_REFS_ONLY != 3*4 + 16 + 4)
+#error "FRAME_SIZE_SAVE_REFS_ONLY(X86) size not as expected."
#endif
END_MACRO
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsOnly)
+ * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly)
* and preserves the value of got_reg at entry.
*/
-MACRO2(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME_PRESERVE_GOT_REG, got_reg, temp_reg)
+MACRO2(SETUP_SAVE_REFS_ONLY_FRAME_PRESERVE_GOT_REG, got_reg, temp_reg)
PUSH edi // Save callee saves (ebx is saved/restored by the upcall)
PUSH esi
PUSH ebp
@@ -91,7 +91,7 @@
movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg)
movl (REG_VAR(temp_reg)), REG_VAR(temp_reg)
// Push save refs-only callee-save method.
- pushl RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg))
+ pushl RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET(REG_VAR(temp_reg))
CFI_ADJUST_CFA_OFFSET(4)
// Store esp as the top quick frame.
movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
@@ -101,12 +101,12 @@
// Ugly compile-time check, but we only have the preprocessor.
// Last +4: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 3*4 + 16 + 4)
-#error "REFS_ONLY_CALLEE_SAVE_FRAME(X86) size not as expected."
+#if (FRAME_SIZE_SAVE_REFS_ONLY != 3*4 + 16 + 4)
+#error "FRAME_SIZE_SAVE_REFS_ONLY(X86) size not as expected."
#endif
END_MACRO
-MACRO0(RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME)
+MACRO0(RESTORE_SAVE_REFS_ONLY_FRAME)
addl MACRO_LITERAL(16), %esp // Unwind stack up to saved values
CFI_ADJUST_CFA_OFFSET(-16)
POP ebp // Restore callee saves (ebx is saved/restored by the upcall)
@@ -116,9 +116,9 @@
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsAndArgs)
+ * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs)
*/
-MACRO2(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME, got_reg, temp_reg)
+MACRO2(SETUP_SAVE_REFS_AND_ARGS_FRAME, got_reg, temp_reg)
PUSH edi // Save callee saves
PUSH esi
PUSH ebp
@@ -139,23 +139,23 @@
movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg)
movl (REG_VAR(temp_reg)), REG_VAR(temp_reg)
// Push save refs-and-args callee-save method.
- pushl RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg))
+ pushl RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET(REG_VAR(temp_reg))
CFI_ADJUST_CFA_OFFSET(4)
// Store esp as the top quick frame.
movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
// Ugly compile-time check, but we only have the preprocessor.
// Last +4: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 7*4 + 4*8 + 4)
-#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(X86) size not as expected."
+#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 7*4 + 4*8 + 4)
+#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(X86) size not as expected."
#endif
END_MACRO
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsAndArgs) where the method is passed in EAX.
+ * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs) where the method is passed in EAX.
*/
-MACRO0(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_EAX)
+MACRO0(SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_EAX)
// Save callee and GPR args, mixed together to agree with core spills bitmap.
PUSH edi // Save callee saves
PUSH esi
@@ -179,7 +179,7 @@
movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
END_MACRO
-MACRO0(RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME)
+MACRO0(RESTORE_SAVE_REFS_AND_ARGS_FRAME)
// Restore FPRs. EAX is still on the stack.
movsd 4(%esp), %xmm0
movsd 12(%esp), %xmm1
@@ -200,7 +200,7 @@
// Restore register and jump to routine
// Inputs: EDI contains pointer to code.
// Notes: Need to pop EAX too (restores Method*)
-MACRO0(RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME_AND_JUMP)
+MACRO0(RESTORE_SAVE_REFS_AND_ARGS_FRAME_AND_JUMP)
POP eax // Restore Method*
// Restore FPRs.
@@ -225,7 +225,7 @@
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveEverything)
*/
-MACRO2(SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME, got_reg, temp_reg)
+MACRO2(SETUP_SAVE_EVERYTHING_FRAME, got_reg, temp_reg)
// Save core registers.
PUSH edi
PUSH esi
@@ -252,19 +252,19 @@
movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg)
movl (REG_VAR(temp_reg)), REG_VAR(temp_reg)
// Push save everything callee-save method.
- pushl RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg))
+ pushl RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET(REG_VAR(temp_reg))
CFI_ADJUST_CFA_OFFSET(4)
// Store esp as the top quick frame.
movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
// Ugly compile-time check, but we only have the preprocessor.
// Last +4: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE != 7*4 + 8*8 + 12 + 4 + 4)
-#error "SAVE_EVERYTHING_CALLEE_SAVE_FRAME(X86) size not as expected."
+#if (FRAME_SIZE_SAVE_EVERYTHING != 7*4 + 8*8 + 12 + 4 + 4)
+#error "FRAME_SIZE_SAVE_EVERYTHING(X86) size not as expected."
#endif
END_MACRO
-MACRO0(RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME)
+MACRO0(RESTORE_SAVE_EVERYTHING_FRAME)
// Restore FPRs. Method and padding is still on the stack.
movsd 16(%esp), %xmm0
movsd 24(%esp), %xmm1
@@ -294,7 +294,7 @@
* exception is Thread::Current()->exception_.
*/
MACRO0(DELIVER_PENDING_EXCEPTION)
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save callee saves for throw
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx, ebx // save callee saves for throw
// Outgoing argument set up
subl MACRO_LITERAL(12), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(12)
@@ -306,20 +306,20 @@
MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name)
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx, ebx // save all registers as basis for long jump context
// Outgoing argument set up
- subl MACRO_LITERAL(12), %esp // alignment padding
+ subl MACRO_LITERAL(12), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(12)
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- call CALLVAR(cxx_name) // cxx_name(Thread*)
+ call CALLVAR(cxx_name) // cxx_name(Thread*)
UNREACHABLE
END_FUNCTION VAR(c_name)
END_MACRO
MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name)
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx, ebx // save all registers as basis for long jump context
mov %esp, %ecx
// Outgoing argument set up
subl MACRO_LITERAL(8), %esp // alignment padding
@@ -334,7 +334,7 @@
MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name)
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx, ebx // save all registers as basis for long jump context
// Outgoing argument set up
PUSH eax // alignment padding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
@@ -405,7 +405,7 @@
* pointing back to the original caller.
*/
MACRO1(INVOKE_TRAMPOLINE_BODY, cxx_name)
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME ebx, ebx
+ SETUP_SAVE_REFS_AND_ARGS_FRAME ebx, ebx
movl %esp, %edx // remember SP
// Outgoing argument set up
@@ -731,7 +731,7 @@
MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
subl MACRO_LITERAL(8), %esp // push padding
CFI_ADJUST_CFA_OFFSET(8)
@@ -741,14 +741,14 @@
call CALLVAR(cxx_name) // cxx_name(arg1, Thread*)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(TWO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
PUSH eax // push padding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
@@ -758,14 +758,14 @@
call CALLVAR(cxx_name) // cxx_name(arg1, arg2, Thread*)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
@@ -775,14 +775,14 @@
call CALLVAR(cxx_name) // cxx_name(arg1, arg2, arg3, Thread*)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(FOUR_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME_PRESERVE_GOT_REG ebx, ebx // save ref containing registers for GC
+ SETUP_SAVE_REFS_ONLY_FRAME_PRESERVE_GOT_REG ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
subl MACRO_LITERAL(12), %esp // alignment padding
@@ -796,16 +796,16 @@
call CALLVAR(cxx_name) // cxx_name(arg1, arg2, arg3, arg4, Thread*)
addl MACRO_LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %ecx // get referrer
+ mov FRAME_SIZE_SAVE_REFS_ONLY(%esp), %ecx // get referrer
PUSH eax // push padding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
@@ -814,16 +814,16 @@
call CALLVAR(cxx_name) // cxx_name(arg1, referrer, Thread*)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(TWO_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %edx // get referrer
+ mov FRAME_SIZE_SAVE_REFS_ONLY(%esp), %edx // get referrer
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH edx // pass referrer
@@ -832,16 +832,16 @@
call CALLVAR(cxx_name) // cxx_name(arg1, arg2, referrer, Thread*)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %ebx // get referrer
+ mov FRAME_SIZE_SAVE_REFS_ONLY(%esp), %ebx // get referrer
subl MACRO_LITERAL(12), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
@@ -854,7 +854,7 @@
// Thread*)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
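The three *_REF_DOWNCALL macros above share one trick: after SETUP_SAVE_REFS_ONLY_FRAME, the managed caller's ArtMethod* (the "referrer") is the word exactly FRAME_SIZE_SAVE_REFS_ONLY above %esp, because ART keeps each managed frame's ArtMethod* in its lowest stack slot. The layout arithmetic, as a sketch:

#include <cstdint>

// x86 value of FRAME_SIZE_SAVE_REFS_ONLY from this diff.
constexpr uintptr_t kFrameSizeSaveRefsOnly = 32;

// After the stub pushes its own kSaveRefsOnly frame, the caller's method
// slot sits one full frame size above the new stack pointer -- this is
// the address that `mov FRAME_SIZE_SAVE_REFS_ONLY(%esp), %ecx` loads from.
uintptr_t ReferrerSlotAddress(uintptr_t esp_after_frame_setup) {
  return esp_after_frame_setup + kFrameSizeSaveRefsOnly;
}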
@@ -970,7 +970,7 @@
ret
.Lart_quick_alloc_object_rosalloc_slow_path:
POP edi
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
PUSH eax // alignment padding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
@@ -978,9 +978,9 @@
PUSH ecx
PUSH eax
call SYMBOL(artAllocObjectFromCodeRosAlloc) // cxx_name(arg0, arg1, Thread*)
- addl LITERAL(16), %esp // pop arguments
+ addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // resotre frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_FUNCTION art_quick_alloc_object_rosalloc
@@ -988,59 +988,59 @@
//
// EAX: type_idx/return_value, ECX: ArtMethod*, EDX: the class.
MACRO1(ALLOC_OBJECT_TLAB_FAST_PATH, slowPathLabel)
- testl %edx, %edx // Check null class
+ testl %edx, %edx // Check null class
jz VAR(slowPathLabel)
- // Check class status.
+ // Check class status.
cmpl LITERAL(MIRROR_CLASS_STATUS_INITIALIZED), MIRROR_CLASS_STATUS_OFFSET(%edx)
jne VAR(slowPathLabel)
- // No fake dependence needed on x86
- // between status and flags load,
- // since each load is a load-acquire,
- // no loads reordering.
- // Check access flags has
- // kAccClassIsFinalizable
+ // No fake dependence needed on x86
+ // between status and flags load,
+ // since each load is a load-acquire,
+ // no loads reordering.
+ // Check access flags has
+ // kAccClassIsFinalizable
testl LITERAL(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), MIRROR_CLASS_ACCESS_FLAGS_OFFSET(%edx)
jnz VAR(slowPathLabel)
- movl %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread
- movl THREAD_LOCAL_END_OFFSET(%ebx), %edi // Load thread_local_end.
- subl THREAD_LOCAL_POS_OFFSET(%ebx), %edi // Compute the remaining buffer size.
- movl MIRROR_CLASS_OBJECT_SIZE_OFFSET(%edx), %esi // Load the object size.
- cmpl %edi, %esi // Check if it fits. OK to do this
- // before rounding up the object size
- // assuming the buf size alignment.
+ movl %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread
+ movl THREAD_LOCAL_END_OFFSET(%ebx), %edi // Load thread_local_end.
+ subl THREAD_LOCAL_POS_OFFSET(%ebx), %edi // Compute the remaining buffer size.
+ movl MIRROR_CLASS_OBJECT_SIZE_OFFSET(%edx), %esi // Load the object size.
+ cmpl %edi, %esi // Check if it fits. OK to do this
+ // before rounding up the object size
+ // assuming the buf size alignment.
ja VAR(slowPathLabel)
- addl LITERAL(OBJECT_ALIGNMENT_MASK), %esi // Align the size by 8. (addr + 7) & ~7.
+ addl LITERAL(OBJECT_ALIGNMENT_MASK), %esi // Align the size by 8. (addr + 7) & ~7.
andl LITERAL(OBJECT_ALIGNMENT_MASK_TOGGLED), %esi
- movl THREAD_LOCAL_POS_OFFSET(%ebx), %eax // Load thread_local_pos
- // as allocated object.
- addl %eax, %esi // Add the object size.
- movl %esi, THREAD_LOCAL_POS_OFFSET(%ebx) // Update thread_local_pos.
- addl LITERAL(1), THREAD_LOCAL_OBJECTS_OFFSET(%ebx) // Increase thread_local_objects.
- // Store the class pointer in the header.
- // No fence needed for x86.
+ movl THREAD_LOCAL_POS_OFFSET(%ebx), %eax // Load thread_local_pos
+ // as allocated object.
+ addl %eax, %esi // Add the object size.
+ movl %esi, THREAD_LOCAL_POS_OFFSET(%ebx) // Update thread_local_pos.
+ addl LITERAL(1), THREAD_LOCAL_OBJECTS_OFFSET(%ebx) // Increase thread_local_objects.
+ // Store the class pointer in the header.
+ // No fence needed for x86.
POISON_HEAP_REF edx
movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%eax)
POP edi
POP esi
- ret // Fast path succeeded.
+ ret // Fast path succeeded.
END_MACRO
// The common slow path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
MACRO1(ALLOC_OBJECT_TLAB_SLOW_PATH, cxx_name)
POP edi
POP esi
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- PUSH eax // alignment padding
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ PUSH eax // alignment padding
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx
PUSH eax
- call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
addl LITERAL(16), %esp
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_MACRO
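ALLOC_OBJECT_TLAB_FAST_PATH above is a textbook bump-pointer allocation: bail to the slow path unless the class is non-null, initialized, and non-finalizable and the object fits in the thread-local buffer; otherwise round the size up to 8 bytes, advance thread_local_pos, and install the class pointer in the new header. The same logic as a C++ sketch (struct layouts are illustrative stand-ins for the THREAD_LOCAL_* and MIRROR_* offsets):

#include <cstddef>
#include <cstdint>

// Illustrative stand-ins for the thread-local allocation buffer (TLAB)
// fields the assembly reads via THREAD_LOCAL_*_OFFSET.
struct Thread {
  uint8_t* tlab_pos;      // THREAD_LOCAL_POS_OFFSET
  uint8_t* tlab_end;      // THREAD_LOCAL_END_OFFSET
  size_t   tlab_objects;  // THREAD_LOCAL_OBJECTS_OFFSET
};

struct Class;  // opaque here
struct Object { Class* klass; /* MIRROR_OBJECT_CLASS_OFFSET */ };

constexpr size_t kObjectAlignment = 8;  // OBJECT_ALIGNMENT_MASK is 7

// Returns nullptr when the slow path (runtime call) must be taken.
Object* AllocObjectTlabFastPath(Thread* self, Class* klass, size_t object_size,
                                bool initialized, bool finalizable) {
  if (klass == nullptr) return nullptr;  // testl %edx, %edx
  if (!initialized)     return nullptr;  // MIRROR_CLASS_STATUS check
  if (finalizable)      return nullptr;  // kAccClassIsFinalizable check
  // Compare against remaining space *before* rounding -- safe because the
  // buffer end is itself aligned (per the comment in the assembly).
  size_t remaining = static_cast<size_t>(self->tlab_end - self->tlab_pos);
  if (object_size > remaining) return nullptr;  // cmpl %edi, %esi; ja slow
  size_t rounded = (object_size + kObjectAlignment - 1) & ~(kObjectAlignment - 1);
  Object* obj = reinterpret_cast<Object*>(self->tlab_pos);
  self->tlab_pos += rounded;   // bump thread_local_pos
  ++self->tlab_objects;        // bookkeeping for the GC
  obj->klass = klass;          // store class in header; no fence needed on x86
  return obj;
}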
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
@@ -1054,9 +1054,9 @@
#endif
PUSH esi
PUSH edi
- movl ART_METHOD_DEX_CACHE_TYPES_OFFSET_32(%ecx), %edx // Load dex cache resolved types array
+ movl ART_METHOD_DEX_CACHE_TYPES_OFFSET_32(%ecx), %edx // Load dex cache resolved types array
// Might need to break down into multiple instructions to get the base address in a register.
- // Load the class
+ // Load the class
movl 0(%edx, %eax, COMPRESSED_REFERENCE_SIZE), %edx
ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
.Lart_quick_alloc_object_tlab_slow_path:
@@ -1074,11 +1074,11 @@
#endif
PUSH esi
PUSH edi
- movl ART_METHOD_DEX_CACHE_TYPES_OFFSET_32(%ecx), %edx // Load dex cache resolved types array
+ movl ART_METHOD_DEX_CACHE_TYPES_OFFSET_32(%ecx), %edx // Load dex cache resolved types array
// Might need to break down into multiple instructions to get the base address in a register.
- // Load the class
+ // Load the class
movl 0(%edx, %eax, COMPRESSED_REFERENCE_SIZE), %edx
- // Read barrier for class load.
+ // Read barrier for class load.
cmpl LITERAL(0), %fs:THREAD_IS_GC_MARKING_OFFSET
jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
// Null check so that we can load the lock word.
@@ -1094,10 +1094,10 @@
PUSH eax
PUSH ecx
// Outgoing argument set up
- subl MACRO_LITERAL(8), %esp // Alignment padding
+ subl MACRO_LITERAL(8), %esp // Alignment padding
CFI_ADJUST_CFA_OFFSET(8)
- PUSH edx // Pass the class as the first param.
- call SYMBOL(artReadBarrierMark) // cxx_name(mirror::Object* obj)
+ PUSH edx // Pass the class as the first param.
+ call SYMBOL(artReadBarrierMark) // cxx_name(mirror::Object* obj)
movl %eax, %edx
addl MACRO_LITERAL(12), %esp
CFI_ADJUST_CFA_OFFSET(-12)
@@ -1154,7 +1154,7 @@
movl %ecx, %eax // restore eax
jmp .Lretry_lock
.Lslow_lock:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
@@ -1164,12 +1164,12 @@
call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_lock_object
DEFINE_FUNCTION art_quick_lock_object_no_inline
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
@@ -1179,7 +1179,7 @@
call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_lock_object_no_inline
@@ -1225,7 +1225,7 @@
movl %edx, %eax // restore eax
jmp .Lretry_unlock
.Lslow_unlock:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
@@ -1235,12 +1235,12 @@
call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_unlock_object
DEFINE_FUNCTION art_quick_unlock_object_no_inline
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
@@ -1250,7 +1250,7 @@
call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_unlock_object_no_inline
@@ -1281,7 +1281,7 @@
POP ecx
addl LITERAL(4), %esp
CFI_ADJUST_CFA_OFFSET(-4)
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx, ebx // save all registers as basis for long jump context
// Outgoing argument set up
PUSH eax // alignment padding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
@@ -1433,7 +1433,7 @@
POP edx
POP ecx
POP eax
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx, ebx // save all registers as basis for long jump context
// Outgoing argument set up
PUSH eax // alignment padding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
@@ -1456,7 +1456,7 @@
END_FUNCTION art_quick_memcpy
DEFINE_FUNCTION art_quick_test_suspend
- SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME ebx, ebx // save everything for GC
+ SETUP_SAVE_EVERYTHING_FRAME ebx, ebx // save everything for GC
// Outgoing argument set up
subl MACRO_LITERAL(12), %esp // push padding
CFI_ADJUST_CFA_OFFSET(12)
@@ -1465,7 +1465,7 @@
call SYMBOL(artTestSuspendFromCode) // (Thread*)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_EVERYTHING_FRAME // restore frame up to return address
ret // return
END_FUNCTION art_quick_test_suspend
@@ -1592,14 +1592,14 @@
// Call artSet64InstanceFromCode with 4 word size arguments and the referrer.
DEFINE_FUNCTION art_quick_set64_instance
movd %ebx, %xmm0
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
movd %xmm0, %ebx
// Outgoing argument set up
subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- pushl (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE+12)(%esp) // pass referrer
+ pushl (FRAME_SIZE_SAVE_REFS_ONLY+12)(%esp) // pass referrer
CFI_ADJUST_CFA_OFFSET(4)
PUSH ebx // pass high half of new_val
PUSH edx // pass low half of new_val
@@ -1608,7 +1608,7 @@
call SYMBOL(artSet64InstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO // return or deliver exception
END_FUNCTION art_quick_set64_instance
@@ -1617,9 +1617,9 @@
DEFINE_FUNCTION art_quick_set64_static
// TODO: Implement SETUP_GOT_NOSAVE for got_reg = ecx to avoid moving around the registers.
movd %ebx, %xmm0
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
movd %xmm0, %ebx
- mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %ecx // get referrer
+ mov FRAME_SIZE_SAVE_REFS_ONLY(%esp), %ecx // get referrer
subl LITERAL(12), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
@@ -1631,12 +1631,12 @@
call SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO // return or deliver exception
END_FUNCTION art_quick_set64_static
DEFINE_FUNCTION art_quick_proxy_invoke_handler
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_EAX
+ SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_EAX
PUSH esp // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
@@ -1646,9 +1646,9 @@
movd %eax, %xmm0 // place return value also into floating point return value
movd %edx, %xmm1
punpckldq %xmm1, %xmm0
- addl LITERAL(16 + FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE), %esp
- CFI_ADJUST_CFA_OFFSET(-(16 + FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE))
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ addl LITERAL(16 + FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY), %esp
+ CFI_ADJUST_CFA_OFFSET(-(16 + FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY))
+ RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
END_FUNCTION art_quick_proxy_invoke_handler
@@ -1693,7 +1693,7 @@
END_FUNCTION art_quick_imt_conflict_trampoline
DEFINE_FUNCTION art_quick_resolution_trampoline
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME ebx, ebx
+ SETUP_SAVE_REFS_AND_ARGS_FRAME ebx, ebx
movl %esp, %edi
PUSH EDI // pass SP. do not just PUSH ESP; that messes up unwinding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
@@ -1706,14 +1706,14 @@
CFI_ADJUST_CFA_OFFSET(-16)
test %eax, %eax // if code pointer is null goto deliver pending exception
jz 1f
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME_AND_JUMP
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME_AND_JUMP
1:
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_resolution_trampoline
DEFINE_FUNCTION art_quick_generic_jni_trampoline
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_EAX
+ SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_EAX
movl %esp, %ebp // save SP at callee-save frame
CFI_DEF_CFA_REGISTER(ebp)
subl LITERAL(5120), %esp
@@ -1792,7 +1792,7 @@
END_FUNCTION art_quick_generic_jni_trampoline
DEFINE_FUNCTION art_quick_to_interpreter_bridge
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME ebx, ebx // save frame
+ SETUP_SAVE_REFS_AND_ARGS_FRAME ebx, ebx // save frame
mov %esp, %edx // remember SP
PUSH eax // alignment padding
PUSH edx // pass SP
@@ -1822,11 +1822,11 @@
* Routine that intercepts method calls and returns.
*/
DEFINE_FUNCTION art_quick_instrumentation_entry
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME ebx, edx
+ SETUP_SAVE_REFS_AND_ARGS_FRAME ebx, edx
PUSH eax // Save eax which will be clobbered by the callee-save method.
subl LITERAL(12), %esp // Align stack.
CFI_ADJUST_CFA_OFFSET(12)
- pushl FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-4+16(%esp) // Pass LR.
+ pushl FRAME_SIZE_SAVE_REFS_AND_ARGS-4+16(%esp) // Pass LR.
CFI_ADJUST_CFA_OFFSET(4)
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
CFI_ADJUST_CFA_OFFSET(4)
@@ -1861,7 +1861,7 @@
DEFINE_FUNCTION art_quick_instrumentation_exit
pushl LITERAL(0) // Push a fake return PC as there will be none on the stack.
CFI_ADJUST_CFA_OFFSET(4)
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx
mov %esp, %ecx // Remember SP
subl LITERAL(8), %esp // Save float return value.
CFI_ADJUST_CFA_OFFSET(8)
@@ -1887,7 +1887,7 @@
movq (%esp), %xmm0 // Restore fpr return value.
addl LITERAL(8), %esp
CFI_ADJUST_CFA_OFFSET(-8)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
addl LITERAL(4), %esp // Remove fake return pc.
CFI_ADJUST_CFA_OFFSET(-4)
jmp *%ecx // Return.
@@ -1899,7 +1899,7 @@
*/
DEFINE_FUNCTION art_quick_deoptimize
PUSH ebx // Entry point for a jump. Fake that we were called.
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx, ebx
subl LITERAL(12), %esp // Align stack.
CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
@@ -1913,7 +1913,7 @@
* will long jump to the interpreter bridge.
*/
DEFINE_FUNCTION art_quick_deoptimize_from_compiled_code
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx, ebx
subl LITERAL(12), %esp // Align stack.
CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
diff --git a/runtime/arch/x86/quick_method_frame_info_x86.h b/runtime/arch/x86/quick_method_frame_info_x86.h
index a1612c3..9fcde35 100644
--- a/runtime/arch/x86/quick_method_frame_info_x86.h
+++ b/runtime/arch/x86/quick_method_frame_info_x86.h
@@ -56,12 +56,12 @@
constexpr uint32_t X86CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
return kX86CalleeSaveAlwaysSpills | kX86CalleeSaveRefSpills |
- (type == Runtime::kRefsAndArgs ? kX86CalleeSaveArgSpills : 0) |
+ (type == Runtime::kSaveRefsAndArgs ? kX86CalleeSaveArgSpills : 0) |
(type == Runtime::kSaveEverything ? kX86CalleeSaveEverythingSpills : 0);
}
constexpr uint32_t X86CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
- return (type == Runtime::kRefsAndArgs ? kX86CalleeSaveFpArgSpills : 0) |
+ return (type == Runtime::kSaveRefsAndArgs ? kX86CalleeSaveFpArgSpills : 0) |
(type == Runtime::kSaveEverything ? kX86CalleeSaveFpEverythingSpills : 0);
}
diff --git a/runtime/arch/x86_64/asm_support_x86_64.h b/runtime/arch/x86_64/asm_support_x86_64.h
index 58dc2fe..a4446d3 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.h
+++ b/runtime/arch/x86_64/asm_support_x86_64.h
@@ -19,9 +19,9 @@
#include "asm_support.h"
-#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE (64 + 4*8)
-#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE (64 + 4*8)
-#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE (112 + 12*8)
-#define FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE (144 + 16*8)
+#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVES (64 + 4*8)
+#define FRAME_SIZE_SAVE_REFS_ONLY (64 + 4*8)
+#define FRAME_SIZE_SAVE_REFS_AND_ARGS (112 + 12*8)
+#define FRAME_SIZE_SAVE_EVERYTHING (144 + 16*8)
#endif // ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
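As on x86, only the names change here; each renamed constant equals the per-push sum checked in quick_entrypoints_x86_64.S, so the equivalences reduce to arithmetic:

// Both sides of each equality are one frame size: written once as
// (GPR area + FPR area) and once as the per-push breakdown whose final
// +8 is the implicit return address.
static_assert(64 + 4 * 8 == 6 * 8 + 4 * 8 + 8 + 8,
              "save-all-callee-saves / save-refs-only: 96 bytes");
static_assert(112 + 12 * 8 == 11 * 8 + 12 * 8 + 16 + 8,
              "save-refs-and-args: 208 bytes");
static_assert(144 + 16 * 8 == 15 * 8 + 16 * 8 + 16 + 8,
              "save-everything: 272 bytes");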
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 4741ac0..ac8f523 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -42,9 +42,9 @@
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kSaveAll)
+ * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
*/
-MACRO0(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME)
+MACRO0(SETUP_SAVE_ALL_CALLEE_SAVES_FRAME)
#if defined(__APPLE__)
int3
int3
@@ -68,7 +68,7 @@
movq %xmm14, 24(%rsp)
movq %xmm15, 32(%rsp)
// R10 := ArtMethod* for save all callee save frame method.
- movq RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
+ movq RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET(%r10), %r10
// Store ArtMethod* to bottom of stack.
movq %r10, 0(%rsp)
// Store rsp as the top quick frame.
@@ -76,17 +76,17 @@
// Ugly compile-time check, but we only have the preprocessor.
// Last +8: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 6 * 8 + 4 * 8 + 8 + 8)
-#error "SAVE_ALL_CALLEE_SAVE_FRAME(X86_64) size not as expected."
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 6 * 8 + 4 * 8 + 8 + 8)
+#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(X86_64) size not as expected."
#endif
#endif // __APPLE__
END_MACRO
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsOnly)
+ * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly)
*/
-MACRO0(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME)
+MACRO0(SETUP_SAVE_REFS_ONLY_FRAME)
#if defined(__APPLE__)
int3
int3
@@ -110,7 +110,7 @@
movq %xmm14, 24(%rsp)
movq %xmm15, 32(%rsp)
// R10 := ArtMethod* for refs only callee save frame method.
- movq RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
+ movq RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET(%r10), %r10
// Store ArtMethod* to bottom of stack.
movq %r10, 0(%rsp)
// Store rsp as the top quick frame.
@@ -118,13 +118,13 @@
// Ugly compile-time check, but we only have the preprocessor.
// Last +8: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 6 * 8 + 4 * 8 + 8 + 8)
-#error "REFS_ONLY_CALLEE_SAVE_FRAME(X86_64) size not as expected."
+#if (FRAME_SIZE_SAVE_REFS_ONLY != 6 * 8 + 4 * 8 + 8 + 8)
+#error "FRAME_SIZE_SAVE_REFS_ONLY(X86_64) size not as expected."
#endif
#endif // __APPLE__
END_MACRO
-MACRO0(RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME)
+MACRO0(RESTORE_SAVE_REFS_ONLY_FRAME)
movq 8(%rsp), %xmm12
movq 16(%rsp), %xmm13
movq 24(%rsp), %xmm14
@@ -142,9 +142,9 @@
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsAndArgs)
+ * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs)
*/
-MACRO0(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME)
+MACRO0(SETUP_SAVE_REFS_AND_ARGS_FRAME)
#if defined(__APPLE__)
int3
int3
@@ -168,7 +168,7 @@
subq MACRO_LITERAL(16 + 12 * 8), %rsp
CFI_ADJUST_CFA_OFFSET(16 + 12 * 8)
// R10 := ArtMethod* for ref and args callee save frame method.
- movq RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
+ movq RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET(%r10), %r10
// Save FPRs.
movq %xmm0, 16(%rsp)
movq %xmm1, 24(%rsp)
@@ -189,13 +189,13 @@
// Ugly compile-time check, but we only have the preprocessor.
// Last +8: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 11 * 8 + 12 * 8 + 16 + 8)
-#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(X86_64) size not as expected."
+#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 11 * 8 + 12 * 8 + 16 + 8)
+#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(X86_64) size not as expected."
#endif
#endif // __APPLE__
END_MACRO
-MACRO0(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_RDI)
+MACRO0(SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_RDI)
// Save callee and GPR args, mixed together to agree with core spills bitmap.
PUSH r15 // Callee save.
PUSH r14 // Callee save.
@@ -230,7 +230,7 @@
movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
END_MACRO
-MACRO0(RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME)
+MACRO0(RESTORE_SAVE_REFS_AND_ARGS_FRAME)
// Restore FPRs.
movq 16(%rsp), %xmm0
movq 24(%rsp), %xmm1
@@ -264,7 +264,7 @@
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveEverything)
*/
-MACRO0(SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME)
+MACRO0(SETUP_SAVE_EVERYTHING_FRAME)
#if defined(__APPLE__)
int3
int3
@@ -309,20 +309,20 @@
movq %xmm14, 120(%rsp)
movq %xmm15, 128(%rsp)
// Push ArtMethod* for save everything frame method.
- pushq RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET(%r10)
+ pushq RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET(%r10)
CFI_ADJUST_CFA_OFFSET(8)
// Store rsp as the top quick frame.
movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
// Ugly compile-time check, but we only have the preprocessor.
// Last +8: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE != 15 * 8 + 16 * 8 + 16 + 8)
-#error "SAVE_EVERYTHING_CALLEE_SAVE_FRAME(X86_64) size not as expected."
+#if (FRAME_SIZE_SAVE_EVERYTHING != 15 * 8 + 16 * 8 + 16 + 8)
+#error "FRAME_SIZE_SAVE_EVERYTHING(X86_64) size not as expected."
#endif
#endif // __APPLE__
END_MACRO
-MACRO0(RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME)
+MACRO0(RESTORE_SAVE_EVERYTHING_FRAME)
// Restore FPRs. Method and padding is still on the stack.
movq 16(%rsp), %xmm0
movq 24(%rsp), %xmm1
@@ -368,7 +368,7 @@
* exception is Thread::Current()->exception_.
*/
MACRO0(DELIVER_PENDING_EXCEPTION)
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save callee saves for throw
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save callee saves for throw
// (Thread*) setup
movq %gs:THREAD_SELF_OFFSET, %rdi
call SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*)
@@ -377,7 +377,7 @@
MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name)
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(Thread*)
@@ -387,7 +387,7 @@
MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name)
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(arg1, Thread*)
@@ -397,7 +397,7 @@
MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name)
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(Thread*)
@@ -466,7 +466,7 @@
* Adapted from x86 code.
*/
MACRO1(INVOKE_TRAMPOLINE_BODY, cxx_name)
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME // save callee saves in case allocation triggers GC
+ SETUP_SAVE_REFS_AND_ARGS_FRAME // save callee saves in case allocation triggers GC
// Helper signature is always
// (method_idx, *this_object, *caller_method, *self, sp)
@@ -477,7 +477,7 @@
// save the code pointer
movq %rax, %rdi
movq %rdx, %rax
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
testq %rdi, %rdi
jz 1f
@@ -806,44 +806,44 @@
MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ SETUP_SAVE_REFS_ONLY_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(arg0, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(TWO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ SETUP_SAVE_REFS_ONLY_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ SETUP_SAVE_REFS_ONLY_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(arg0, arg1, arg2, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(FOUR_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ SETUP_SAVE_REFS_ONLY_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %r8 // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(arg1, arg2, arg3, arg4, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
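Each *_DOWNCALL expansion encodes the same convention: the dex-level arguments stay in the System V argument registers, and Thread::Current() is appended as the final parameter before the call. As a sketch, the C++ entrypoint a TWO_ARG_DOWNCALL pairs with has this shape (signature shown for illustration, following the stubs' cxx_name(arg0, arg1, Thread*) comment convention, not quoted from this diff):

// arg0 arrives in RDI, arg1 in RSI; the stub loads Thread* into RDX.
extern "C" mirror::Object* artAllocObjectFromCodeRosAlloc(uint32_t type_idx,
                                                          ArtMethod* method,
                                                          Thread* self);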
@@ -851,11 +851,11 @@
MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
movq 8(%rsp), %rsi // pass referrer
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_ONLY_FRAME
// arg0 is in rdi
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(arg0, referrer, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro)
END_FUNCTION VAR(c_name)
END_MACRO
@@ -863,11 +863,11 @@
MACRO3(TWO_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
movq 8(%rsp), %rdx // pass referrer
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_ONLY_FRAME
// arg0 and arg1 are in rdi/rsi
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
call CALLVAR(cxx_name) // (arg0, arg1, referrer, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro)
END_FUNCTION VAR(c_name)
END_MACRO
@@ -875,11 +875,11 @@
MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
movq 8(%rsp), %rcx // pass referrer
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_ONLY_FRAME
// arg0, arg1, and arg2 are in rdi/rsi/rdx
movq %gs:THREAD_SELF_OFFSET, %r8 // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(arg0, arg1, arg2, referrer, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
@@ -917,83 +917,83 @@
// Fast path rosalloc allocation.
// RDI: type_idx, RSI: ArtMethod*, RAX: return value
// RDX, RCX, R8, R9: free.
- movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx // Load dex cache resolved types array
- // Load the class (edx)
+ movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx // Load dex cache resolved types array
+ // Load the class (edx)
movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx
- testl %edx, %edx // Check null class
+ testl %edx, %edx // Check null class
jz .Lart_quick_alloc_object_rosalloc_slow_path
- // Check class status.
+ // Check class status.
cmpl LITERAL(MIRROR_CLASS_STATUS_INITIALIZED), MIRROR_CLASS_STATUS_OFFSET(%rdx)
jne .Lart_quick_alloc_object_rosalloc_slow_path
- // We don't need a fence (between the
- // the status and the access flag
- // loads) here because every load is
- // a load acquire on x86.
- // Check access flags has
- // kAccClassIsFinalizable
+ // We don't need a fence (between
+ // the status and the access flag
+ // loads) here because every load is
+ // a load acquire on x86.
+ // Check access flags has
+ // kAccClassIsFinalizable
testl LITERAL(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), MIRROR_CLASS_ACCESS_FLAGS_OFFSET(%rdx)
jnz .Lart_quick_alloc_object_rosalloc_slow_path
- // Check if the thread local
- // allocation stack has room.
- movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread
- movq THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%r8), %rcx // rcx = alloc stack top.
+ // Check if the thread local
+ // allocation stack has room.
+ movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread
+ movq THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%r8), %rcx // rcx = alloc stack top.
cmpq THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%r8), %rcx
jae .Lart_quick_alloc_object_rosalloc_slow_path
- // Load the object size
+ // Load the object size
movl MIRROR_CLASS_OBJECT_SIZE_OFFSET(%rdx), %eax
- // Check if the size is for a thread
- // local allocation
+ // Check if the size is for a thread
+ // local allocation
cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %eax
ja .Lart_quick_alloc_object_rosalloc_slow_path
- // Compute the rosalloc bracket index
- // from the size.
- // Align up the size by the rosalloc
- // bracket quantum size and divide
- // by the quantum size and subtract
- // by 1. This code is a shorter but
- // equivalent version.
+ // Compute the rosalloc bracket index
+ // from the size.
+ // Align up the size by the rosalloc
+ // bracket quantum size and divide
+ // by the quantum size and subtract
+ // 1. This code is a shorter but
+ // equivalent version.
subq LITERAL(1), %rax
shrq LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %rax
- // Load the rosalloc run (r9)
+ // Load the rosalloc run (r9)
movq THREAD_ROSALLOC_RUNS_OFFSET(%r8, %rax, __SIZEOF_POINTER__), %r9
- // Load the free list head (rax). This
- // will be the return val.
+ // Load the free list head (rax). This
+ // will be the return val.
movq (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%r9), %rax
testq %rax, %rax
jz .Lart_quick_alloc_object_rosalloc_slow_path
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber rdi and rsi.
- // Push the new object onto the thread
- // local allocation stack and
- // increment the thread local
- // allocation stack top.
+ // Push the new object onto the thread
+ // local allocation stack and
+ // increment the thread local
+ // allocation stack top.
movl %eax, (%rcx)
addq LITERAL(COMPRESSED_REFERENCE_SIZE), %rcx
movq %rcx, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%r8)
- // Load the next pointer of the head
- // and update the list head with the
- // next pointer.
+ // Load the next pointer of the head
+ // and update the list head with the
+ // next pointer.
movq ROSALLOC_SLOT_NEXT_OFFSET(%rax), %rcx
movq %rcx, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%r9)
- // Store the class pointer in the
- // header. This also overwrites the
- // next pointer. The offsets are
- // asserted to match.
+ // Store the class pointer in the
+ // header. This also overwrites the
+ // next pointer. The offsets are
+ // asserted to match.
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
POISON_HEAP_REF edx
movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%rax)
- // Decrement the size of the free list
+ // Decrement the size of the free list
decl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)(%r9)
- // No fence necessary for x86.
+ // No fence necessary for x86.
ret
.Lart_quick_alloc_object_rosalloc_slow_path:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ SETUP_SAVE_REFS_ONLY_FRAME // save ref containing registers for GC
// Outgoing argument set up
- movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call SYMBOL(artAllocObjectFromCodeRosAlloc) // cxx_name(arg0, arg1, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
+ call SYMBOL(artAllocObjectFromCodeRosAlloc) // cxx_name(arg0, arg1, Thread*)
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_FUNCTION art_quick_alloc_object_rosalloc
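The two-instruction bracket computation above (subq $1; shrq $SHIFT) is a strength-reduced align-up-then-divide. A self-contained sketch, with a stand-in shift value since the real constant lives in the generated headers:

// For size >= 1 and a power-of-two quantum Q = 1 << kShift:
//   RoundUp(size, Q) / Q - 1  ==  (size - 1) >> kShift
constexpr size_t kShift = 4;  // stand-in for ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT
constexpr size_t BracketIndex(size_t size) { return (size - 1) >> kShift; }
static_assert(BracketIndex(1) == 0 && BracketIndex(16) == 0 && BracketIndex(17) == 1,
              "bracket index matches align-up-then-divide");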
// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
@@ -1001,49 +1001,49 @@
// RDI: type_idx, RSI: ArtMethod*, RDX/EDX: the class, RAX: return value.
// RCX: scratch, r8: Thread::Current().
MACRO1(ALLOC_OBJECT_TLAB_FAST_PATH, slowPathLabel)
- testl %edx, %edx // Check null class
+ testl %edx, %edx // Check null class
jz RAW_VAR(slowPathLabel)
- // Check class status.
+ // Check class status.
cmpl LITERAL(MIRROR_CLASS_STATUS_INITIALIZED), MIRROR_CLASS_STATUS_OFFSET(%rdx)
jne RAW_VAR(slowPathLabel)
- // No fake dependence needed on x86
- // between status and flags load,
- // since each load is a load-acquire,
- // no loads reordering.
- // Check access flags has
- // kAccClassIsFinalizable
+ // No fake dependence needed on x86
+ // between status and flags load,
+ // since each load is a load-acquire,
+ // no loads reordering.
+ // Check access flags has
+ // kAccClassIsFinalizable
testl LITERAL(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), MIRROR_CLASS_ACCESS_FLAGS_OFFSET(%rdx)
jnz RAW_VAR(slowPathLabel)
- movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread
- movq THREAD_LOCAL_END_OFFSET(%r8), %rax // Load thread_local_end.
- subq THREAD_LOCAL_POS_OFFSET(%r8), %rax // Compute the remaining buffer size.
- movl MIRROR_CLASS_OBJECT_SIZE_OFFSET(%rdx), %ecx // Load the object size.
- cmpq %rax, %rcx // Check if it fits. OK to do this
- // before rounding up the object size
- // assuming the buf size alignment.
+ movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread
+ movq THREAD_LOCAL_END_OFFSET(%r8), %rax // Load thread_local_end.
+ subq THREAD_LOCAL_POS_OFFSET(%r8), %rax // Compute the remaining buffer size.
+ movl MIRROR_CLASS_OBJECT_SIZE_OFFSET(%rdx), %ecx // Load the object size.
+ cmpq %rax, %rcx // Check if it fits. OK to do this
+ // before rounding up the object size
+ // assuming the buffer size is aligned.
ja RAW_VAR(slowPathLabel)
- addl LITERAL(OBJECT_ALIGNMENT_MASK), %ecx // Align the size by 8. (addr + 7) & ~7.
+ addl LITERAL(OBJECT_ALIGNMENT_MASK), %ecx // Align the size by 8. (addr + 7) & ~7.
andl LITERAL(OBJECT_ALIGNMENT_MASK_TOGGLED), %ecx
- movq THREAD_LOCAL_POS_OFFSET(%r8), %rax // Load thread_local_pos
- // as allocated object.
- addq %rax, %rcx // Add the object size.
- movq %rcx, THREAD_LOCAL_POS_OFFSET(%r8) // Update thread_local_pos.
- addq LITERAL(1), THREAD_LOCAL_OBJECTS_OFFSET(%r8) // Increase thread_local_objects.
- // Store the class pointer in the header.
- // No fence needed for x86.
+ movq THREAD_LOCAL_POS_OFFSET(%r8), %rax // Load thread_local_pos
+ // as allocated object.
+ addq %rax, %rcx // Add the object size.
+ movq %rcx, THREAD_LOCAL_POS_OFFSET(%r8) // Update thread_local_pos.
+ addq LITERAL(1), THREAD_LOCAL_OBJECTS_OFFSET(%r8) // Increase thread_local_objects.
+ // Store the class pointer in the header.
+ // No fence needed for x86.
POISON_HEAP_REF edx
movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%rax)
- ret // Fast path succeeded.
+ ret // Fast path succeeded.
END_MACRO
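For readers less fluent in AT&T syntax, the fast path above is a plain bump-pointer allocation against the thread-local buffer. A self-contained C++ sketch, with a hypothetical TlabView standing in for the Thread fields named by the offsets:

#include <cstddef>
#include <cstdint>

struct TlabView { uint8_t* pos; uint8_t* end; size_t objects; };  // hypothetical

void* TlabAllocSketch(TlabView* tlab, size_t object_size) {
  constexpr size_t kMask = 7;                    // OBJECT_ALIGNMENT_MASK
  size_t size = (object_size + kMask) & ~kMask;  // align up to 8 bytes
  if (size > static_cast<size_t>(tlab->end - tlab->pos)) {
    return nullptr;                              // caller takes the slow path
  }
  void* result = tlab->pos;   // old thread_local_pos is the new object
  tlab->pos += size;          // bump thread_local_pos
  ++tlab->objects;            // bump thread_local_objects
  return result;              // caller stores the class pointer into the header
}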
// The common slow path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
MACRO1(ALLOC_OBJECT_TLAB_SLOW_PATH, cxx_name)
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ SETUP_SAVE_REFS_ONLY_FRAME // save ref containing registers for GC
// Outgoing argument set up
- movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
+ call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_MACRO
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
@@ -1145,18 +1145,18 @@
jnz .Lretry_lock // cmpxchg failed retry
ret
.Lslow_lock:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_ONLY_FRAME
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_lock_object
DEFINE_FUNCTION art_quick_lock_object_no_inline
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_ONLY_FRAME
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_lock_object_no_inline
@@ -1196,18 +1196,18 @@
#endif
ret
.Lslow_unlock:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_ONLY_FRAME
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_unlock_object
DEFINE_FUNCTION art_quick_unlock_object_no_inline
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_ONLY_FRAME
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_unlock_object_no_inline
@@ -1233,7 +1233,7 @@
CFI_ADJUST_CFA_OFFSET(-8)
POP rsi // Pop arguments
POP rdi
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
mov %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*)
UNREACHABLE
@@ -1410,7 +1410,7 @@
POP rsi
POP rdi
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // Save all registers as basis for long jump context.
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // Save all registers as basis for long jump context.
// Outgoing argument set up.
movq %rdx, %rsi // Pass arg 2 = value.
@@ -1427,11 +1427,11 @@
END_FUNCTION art_quick_memcpy
DEFINE_FUNCTION art_quick_test_suspend
- SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME // save everything for GC
+ SETUP_SAVE_EVERYTHING_FRAME // save everything for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
call SYMBOL(artTestSuspendFromCode) // (Thread*)
- RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_EVERYTHING_FRAME // restore frame up to return address
ret
END_FUNCTION art_quick_test_suspend
@@ -1473,22 +1473,22 @@
DEFINE_FUNCTION art_quick_set64_static
// new_val is already in %rdx
movq 8(%rsp), %rsi // pass referrer
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_ONLY_FRAME
// field_idx is in rdi
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
call SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO // return or deliver exception
END_FUNCTION art_quick_set64_static
DEFINE_FUNCTION art_quick_proxy_invoke_handler
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_RDI
+ SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_RDI
movq %gs:THREAD_SELF_OFFSET, %rdx // Pass Thread::Current().
movq %rsp, %rcx // Pass SP.
call SYMBOL(artQuickProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
movq %rax, %xmm0 // Copy return value in case of float returns.
RETURN_OR_DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_proxy_invoke_handler
@@ -1531,13 +1531,13 @@
END_FUNCTION art_quick_imt_conflict_trampoline
DEFINE_FUNCTION art_quick_resolution_trampoline
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_AND_ARGS_FRAME
movq %gs:THREAD_SELF_OFFSET, %rdx
movq %rsp, %rcx
call SYMBOL(artQuickResolutionTrampoline) // (called, receiver, Thread*, SP)
movq %rax, %r10 // Remember returned code pointer in R10.
movq (%rsp), %rdi // Load called method into RDI.
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
testq %r10, %r10 // If code pointer is null goto deliver pending exception.
jz 1f
jmp *%r10 // Tail call into method.
@@ -1622,7 +1622,7 @@
* Called to do a generic JNI down-call
*/
DEFINE_FUNCTION art_quick_generic_jni_trampoline
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_RDI
+ SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_RDI
movq %rsp, %rbp // save SP at (old) callee-save frame
CFI_DEF_CFA_REGISTER(rbp)
@@ -1755,11 +1755,11 @@
* RSI, RDX, RCX, R8, R9 are arguments to that method.
*/
DEFINE_FUNCTION art_quick_to_interpreter_bridge
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME // Set up frame and save arguments.
- movq %gs:THREAD_SELF_OFFSET, %rsi // RSI := Thread::Current()
- movq %rsp, %rdx // RDX := sp
+ SETUP_SAVE_REFS_AND_ARGS_FRAME // Set up frame and save arguments.
+ movq %gs:THREAD_SELF_OFFSET, %rsi // RSI := Thread::Current()
+ movq %rsp, %rdx // RDX := sp
call SYMBOL(artQuickToInterpreterBridge) // (method, Thread*, SP)
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME // TODO: no need to restore arguments in this case.
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME // TODO: no need to restore arguments in this case.
movq %rax, %xmm0 // Place return value also into floating point return value.
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
END_FUNCTION art_quick_to_interpreter_bridge
@@ -1772,12 +1772,12 @@
int3
int3
#else
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_AND_ARGS_FRAME
movq %rdi, %r12 // Preserve method pointer in a callee-save.
movq %gs:THREAD_SELF_OFFSET, %rdx // Pass thread.
- movq FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-8(%rsp), %rcx // Pass return PC.
+ movq FRAME_SIZE_SAVE_REFS_AND_ARGS-8(%rsp), %rcx // Pass return PC.
call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, LR)
@@ -1785,9 +1785,9 @@
movq %r12, %rdi // Reload method pointer.
leaq art_quick_instrumentation_exit(%rip), %r12 // Set up return through instrumentation
- movq %r12, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-8(%rsp) // exit.
+ movq %r12, FRAME_SIZE_SAVE_REFS_AND_ARGS-8(%rsp) // exit.
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
jmp *%rax // Tail call to intended method.
#endif // __APPLE__
@@ -1796,7 +1796,7 @@
DEFINE_FUNCTION art_quick_instrumentation_exit
pushq LITERAL(0) // Push a fake return PC as there will be none on the stack.
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ SETUP_SAVE_REFS_ONLY_FRAME
// We need to save rax and xmm0. We could use a callee-save from SETUP_REF_ONLY, but then
// we would need to fully restore it. As there are a good number of callee-save registers, it
@@ -1823,7 +1823,7 @@
CFI_ADJUST_CFA_OFFSET(-8)
POP rax // Restore integer result.
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_SAVE_REFS_ONLY_FRAME
addq LITERAL(8), %rsp // Drop fake return pc.
@@ -1837,7 +1837,7 @@
DEFINE_FUNCTION art_quick_deoptimize
pushq %rsi // Entry point for a jump. Fake that we were called.
// Use hidden arg.
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
// Stack should be aligned now.
movq %gs:THREAD_SELF_OFFSET, %rdi // Pass Thread.
call SYMBOL(artDeoptimize) // artDeoptimize(Thread*)
@@ -1849,7 +1849,7 @@
* will long jump to the interpreter bridge.
*/
DEFINE_FUNCTION art_quick_deoptimize_from_compiled_code
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
// Stack should be aligned now.
movq %gs:THREAD_SELF_OFFSET, %rdi // Pass Thread.
call SYMBOL(artDeoptimizeFromCompiledCode) // artDeoptimizeFromCompiledCode(Thread*)
diff --git a/runtime/arch/x86_64/quick_method_frame_info_x86_64.h b/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
index aa75b56..867522f 100644
--- a/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
+++ b/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
@@ -55,13 +55,13 @@
constexpr uint32_t X86_64CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
return kX86_64CalleeSaveAlwaysSpills | kX86_64CalleeSaveRefSpills |
- (type == Runtime::kRefsAndArgs ? kX86_64CalleeSaveArgSpills : 0) |
+ (type == Runtime::kSaveRefsAndArgs ? kX86_64CalleeSaveArgSpills : 0) |
(type == Runtime::kSaveEverything ? kX86_64CalleeSaveEverythingSpills : 0);
}
constexpr uint32_t X86_64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
return kX86_64CalleeSaveFpSpills |
- (type == Runtime::kRefsAndArgs ? kX86_64CalleeSaveFpArgSpills : 0) |
+ (type == Runtime::kSaveRefsAndArgs ? kX86_64CalleeSaveFpArgSpills : 0) |
(type == Runtime::kSaveEverything ? kX86_64CalleeSaveFpEverythingSpills : 0);
}
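These spill masks feed straight into the FRAME_SIZE_* constants that the assembly above checks against. The derivation elsewhere in this header looks roughly like the following (a sketch, not part of this diff; POPCOUNT, RoundUp, and kStackAlignment are ART's existing helpers):

constexpr uint32_t X86_64CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
  return RoundUp((POPCOUNT(X86_64CalleeSaveCoreSpills(type)) /* gprs */ +
                  POPCOUNT(X86_64CalleeSaveFpSpills(type)) /* fprs */ +
                  1 /* ArtMethod* */) * 8 /* bytes per slot */,
                 kStackAlignment);
}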
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 2421246..1659f33 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -322,11 +322,11 @@
return "<runtime internal resolution method>";
} else if (this == runtime->GetImtConflictMethod()) {
return "<runtime internal imt conflict method>";
- } else if (this == runtime->GetCalleeSaveMethod(Runtime::kSaveAll)) {
+ } else if (this == runtime->GetCalleeSaveMethod(Runtime::kSaveAllCalleeSaves)) {
return "<runtime internal callee-save all registers method>";
- } else if (this == runtime->GetCalleeSaveMethod(Runtime::kRefsOnly)) {
+ } else if (this == runtime->GetCalleeSaveMethod(Runtime::kSaveRefsOnly)) {
return "<runtime internal callee-save reference registers method>";
- } else if (this == runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs)) {
+ } else if (this == runtime->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs)) {
return "<runtime internal callee-save reference and argument registers method>";
} else {
return "<unknown runtime internal method>";
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 0619af8..d4cee44 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -20,6 +20,7 @@
#if defined(__cplusplus)
#include "art_method.h"
#include "gc/allocator/rosalloc.h"
+#include "gc/heap.h"
#include "jit/jit.h"
#include "lock_word.h"
#include "mirror/class.h"
@@ -174,10 +175,17 @@
#define MIRROR_CLASS_OBJECT_SIZE_OFFSET (100 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_OBJECT_SIZE_OFFSET,
art::mirror::Class::ObjectSizeOffset().Int32Value())
+#define MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET (104 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET,
+ art::mirror::Class::PrimitiveTypeOffset().Int32Value())
#define MIRROR_CLASS_STATUS_OFFSET (112 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_STATUS_OFFSET,
art::mirror::Class::StatusOffset().Int32Value())
+#define PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT 16
+ADD_TEST_EQ(PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT,
+ static_cast<int>(art::mirror::Class::kPrimitiveTypeSizeShiftShift))
+
// Array offsets.
#define MIRROR_ARRAY_LENGTH_OFFSET MIRROR_OBJECT_HEADER_SIZE
ADD_TEST_EQ(MIRROR_ARRAY_LENGTH_OFFSET, art::mirror::Array::LengthOffset().Int32Value())
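The new PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT constant lets stubs recover an array's component-size shift without a table lookup: the shift is packed into the upper 16 bits of the class's primitive-type word. A sketch of the intended decode (the load offset is the MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET added above):

// prim_type is the 32-bit word at MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET.
inline uint32_t ComponentSizeShift(uint32_t prim_type) {
  return prim_type >> 16;  // PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT
}
// component size in bytes == 1u << ComponentSizeShift(prim_type)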
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index cea7046..e48eca9 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -52,7 +52,6 @@
kAllocatorTagMonitorList,
kAllocatorTagClassTable,
kAllocatorTagInternTable,
- kAllocatorTagLambdaBoxTable,
kAllocatorTagMaps,
kAllocatorTagLOS,
kAllocatorTagSafeMap,
diff --git a/runtime/base/array_slice.h b/runtime/base/array_slice.h
index 19ad302..32283d0 100644
--- a/runtime/base/array_slice.h
+++ b/runtime/base/array_slice.h
@@ -129,6 +129,10 @@
return element_size_;
}
+ bool Contains(const T* element) const {
+ return &AtUnchecked(0) <= element && element < &AtUnchecked(size_);
+ }
+
private:
T& AtUnchecked(size_t index) {
return *reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(array_) + index * element_size_);
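Contains() is deliberately cheap address-range membership: two pointer comparisons against the slice's backing storage, with one-past-the-end excluded. A minimal illustration, assuming the (array, size, element_size) constructor this header provides:

int backing[4] = {0, 1, 2, 3};
ArraySlice<int> slice(backing, /* size */ 4u, /* element_size */ sizeof(int));
CHECK(slice.Contains(&backing[2]));   // address lies inside the slice
CHECK(!slice.Contains(backing + 4));  // one-past-the-end is excluded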
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 6f689d7..264a530 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -62,7 +62,6 @@
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
-Mutex* Locks::lambda_table_lock_ = nullptr;
Uninterruptible Roles::uninterruptible_;
struct AllMutexData {
@@ -963,7 +962,6 @@
DCHECK(thread_suspend_count_lock_ != nullptr);
DCHECK(trace_lock_ != nullptr);
DCHECK(unexpected_signal_lock_ != nullptr);
- DCHECK(lambda_table_lock_ != nullptr);
} else {
// Create global locks in level order from highest lock level to lowest.
LockLevel current_lock_level = kInstrumentEntrypointsLock;
@@ -1074,10 +1072,6 @@
DCHECK(reference_queue_soft_references_lock_ == nullptr);
reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level);
- UPDATE_CURRENT_LOCK_LEVEL(kLambdaTableLock);
- DCHECK(lambda_table_lock_ == nullptr);
- lambda_table_lock_ = new Mutex("lambda table lock", current_lock_level);
-
UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
DCHECK(abort_lock_ == nullptr);
abort_lock_ = new Mutex("abort lock", current_lock_level, true);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 3d7624d..d0dc886 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -60,7 +60,6 @@
kUnexpectedSignalLock,
kThreadSuspendCountLock,
kAbortLock,
- kLambdaTableLock,
kJdwpSocketLock,
kRegionSpaceRegionLock,
kRosAllocGlobalLock,
@@ -88,7 +87,6 @@
kTracingUniqueMethodsLock,
kTracingStreamingLock,
kDeoptimizedMethodsLock,
- kJitCodeCacheLock,
kClassLoaderClassesLock,
kDefaultMutexLevel,
kMarkSweepLargeObjectLock,
@@ -99,6 +97,7 @@
kMonitorPoolLock,
kMethodVerifiersLock,
kClassLinkerClassesLock, // TODO rename.
+ kJitCodeCacheLock,
kBreakpointLock,
kMonitorLock,
kMonitorListLock,
@@ -690,10 +689,6 @@
// Have an exclusive logging thread.
static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
-
- // Allow reader-writer mutual exclusion on the boxed table of lambda objects.
- // TODO: this should be a RW mutex lock, except that ConditionVariables don't work with it.
- static Mutex* lambda_table_lock_ ACQUIRED_AFTER(mutator_lock_);
};
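Enum order here is the lock hierarchy the debug build enforces: acquiring a mutex at a level greater than or equal to one already held is a violation. Raising kJitCodeCacheLock above kClassLinkerClassesLock therefore forbids taking the JIT code cache lock while classlinker_classes_lock_ is held, which is what the CleanupClassLoaders change in class_linker.cc below accommodates. A simplified, self-contained sketch of the invariant:

#include <cassert>
#include <vector>

thread_local std::vector<int> g_held_levels;  // hypothetical bookkeeping

void AcquireChecked(int level) {
  for (int held : g_held_levels) {
    assert(level < held && "lock hierarchy violation");
  }
  g_held_levels.push_back(level);
  // ... acquire the underlying mutex ...
}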
class Roles {
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 534f53d..4d48da6 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -3560,32 +3560,40 @@
}
LOG(INFO) << "Loaded class " << descriptor << source;
}
- WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- mirror::ClassLoader* const class_loader = klass->GetClassLoader();
- ClassTable* const class_table = InsertClassTableForClassLoader(class_loader);
- mirror::Class* existing = class_table->Lookup(descriptor, hash);
- if (existing != nullptr) {
- return existing;
- }
- if (kIsDebugBuild &&
- !klass->IsTemp() &&
- class_loader == nullptr &&
- dex_cache_boot_image_class_lookup_required_) {
- // Check a class loaded with the system class loader matches one in the image if the class
- // is in the image.
- existing = LookupClassFromBootImage(descriptor);
+ {
+ WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ mirror::ClassLoader* const class_loader = klass->GetClassLoader();
+ ClassTable* const class_table = InsertClassTableForClassLoader(class_loader);
+ mirror::Class* existing = class_table->Lookup(descriptor, hash);
if (existing != nullptr) {
- CHECK_EQ(klass, existing);
+ return existing;
+ }
+ if (kIsDebugBuild &&
+ !klass->IsTemp() &&
+ class_loader == nullptr &&
+ dex_cache_boot_image_class_lookup_required_) {
+ // Check that a class loaded with the system class loader matches one in the image if the class
+ // is in the image.
+ existing = LookupClassFromBootImage(descriptor);
+ if (existing != nullptr) {
+ CHECK_EQ(klass, existing);
+ }
+ }
+ VerifyObject(klass);
+ class_table->InsertWithHash(klass, hash);
+ if (class_loader != nullptr) {
+ // This is necessary because we need to have the card dirtied for remembered sets.
+ Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
+ }
+ if (log_new_class_table_roots_) {
+ new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
}
}
- VerifyObject(klass);
- class_table->InsertWithHash(klass, hash);
- if (class_loader != nullptr) {
- // This is necessary because we need to have the card dirtied for remembered sets.
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
- }
- if (log_new_class_table_roots_) {
- new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
+ if (kIsDebugBuild) {
+ // Test that copied methods correctly can find their holder.
+ for (ArtMethod& method : klass->GetCopiedMethods(image_pointer_size_)) {
+ CHECK_EQ(GetHoldingClassOfCopiedMethod(&method), klass);
+ }
}
return nullptr;
}
@@ -4622,18 +4630,23 @@
} else {
value_it.ReadValueToField<false>(field);
}
+ if (self->IsExceptionPending()) {
+ break;
+ }
DCHECK(!value_it.HasNext() || field_it.HasNextStaticField());
}
}
}
- ArtMethod* clinit = klass->FindClassInitializer(image_pointer_size_);
- if (clinit != nullptr) {
- CHECK(can_init_statics);
- JValue result;
- clinit->Invoke(self, nullptr, 0, &result, "V");
- }
+ if (!self->IsExceptionPending()) {
+ ArtMethod* clinit = klass->FindClassInitializer(image_pointer_size_);
+ if (clinit != nullptr) {
+ CHECK(can_init_statics);
+ JValue result;
+ clinit->Invoke(self, nullptr, 0, &result, "V");
+ }
+ }
self->AllowThreadSuspension();
uint64_t t1 = NanoTime();
@@ -8105,19 +8118,27 @@
void ClassLinker::CleanupClassLoaders() {
Thread* const self = Thread::Current();
- WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
- for (auto it = class_loaders_.begin(); it != class_loaders_.end(); ) {
- const ClassLoaderData& data = *it;
- // Need to use DecodeJObject so that we get null for cleared JNI weak globals.
- auto* const class_loader = down_cast<mirror::ClassLoader*>(self->DecodeJObject(data.weak_root));
- if (class_loader != nullptr) {
- ++it;
- } else {
- VLOG(class_linker) << "Freeing class loader";
- DeleteClassLoader(self, data);
- it = class_loaders_.erase(it);
+ std::vector<ClassLoaderData> to_delete;
+ // Do the delete outside the lock to avoid a lock hierarchy violation in the JIT code cache.
+ {
+ WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
+ for (auto it = class_loaders_.begin(); it != class_loaders_.end(); ) {
+ const ClassLoaderData& data = *it;
+ // Need to use DecodeJObject so that we get null for cleared JNI weak globals.
+ auto* const class_loader =
+ down_cast<mirror::ClassLoader*>(self->DecodeJObject(data.weak_root));
+ if (class_loader != nullptr) {
+ ++it;
+ } else {
+ VLOG(class_linker) << "Freeing class loader";
+ to_delete.push_back(data);
+ it = class_loaders_.erase(it);
+ }
}
}
+ for (ClassLoaderData& data : to_delete) {
+ DeleteClassLoader(self, data);
+ }
}
std::set<DexCacheResolvedClasses> ClassLinker::GetResolvedClasses(bool ignore_boot_classes) {
@@ -8236,6 +8257,33 @@
return ret;
}
+class ClassLinker::FindVirtualMethodHolderVisitor : public ClassVisitor {
+ public:
+ FindVirtualMethodHolderVisitor(const ArtMethod* method, PointerSize pointer_size)
+ : method_(method),
+ pointer_size_(pointer_size) {}
+
+ bool operator()(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE {
+ if (klass->GetVirtualMethodsSliceUnchecked(pointer_size_).Contains(method_)) {
+ holder_ = klass;
+ }
+ // Return false to stop searching if holder_ is not null.
+ return holder_ == nullptr;
+ }
+
+ mirror::Class* holder_ = nullptr;
+ const ArtMethod* const method_;
+ const PointerSize pointer_size_;
+};
+
+mirror::Class* ClassLinker::GetHoldingClassOfCopiedMethod(ArtMethod* method) {
+ ScopedTrace trace(__FUNCTION__); // Since this function is slow, have a trace to notify people.
+ CHECK(method->IsCopied());
+ FindVirtualMethodHolderVisitor visitor(method, image_pointer_size_);
+ VisitClasses(&visitor);
+ return visitor.holder_;
+}
+
// Instantiate ResolveMethod.
template ArtMethod* ClassLinker::ResolveMethod<ClassLinker::kForceICCECheck>(
const DexFile& dex_file,
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index fcc6b23..c3ab8c5 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -648,6 +648,10 @@
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
+ // Get the actual holding class for a copied method. Pretty slow, don't call often.
+ mirror::Class* GetHoldingClassOfCopiedMethod(ArtMethod* method)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
struct DexCacheData {
// Weak root to the DexCache. Note: Do not decode this unnecessarily or else class unloading may
// not work properly.
@@ -676,7 +680,6 @@
SHARED_REQUIRES(Locks::mutator_lock_);
static void DeleteClassLoader(Thread* self, const ClassLoaderData& data)
- REQUIRES(Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
void VisitClassLoaders(ClassLoaderVisitor* visitor) const
@@ -1168,6 +1171,7 @@
// Image pointer size.
PointerSize image_pointer_size_;
+ class FindVirtualMethodHolderVisitor;
friend class ImageDumper; // for DexLock
friend class ImageWriter; // for GetClassRoots
friend class JniCompilerTest; // for GetRuntimeQuickGenericJniStub
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 912a74a..99732c6 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -640,14 +640,6 @@
"Attempt to write to null array");
break;
}
- case Instruction::INVOKE_LAMBDA:
- case Instruction::BOX_LAMBDA:
- case Instruction::UNBOX_LAMBDA:
- case Instruction::LIBERATE_VARIABLE: {
- ThrowException("Ljava/lang/NullPointerException;", nullptr,
- "Using a null lambda");
- break;
- }
case Instruction::MONITOR_ENTER:
case Instruction::MONITOR_EXIT: {
ThrowException("Ljava/lang/NullPointerException;", nullptr,
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 9f3ff3f..2a5198b 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -1454,6 +1454,15 @@
}
}
+static size_t GetMethodNumArgRegistersIncludingThis(ArtMethod* method)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t num_registers = ArtMethod::NumArgRegisters(method->GetShorty());
+ if (!method->IsStatic()) {
+ ++num_registers;
+ }
+ return num_registers;
+}
+
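The helper exists because ArtMethod::NumArgRegisters() counts only the shorty's explicit arguments, while the JDWP slot math needs the implicit |this| for instance methods, which the two call sites below previously omitted. A standalone sketch of the counting rule (ART convention: shorty[0] is the return type; J and D each occupy two registers):

static size_t NumArgRegistersSketch(const char* shorty, bool is_static) {
  size_t regs = is_static ? 0u : 1u;  // implicit |this|
  for (const char* p = shorty + 1; *p != '\0'; ++p) {
    regs += (*p == 'J' || *p == 'D') ? 2u : 1u;
  }
  return regs;
}
// NumArgRegistersSketch("VJL", /*is_static=*/false) == 1 + 2 + 1 == 4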
/*
* Circularly shifts registers so that arguments come last. Reverts
* slots to dex style argument placement.
@@ -1465,7 +1474,7 @@
// We should not get here for a method without code (native, proxy or abstract). Log it and
// return the slot as is since all registers are arguments.
LOG(WARNING) << "Trying to demangle slot for method without code " << PrettyMethod(m);
- uint16_t vreg_count = ArtMethod::NumArgRegisters(m->GetShorty());
+ uint16_t vreg_count = GetMethodNumArgRegistersIncludingThis(m);
if (slot < vreg_count) {
*error = JDWP::ERR_NONE;
return slot;
@@ -1637,8 +1646,7 @@
// arg_count considers doubles and longs to take 2 units.
// variable_count considers everything to take 1 unit.
- std::string shorty(m->GetShorty());
- expandBufAdd4BE(pReply, ArtMethod::NumArgRegisters(shorty));
+ expandBufAdd4BE(pReply, GetMethodNumArgRegistersIncludingThis(m));
// We don't know the total number of variables yet, so leave a blank and update it later.
size_t variable_count_offset = expandBufGetLength(pReply);
diff --git a/runtime/dex_instruction-inl.h b/runtime/dex_instruction-inl.h
index e160a10..3d0fea0 100644
--- a/runtime/dex_instruction-inl.h
+++ b/runtime/dex_instruction-inl.h
@@ -49,6 +49,8 @@
case k32x: return true;
case k35c: return true;
case k3rc: return true;
+ case k45cc: return true;
+ case k4rcc: return true;
case k51l: return true;
default: return false;
}
@@ -79,6 +81,8 @@
case k32x: return VRegA_32x();
case k35c: return VRegA_35c();
case k3rc: return VRegA_3rc();
+ case k45cc: return VRegA_45cc();
+ case k4rcc: return VRegA_4rcc();
case k51l: return VRegA_51l();
default:
LOG(FATAL) << "Tried to access vA of instruction " << Name() << " which has no A operand.";
@@ -206,6 +210,16 @@
return InstAA(inst_data);
}
+inline uint4_t Instruction::VRegA_45cc(uint16_t inst_data) const {
+ DCHECK_EQ(FormatOf(Opcode()), k45cc);
+ return InstB(inst_data); // This is labeled A in the spec.
+}
+
+inline uint8_t Instruction::VRegA_4rcc(uint16_t inst_data) const {
+ DCHECK_EQ(FormatOf(Opcode()), k4rcc);
+ return InstAA(inst_data);
+}
+
//------------------------------------------------------------------------------
// VRegB
//------------------------------------------------------------------------------
@@ -223,13 +237,14 @@
case k22t: return true;
case k22x: return true;
case k23x: return true;
- case k25x: return true;
case k31c: return true;
case k31i: return true;
case k31t: return true;
case k32x: return true;
case k35c: return true;
case k3rc: return true;
+ case k45cc: return true;
+ case k4rcc: return true;
case k51l: return true;
default: return false;
}
@@ -253,13 +268,14 @@
case k22t: return VRegB_22t();
case k22x: return VRegB_22x();
case k23x: return VRegB_23x();
- case k25x: return VRegB_25x();
case k31c: return VRegB_31c();
case k31i: return VRegB_31i();
case k31t: return VRegB_31t();
case k32x: return VRegB_32x();
case k35c: return VRegB_35c();
case k3rc: return VRegB_3rc();
+ case k45cc: return VRegB_45cc();
+ case k4rcc: return VRegB_4rcc();
case k51l: return VRegB_51l();
default:
LOG(FATAL) << "Tried to access vB of instruction " << Name() << " which has no B operand.";
@@ -331,12 +347,6 @@
return static_cast<uint8_t>(Fetch16(1) & 0xff);
}
-// Number of additional registers in this instruction. # of var arg registers = this value + 1.
-inline uint4_t Instruction::VRegB_25x() const {
- DCHECK_EQ(FormatOf(Opcode()), k25x);
- return InstB(Fetch16(0));
-}
-
inline uint32_t Instruction::VRegB_31c() const {
DCHECK_EQ(FormatOf(Opcode()), k31c);
return Fetch32(1);
@@ -367,6 +377,16 @@
return Fetch16(1);
}
+inline uint16_t Instruction::VRegB_45cc() const {
+ DCHECK_EQ(FormatOf(Opcode()), k45cc);
+ return Fetch16(1);
+}
+
+inline uint16_t Instruction::VRegB_4rcc() const {
+ DCHECK_EQ(FormatOf(Opcode()), k4rcc);
+ return Fetch16(1);
+}
+
inline uint64_t Instruction::VRegB_51l() const {
DCHECK_EQ(FormatOf(Opcode()), k51l);
uint64_t vB_wide = Fetch32(1) | ((uint64_t) Fetch32(3) << 32);
@@ -383,9 +403,10 @@
case k22s: return true;
case k22t: return true;
case k23x: return true;
- case k25x: return true;
case k35c: return true;
case k3rc: return true;
+ case k45cc: return true;
+ case k4rcc: return true;
default: return false;
}
}
@@ -397,9 +418,10 @@
case k22s: return VRegC_22s();
case k22t: return VRegC_22t();
case k23x: return VRegC_23x();
- case k25x: return VRegC_25x();
case k35c: return VRegC_35c();
case k3rc: return VRegC_3rc();
+ case k45cc: return VRegC_45cc();
+ case k4rcc: return VRegC_4rcc();
default:
LOG(FATAL) << "Tried to access vC of instruction " << Name() << " which has no C operand.";
exit(EXIT_FAILURE);
@@ -431,11 +453,6 @@
return static_cast<uint8_t>(Fetch16(1) >> 8);
}
-inline uint4_t Instruction::VRegC_25x() const {
- DCHECK_EQ(FormatOf(Opcode()), k25x);
- return static_cast<uint4_t>(Fetch16(1) & 0xf);
-}
-
inline uint4_t Instruction::VRegC_35c() const {
DCHECK_EQ(FormatOf(Opcode()), k35c);
return static_cast<uint4_t>(Fetch16(2) & 0x0f);
@@ -446,79 +463,51 @@
return Fetch16(2);
}
-inline bool Instruction::HasVarArgs35c() const {
- return FormatOf(Opcode()) == k35c;
+inline uint4_t Instruction::VRegC_45cc() const {
+ DCHECK_EQ(FormatOf(Opcode()), k45cc);
+ return static_cast<uint4_t>(Fetch16(2) & 0x0f);
}
-inline bool Instruction::HasVarArgs25x() const {
- return FormatOf(Opcode()) == k25x;
+inline uint16_t Instruction::VRegC_4rcc() const {
+ DCHECK_EQ(FormatOf(Opcode()), k4rcc);
+ return Fetch16(2);
}
-// Copies all of the parameter registers into the arg array. Check the length with VRegB_25x()+2.
-inline void Instruction::GetAllArgs25x(uint32_t (&arg)[kMaxVarArgRegs25x]) const {
- DCHECK_EQ(FormatOf(Opcode()), k25x);
-
- /*
- * The opcode looks like this:
- * op vC, {vD, vE, vF, vG}
- *
- * and vB is the (implicit) register count (0-4) which denotes how far from vD to vG to read.
- *
- * vC is always present, so with "op vC, {}" the register count will be 0 even though vC
- * is valid.
- *
- * The exact semantic meanings of vC:vG is up to the instruction using the format.
- *
- * Encoding drawing as a bit stream:
- * (Note that each uint16 is little endian, and each register takes up 4 bits)
- *
- * uint16 ||| uint16
- * 7-0 15-8 7-0 15-8
- * |------|-----|||-----|-----|
- * |opcode|vB|vG|||vD|vC|vF|vE|
- * |------|-----|||-----|-----|
- */
- uint16_t reg_list = Fetch16(1);
- uint4_t count = VRegB_25x();
- DCHECK_LE(count, 4U) << "Invalid arg count in 25x (" << count << ")";
-
- /*
- * TODO(iam): Change instruction encoding to one of:
- *
- * - (X) vA = args count, vB = closure register, {vC..vG} = args (25x)
- * - (Y) vA = args count, vB = method index, {vC..vG} = args (35x)
- *
- * (do this in conjunction with adding verifier support for invoke-lambda)
- */
-
- /*
- * Copy the argument registers into the arg[] array, and
- * also copy the first argument into vC. (The
- * DecodedInstruction structure doesn't have separate
- * fields for {vD, vE, vF, vG}, so there's no need to make
- * copies of those.) Note that all cases fall-through.
- */
- switch (count) {
- case 4:
- arg[5] = (Fetch16(0) >> 8) & 0x0f; // vG
- FALLTHROUGH_INTENDED;
- case 3:
- arg[4] = (reg_list >> 12) & 0x0f; // vF
- FALLTHROUGH_INTENDED;
- case 2:
- arg[3] = (reg_list >> 8) & 0x0f; // vE
- FALLTHROUGH_INTENDED;
- case 1:
- arg[2] = (reg_list >> 4) & 0x0f; // vD
- FALLTHROUGH_INTENDED;
- default: // case 0
- // The required lambda 'this' is actually a pair, but the pair is implicit.
- arg[0] = VRegC_25x(); // vC
- arg[1] = arg[0] + 1; // vC + 1
- break;
+//------------------------------------------------------------------------------
+// VRegH
+//------------------------------------------------------------------------------
+inline bool Instruction::HasVRegH() const {
+ switch (FormatOf(Opcode())) {
+ case k45cc: return true;
+ case k4rcc: return true;
+ default: return false;
}
}
+inline int32_t Instruction::VRegH() const {
+ switch (FormatOf(Opcode())) {
+ case k45cc: return VRegH_45cc();
+ case k4rcc: return VRegH_4rcc();
+ default:
+ LOG(FATAL) << "Tried to access vH of instruction " << Name() << " which has no H operand.";
+ exit(EXIT_FAILURE);
+ }
+}
+
+inline uint16_t Instruction::VRegH_45cc() const {
+ DCHECK_EQ(FormatOf(Opcode()), k45cc);
+ return Fetch16(3);
+}
+
+inline uint16_t Instruction::VRegH_4rcc() const {
+ DCHECK_EQ(FormatOf(Opcode()), k4rcc);
+ return Fetch16(3);
+}
+
+inline bool Instruction::HasVarArgs() const {
+ return FormatOf(Opcode()) == k35c;
+}
+
inline void Instruction::GetVarArgs(uint32_t arg[kMaxVarArgRegs], uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k35c);
diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc
index d04087a..c31d236 100644
--- a/runtime/dex_instruction.cc
+++ b/runtime/dex_instruction.cc
@@ -69,11 +69,12 @@
int const Instruction::kInstructionSizeInCodeUnits[] = {
#define INSTRUCTION_SIZE(opcode, c, p, format, i, a, v) \
- (((opcode) == NOP) ? -1 : \
- (((format) >= k10x) && ((format) <= k10t)) ? 1 : \
- (((format) >= k20t) && ((format) <= k25x)) ? 2 : \
- (((format) >= k32x) && ((format) <= k3rc)) ? 3 : \
- ((format) == k51l) ? 5 : -1),
+ (((opcode) == NOP) ? -1 : \
+ (((format) >= k10x) && ((format) <= k10t)) ? 1 : \
+ (((format) >= k20t) && ((format) <= k22c)) ? 2 : \
+ (((format) >= k32x) && ((format) <= k3rc)) ? 3 : \
+ (((format) >= k45cc) && ((format) <= k4rcc)) ? 4 : \
+ ((format) == k51l) ? 5 : -1),
#include "dex_instruction_list.h"
DEX_INSTRUCTION_LIST(INSTRUCTION_SIZE)
#undef DEX_INSTRUCTION_LIST
@@ -241,14 +242,6 @@
break;
}
FALLTHROUGH_INTENDED;
- case CREATE_LAMBDA:
- if (file != nullptr) {
- uint32_t method_idx = VRegB_21c();
- os << opcode << " v" << static_cast<int>(VRegA_21c()) << ", " << PrettyMethod(method_idx, *file, true)
- << " // method@" << method_idx;
- break;
- }
- FALLTHROUGH_INTENDED;
default:
os << StringPrintf("%s v%d, thing@%d", opcode, VRegA_21c(), VRegB_21c());
break;
@@ -329,26 +322,6 @@
}
break;
}
- case k25x: {
- if (Opcode() == INVOKE_LAMBDA) {
- uint32_t arg[kMaxVarArgRegs25x];
- GetAllArgs25x(arg);
- const size_t num_extra_var_args = VRegB_25x();
- DCHECK_LE(num_extra_var_args + 2, arraysize(arg));
-
- // invoke-lambda vC, {vD, vE, vF, vG}
- os << opcode << " v" << arg[0] << ", {";
- for (size_t i = 0; i < num_extra_var_args; ++i) {
- if (i != 0) {
- os << ", ";
- }
- os << "v" << arg[i + 2]; // Don't print the pair of vC registers. Pair is implicit.
- }
- os << "}";
- break;
- }
- FALLTHROUGH_INTENDED;
- }
case k32x: os << StringPrintf("%s v%d, v%d", opcode, VRegA_32x(), VRegB_32x()); break;
case k30t: os << StringPrintf("%s %+d", opcode, VRegA_30t()); break;
case k31t: os << StringPrintf("%s v%d, %+d", opcode, VRegA_31t(), VRegB_31t()); break;
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index c7856f0..f437fde 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -105,7 +105,6 @@
k22t, // op vA, vB, +CCCC
k22s, // op vA, vB, #+CCCC
k22c, // op vA, vB, thing@CCCC
- k25x, // op vC, {vD, vE, vF, vG} (B: count)
k32x, // op vAAAA, vBBBB
k30t, // op +AAAAAAAA
k31t, // op vAA, +BBBBBBBB
@@ -113,6 +112,15 @@
k31c, // op vAA, thing@BBBBBBBB
k35c, // op {vC, vD, vE, vF, vG}, thing@BBBB (B: count, A: vG)
k3rc, // op {vCCCC .. v(CCCC+AA-1)}, meth@BBBB
+
+ // op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH (A: count)
+ // format: AG op BBBB FEDC HHHH
+ k45cc,
+
+ // op {vCCCC .. v(CCCC+AA-1)}, meth@BBBB, proto@HHHH (AA: count)
+ // format: AA op BBBB CCCC HHHH
+ k4rcc,
+
k51l, // op vAA, #+BBBBBBBBBBBBBBBB
};
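Both new formats keep every field on a nibble or code-unit boundary, so decoding is one shift or mask per operand. A worked decode of the k45cc layout, matching the accessors added in dex_instruction-inl.h and the values used by the PropertiesOf45cc test below:

uint16_t units[4] = {0x40FA, 0x0010, 0xcafe, 0x0020};  // A=4, op=0xFA, B=16, H=32
uint8_t  a = units[0] >> 12;         // VRegA_45cc: arg count        == 4
uint8_t  g = (units[0] >> 8) & 0xf;  // vG, the fifth arg register   == 0
uint16_t b = units[1];               // VRegB_45cc: method index     == 16
uint8_t  c = units[2] & 0xf;         // VRegC_45cc: first arg, vC    == 0xe
uint16_t h = units[3];               // VRegH_45cc: proto index      == 32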
@@ -180,12 +188,9 @@
kVerifyVarArgRangeNonZero = 0x100000,
kVerifyRuntimeOnly = 0x200000,
kVerifyError = 0x400000,
- kVerifyRegCString = 0x800000,
};
static constexpr uint32_t kMaxVarArgRegs = 5;
- static constexpr uint32_t kMaxVarArgRegs25x = 6; // lambdas are 2 registers.
- static constexpr uint32_t kLambdaVirtualRegisterWidth = 2;
// Returns the size (in 2 byte code units) of this instruction.
size_t SizeInCodeUnits() const {
@@ -221,7 +226,7 @@
// Returns a pointer to the instruction after this 2xx instruction in the stream.
const Instruction* Next_2xx() const {
- DCHECK(FormatOf(Opcode()) >= k20t && FormatOf(Opcode()) <= k25x);
+ DCHECK(FormatOf(Opcode()) >= k20t && FormatOf(Opcode()) <= k22c);
return RelativeAt(2);
}
@@ -231,6 +236,12 @@
return RelativeAt(3);
}
+ // Returns a pointer to the instruction after this 4xx instruction in the stream.
+ const Instruction* Next_4xx() const {
+ DCHECK(FormatOf(Opcode()) >= k45cc && FormatOf(Opcode()) <= k4rcc);
+ return RelativeAt(4);
+ }
+
// Returns a pointer to the instruction after this 51l instruction in the stream.
const Instruction* Next_51l() const {
DCHECK(FormatOf(Opcode()) == k51l);
@@ -317,6 +328,12 @@
uint8_t VRegA_51l() const {
return VRegA_51l(Fetch16(0));
}
+ uint4_t VRegA_45cc() const {
+ return VRegA_45cc(Fetch16(0));
+ }
+ uint8_t VRegA_4rcc() const {
+ return VRegA_4rcc(Fetch16(0));
+ }
// The following methods return the vA operand for various instruction formats. The "inst_data"
// parameter holds the first 16 bits of instruction which the returned value is decoded from.
@@ -341,6 +358,8 @@
uint4_t VRegA_35c(uint16_t inst_data) const;
uint8_t VRegA_3rc(uint16_t inst_data) const;
uint8_t VRegA_51l(uint16_t inst_data) const;
+ uint4_t VRegA_45cc(uint16_t inst_data) const;
+ uint8_t VRegA_4rcc(uint16_t inst_data) const;
// VRegB
bool HasVRegB() const;
@@ -371,7 +390,6 @@
}
uint16_t VRegB_22x() const;
uint8_t VRegB_23x() const;
- uint4_t VRegB_25x() const;
uint32_t VRegB_31c() const;
int32_t VRegB_31i() const;
int32_t VRegB_31t() const;
@@ -379,6 +397,8 @@
uint16_t VRegB_35c() const;
uint16_t VRegB_3rc() const;
uint64_t VRegB_51l() const; // vB_wide
+ uint16_t VRegB_45cc() const;
+ uint16_t VRegB_4rcc() const;
// The following methods return the vB operand for all instruction formats where it is encoded in
// the first 16 bits of instruction. The "inst_data" parameter holds these 16 bits. The returned
@@ -398,20 +418,24 @@
int16_t VRegC_22s() const;
int16_t VRegC_22t() const;
uint8_t VRegC_23x() const;
- uint4_t VRegC_25x() const;
uint4_t VRegC_35c() const;
uint16_t VRegC_3rc() const;
+ uint4_t VRegC_45cc() const;
+ uint16_t VRegC_4rcc() const;
+
+ // VRegH
+ bool HasVRegH() const;
+ int32_t VRegH() const;
+ uint16_t VRegH_45cc() const;
+ uint16_t VRegH_4rcc() const;
// Fills the given array with the 'arg' array of the instruction.
- bool HasVarArgs35c() const;
- bool HasVarArgs25x() const;
-
- // TODO(iam): Make this name more consistent with GetAllArgs25x by including the opcode format.
+ bool HasVarArgs() const;
void GetVarArgs(uint32_t args[kMaxVarArgRegs], uint16_t inst_data) const;
void GetVarArgs(uint32_t args[kMaxVarArgRegs]) const {
return GetVarArgs(args, Fetch16(0));
}
- void GetAllArgs25x(uint32_t (&args)[kMaxVarArgRegs25x]) const;
// Returns the opcode field of the instruction. The given "inst_data" parameter must be the first
// 16 bits of instruction.
@@ -539,7 +563,7 @@
int GetVerifyTypeArgumentC() const {
return (kInstructionVerifyFlags[Opcode()] & (kVerifyRegC | kVerifyRegCField |
- kVerifyRegCNewArray | kVerifyRegCType | kVerifyRegCWide | kVerifyRegCString));
+ kVerifyRegCNewArray | kVerifyRegCType | kVerifyRegCWide));
}
int GetVerifyExtraFlags() const {
diff --git a/runtime/dex_instruction_list.h b/runtime/dex_instruction_list.h
index acdffd9..e974932 100644
--- a/runtime/dex_instruction_list.h
+++ b/runtime/dex_instruction_list.h
@@ -262,15 +262,16 @@
V(0xF0, IGET_BYTE_QUICK, "iget-byte-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
V(0xF1, IGET_CHAR_QUICK, "iget-char-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
V(0xF2, IGET_SHORT_QUICK, "iget-short-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xF3, INVOKE_LAMBDA, "invoke-lambda", k25x, kIndexNone, kContinue | kThrow | kInvoke | kExperimental, kVerifyRegC /*TODO: | kVerifyVarArg*/) \
+ V(0xF3, UNUSED_F3, "unused-f3", k10x, kIndexUnknown, 0, kVerifyError) \
V(0xF4, UNUSED_F4, "unused-f4", k10x, kIndexUnknown, 0, kVerifyError) \
- V(0xF5, CAPTURE_VARIABLE, "capture-variable", k21c, kIndexStringRef, kExperimental, kVerifyRegA | kVerifyRegBString) \
- V(0xF6, CREATE_LAMBDA, "create-lambda", k21c, kIndexMethodRef, kContinue | kThrow | kExperimental, kVerifyRegA | kVerifyRegBMethod) \
- V(0xF7, LIBERATE_VARIABLE, "liberate-variable", k22c, kIndexStringRef, kExperimental, kVerifyRegA | kVerifyRegB | kVerifyRegCString) \
- V(0xF8, BOX_LAMBDA, "box-lambda", k22x, kIndexNone, kContinue | kExperimental, kVerifyRegA | kVerifyRegB) \
- V(0xF9, UNBOX_LAMBDA, "unbox-lambda", k22c, kIndexTypeRef, kContinue | kThrow | kExperimental, kVerifyRegA | kVerifyRegB | kVerifyRegCType) \
- V(0xFA, UNUSED_FA, "unused-fa", k10x, kIndexUnknown, 0, kVerifyError) \
- V(0xFB, UNUSED_FB, "unused-fb", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0xF5, UNUSED_F5, "unused-f5", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0xF6, UNUSED_F6, "unused-f6", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0xF7, UNUSED_F7, "unused-f7", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0xF8, UNUSED_F8, "unused-f8", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0xF9, UNUSED_F9, "unused-f9", k10x, kIndexUnknown, 0, kVerifyError) \
+ /* TODO(narayan): The following two entries are placeholders. */ \
+ V(0xFA, INVOKE_POLYMORPHIC, "invoke-polymorphic", k45cc, kIndexUnknown, 0, kVerifyError) \
+ V(0xFB, INVOKE_POLYMORPHIC_RANGE, "invoke-polymorphic/range", k4rcc, kIndexUnknown, 0, kVerifyError) \
V(0xFC, UNUSED_FC, "unused-fc", k10x, kIndexUnknown, 0, kVerifyError) \
V(0xFD, UNUSED_FD, "unused-fd", k10x, kIndexUnknown, 0, kVerifyError) \
V(0xFE, UNUSED_FE, "unused-fe", k10x, kIndexUnknown, 0, kVerifyError) \
@@ -293,7 +294,6 @@
V(k22t) \
V(k22s) \
V(k22c) \
- V(k25x) \
V(k32x) \
V(k30t) \
V(k31t) \
diff --git a/runtime/dex_instruction_test.cc b/runtime/dex_instruction_test.cc
index 671ac0e..00c8e07 100644
--- a/runtime/dex_instruction_test.cc
+++ b/runtime/dex_instruction_test.cc
@@ -28,4 +28,96 @@
EXPECT_EQ(Instruction::kVerifyNone, Instruction::VerifyFlagsOf(nop));
}
+static void Build45cc(uint8_t num_args, uint16_t method_idx, uint16_t proto_idx,
+ uint16_t arg_regs, uint16_t* out) {
+ // A = num argument registers
+ // B = method_idx
+ // C - G = argument registers
+ // H = proto_idx
+ //
+ // op = 0xFA
+ //
+ // format:
+ // AG op BBBB FEDC HHHH
+ out[0] = 0;
+ out[0] |= (num_args << 12);
+ out[0] |= 0x00FA;
+
+ out[1] = method_idx;
+ out[2] = arg_regs;
+ out[3] = proto_idx;
+}
+
+static void Build4rcc(uint16_t num_args, uint16_t method_idx, uint16_t proto_idx,
+ uint16_t arg_regs_start, uint16_t* out) {
+ // A = num argument registers
+ // B = method_idx
+ // C = first argument register
+ // H = proto_idx
+ //
+ // op = 0xFB
+ //
+ // format:
+ // AA op BBBB CCCC HHHH
+ out[0] = 0;
+ out[0] |= (num_args << 8);
+ out[0] |= 0x00FB;
+
+ out[1] = method_idx;
+ out[2] = arg_regs_start;
+ out[3] = proto_idx;
+}
+
+TEST(Instruction, PropertiesOf45cc) {
+ uint16_t instruction[4];
+ Build45cc(4u /* num_vregs */, 16u /* method_idx */, 32u /* proto_idx */,
+ 0xcafe /* arg_regs */, instruction);
+
+ const Instruction* ins = Instruction::At(instruction);
+ ASSERT_EQ(4u, ins->SizeInCodeUnits());
+
+ ASSERT_TRUE(ins->HasVRegA());
+ ASSERT_EQ(4, ins->VRegA());
+ ASSERT_EQ(4u, ins->VRegA_45cc());
+ ASSERT_EQ(4u, ins->VRegA_45cc(instruction[0]));
+
+ ASSERT_TRUE(ins->HasVRegB());
+ ASSERT_EQ(16, ins->VRegB());
+ ASSERT_EQ(16u, ins->VRegB_45cc());
+
+ ASSERT_TRUE(ins->HasVRegC());
+ ASSERT_EQ(0xe, ins->VRegC());
+ ASSERT_EQ(0xe, ins->VRegC_45cc());
+
+ ASSERT_TRUE(ins->HasVRegH());
+ ASSERT_EQ(32, ins->VRegH());
+ ASSERT_EQ(32, ins->VRegH_45cc());
+}
+
+TEST(Instruction, PropertiesOf4rcc) {
+ uint16_t instruction[4];
+ Build4rcc(4u /* num_vregs */, 16u /* method_idx */, 32u /* proto_idx */,
+ 0xcafe /* arg_regs */, instruction);
+
+ const Instruction* ins = Instruction::At(instruction);
+ ASSERT_EQ(4u, ins->SizeInCodeUnits());
+
+ ASSERT_TRUE(ins->HasVRegA());
+ ASSERT_EQ(4, ins->VRegA());
+ ASSERT_EQ(4u, ins->VRegA_4rcc());
+ ASSERT_EQ(4u, ins->VRegA_4rcc(instruction[0]));
+
+ ASSERT_TRUE(ins->HasVRegB());
+ ASSERT_EQ(16, ins->VRegB());
+ ASSERT_EQ(16u, ins->VRegB_4rcc());
+
+ ASSERT_TRUE(ins->HasVRegC());
+ ASSERT_EQ(0xcafe, ins->VRegC());
+ ASSERT_EQ(0xcafe, ins->VRegC_4rcc());
+
+ ASSERT_TRUE(ins->HasVRegH());
+ ASSERT_EQ(32, ins->VRegH());
+ ASSERT_EQ(32, ins->VRegH_4rcc());
+}
+
} // namespace art
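As a counterpart to the Build45cc encoder in the test above, the field extraction that the VRegA_45cc/VRegC_45cc/VRegH_45cc accessors are being tested against can be read straight off the "AG op BBBB FEDC HHHH" layout. A standalone sketch of that decoding (an illustration only, not the real Instruction implementation):

#include <cstdint>

// Standalone sketch: decode the 45cc fields from 4 code units.
struct Decoded45cc {
  uint8_t num_args;     // A: high nibble of code unit 0.
  uint16_t method_idx;  // B: code unit 1.
  uint8_t vreg_c;       // C: lowest nibble of code unit 2.
  uint16_t proto_idx;   // H: code unit 3.
};

static Decoded45cc Decode45cc(const uint16_t* code) {
  Decoded45cc d;
  d.num_args = static_cast<uint8_t>(code[0] >> 12);    // 4 in the test above.
  d.method_idx = code[1];                              // 16.
  d.vreg_c = static_cast<uint8_t>(code[2] & 0xf);      // 0xe, the C nibble of 0xcafe.
  d.proto_idx = code[3];                               // 32.
  return d;
}

This is why PropertiesOf45cc expects VRegC() == 0xe: only the lowest nibble of the FEDC code unit is register C, while 4rcc treats the whole CCCC unit as the first register, so PropertiesOf4rcc expects the full 0xcafe.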
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index b12b118..c045e84 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -31,7 +31,7 @@
// A class may be accessing another class' fields when it doesn't have access, as access has been
// given by inheritance.
ScopedQuickEntrypointChecks sqec(self);
- auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kRefsOnly);
+ auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kSaveRefsOnly);
return ResolveVerifyAndClinit(type_idx, caller, self, true, false);
}
@@ -39,7 +39,7 @@
SHARED_REQUIRES(Locks::mutator_lock_) {
// Called when method->dex_cache_resolved_types_[] misses.
ScopedQuickEntrypointChecks sqec(self);
- auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kRefsOnly);
+ auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kSaveRefsOnly);
return ResolveVerifyAndClinit(type_idx, caller, self, false, false);
}
@@ -48,14 +48,14 @@
// Called when the caller isn't guaranteed to have access to a type and the dex cache may be
// unpopulated.
ScopedQuickEntrypointChecks sqec(self);
- auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kRefsOnly);
+ auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kSaveRefsOnly);
return ResolveVerifyAndClinit(type_idx, caller, self, false, true);
}
extern "C" mirror::String* artResolveStringFromCode(int32_t string_idx, Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kRefsOnly);
+ auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kSaveRefsOnly);
return ResolveStringFromCode(caller, string_idx);
}
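The two trailing booleans passed to ResolveVerifyAndClinit distinguish the three type entrypoints above. Assuming those parameters are can_run_clinit and verify_access, in that order (an inference from the call sites, not confirmed here), the dispatch can be sketched as:

// Sketch of the call sites above; parameter meanings are an assumption.
enum class TypeEntrypoint {
  kInitializeStaticStorage,         // May run <clinit>; access already proven.
  kInitializeType,                  // Dex-cache miss only; no init, no access check.
  kInitializeTypeAndVerifyAccess,   // Re-check access; no init.
};

struct ResolveFlags { bool can_run_clinit; bool verify_access; };

inline ResolveFlags FlagsFor(TypeEntrypoint ep) {
  switch (ep) {
    case TypeEntrypoint::kInitializeStaticStorage:       return {true, false};
    case TypeEntrypoint::kInitializeType:                return {false, false};
    case TypeEntrypoint::kInitializeTypeAndVerifyAccess: return {false, true};
  }
  return {false, false};  // Unreachable with a well-formed enum value.
}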
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index b5e560f..82d5467 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -56,7 +56,7 @@
CHECK(!self->IsExceptionPending()) << "Enter instrumentation exit stub with pending exception "
<< self->GetException()->Dump();
// Compute address of return PC and sanity check that it currently holds 0.
- size_t return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kRefsOnly);
+ size_t return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kSaveRefsOnly);
uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) +
return_pc_offset);
CHECK_EQ(*return_pc, 0U);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 9678079..c67379a 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -46,7 +46,7 @@
static constexpr size_t kBytesStackArgLocation = 4;
// Frame size in bytes of a callee-save frame for RefsAndArgs.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
- GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs);
+ GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kSaveRefsAndArgs);
#if defined(__arm__)
// The callee save frame is pointed to by SP.
// | argN | |
@@ -75,11 +75,11 @@
static constexpr size_t kNumQuickFprArgs = kArm32QuickCodeUseSoftFloat ? 0 : 16;
static constexpr bool kGprFprLockstep = false;
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
- arm::ArmCalleeSaveFpr1Offset(Runtime::kRefsAndArgs); // Offset of first FPR arg.
+ arm::ArmCalleeSaveFpr1Offset(Runtime::kSaveRefsAndArgs); // Offset of first FPR arg.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
- arm::ArmCalleeSaveGpr1Offset(Runtime::kRefsAndArgs); // Offset of first GPR arg.
+ arm::ArmCalleeSaveGpr1Offset(Runtime::kSaveRefsAndArgs); // Offset of first GPR arg.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
- arm::ArmCalleeSaveLrOffset(Runtime::kRefsAndArgs); // Offset of return address.
+ arm::ArmCalleeSaveLrOffset(Runtime::kSaveRefsAndArgs); // Offset of return address.
static size_t GprIndexToGprOffset(uint32_t gpr_index) {
return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
}
@@ -113,11 +113,11 @@
static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs.
static constexpr bool kGprFprLockstep = false;
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
- arm64::Arm64CalleeSaveFpr1Offset(Runtime::kRefsAndArgs); // Offset of first FPR arg.
+ arm64::Arm64CalleeSaveFpr1Offset(Runtime::kSaveRefsAndArgs); // Offset of first FPR arg.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
- arm64::Arm64CalleeSaveGpr1Offset(Runtime::kRefsAndArgs); // Offset of first GPR arg.
+ arm64::Arm64CalleeSaveGpr1Offset(Runtime::kSaveRefsAndArgs); // Offset of first GPR arg.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
- arm64::Arm64CalleeSaveLrOffset(Runtime::kRefsAndArgs); // Offset of return address.
+ arm64::Arm64CalleeSaveLrOffset(Runtime::kSaveRefsAndArgs); // Offset of return address.
static size_t GprIndexToGprOffset(uint32_t gpr_index) {
return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
}
@@ -307,7 +307,7 @@
static ArtMethod* GetCallingMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
- return GetCalleeSaveMethodCaller(sp, Runtime::kRefsAndArgs);
+ return GetCalleeSaveMethodCaller(sp, Runtime::kSaveRefsAndArgs);
}
static ArtMethod* GetOuterMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -319,7 +319,7 @@
static uint32_t GetCallingDexPc(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
- const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs);
+ const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kSaveRefsAndArgs);
ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
@@ -2054,7 +2054,7 @@
static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object, Thread* self,
ArtMethod** sp) {
ScopedQuickEntrypointChecks sqec(self);
- DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
+ DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs));
ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type);
if (UNLIKELY(method == nullptr)) {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 01e22a4..553c092 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -80,10 +80,16 @@
// This test ensures that kQuickCalleeSaveFrame_RefAndArgs_FrameSize is correct.
TEST_F(QuickTrampolineEntrypointsTest, FrameSize) {
// We have to use a define here as the callee_save_frame.h functions are constexpr.
-#define CHECK_FRAME_SIZE(isa) \
- CheckFrameSize(isa, Runtime::kRefsAndArgs, GetCalleeSaveFrameSize(isa, Runtime::kRefsAndArgs)); \
- CheckFrameSize(isa, Runtime::kRefsOnly, GetCalleeSaveFrameSize(isa, Runtime::kRefsOnly)); \
- CheckFrameSize(isa, Runtime::kSaveAll, GetCalleeSaveFrameSize(isa, Runtime::kSaveAll))
+#define CHECK_FRAME_SIZE(isa) \
+ CheckFrameSize(isa, \
+ Runtime::kSaveRefsAndArgs, \
+ GetCalleeSaveFrameSize(isa, Runtime::kSaveRefsAndArgs)); \
+ CheckFrameSize(isa, \
+ Runtime::kSaveRefsOnly, \
+ GetCalleeSaveFrameSize(isa, Runtime::kSaveRefsOnly)); \
+ CheckFrameSize(isa, \
+ Runtime::kSaveAllCalleeSaves, \
+ GetCalleeSaveFrameSize(isa, Runtime::kSaveAllCalleeSaves))
CHECK_FRAME_SIZE(kArm);
CHECK_FRAME_SIZE(kArm64);
@@ -108,12 +114,12 @@
// Ensure that the computation in callee_save_frame.h is correct.
// Note: we can only check against the kRuntimeISA, because the ArtMethod computation uses
// sizeof(void*), which is wrong when the target bitwidth is not the same as the host's.
- CheckPCOffset(kRuntimeISA, Runtime::kRefsAndArgs,
- GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kRefsAndArgs));
- CheckPCOffset(kRuntimeISA, Runtime::kRefsOnly,
- GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kRefsOnly));
- CheckPCOffset(kRuntimeISA, Runtime::kSaveAll,
- GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kSaveAll));
+ CheckPCOffset(kRuntimeISA, Runtime::kSaveRefsAndArgs,
+ GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kSaveRefsAndArgs));
+ CheckPCOffset(kRuntimeISA, Runtime::kSaveRefsOnly,
+ GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kSaveRefsOnly));
+ CheckPCOffset(kRuntimeISA, Runtime::kSaveAllCalleeSaves,
+ GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kSaveAllCalleeSaves));
}
} // namespace art
diff --git a/runtime/experimental_flags.h b/runtime/experimental_flags.h
index 198f3fa..fde1a5f 100644
--- a/runtime/experimental_flags.h
+++ b/runtime/experimental_flags.h
@@ -26,7 +26,6 @@
// The actual flag values.
enum {
kNone = 0x0000,
- kLambdas = 0x0001,
};
constexpr ExperimentalFlags() : value_(0x0000) {}
@@ -62,15 +61,9 @@
uint32_t value_;
};
-inline std::ostream& operator<<(std::ostream& stream, const ExperimentalFlags& e) {
- bool started = false;
- if (e & ExperimentalFlags::kLambdas) {
- stream << (started ? "|" : "") << "kLambdas";
- started = true;
- }
- if (!started) {
- stream << "kNone";
- }
+inline std::ostream& operator<<(std::ostream& stream,
+ const ExperimentalFlags& e ATTRIBUTE_UNUSED) {
+ stream << "kNone";
return stream;
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 5485cd2..88fbf78 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -257,6 +257,7 @@
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() entering";
}
+ CHECK_GE(large_object_threshold, kMinLargeObjectThreshold);
ScopedTrace trace(__FUNCTION__);
Runtime* const runtime = Runtime::Current();
// If we aren't the zygote, switch to the default non zygote allocator. This may update the
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index bb0d11a..be8ed40 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -132,7 +132,8 @@
static constexpr double kDefaultTargetUtilization = 0.5;
static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
// Primitive arrays larger than this size are put in the large object space.
- static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;
+ static constexpr size_t kMinLargeObjectThreshold = 3 * kPageSize;
+ static constexpr size_t kDefaultLargeObjectThreshold = kMinLargeObjectThreshold;
// Whether or not parallel GC is enabled. If not, then we never create the thread pool.
static constexpr bool kDefaultEnableParallelGC = false;
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 8ade185..8d406b3 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -529,6 +529,17 @@
error_msg);
}
if (space != nullptr) {
+ // Check whether there is enough space left over in the data partition. Even if we can load
+ // the image, we need to be conservative, as some parts of the platform are not very tolerant
+ // of space constraints.
+ // ImageSpace doesn't know about the data partition per se; it relies on the FindImageFilename
+ // helper (which relies on GetDalvikCache). So for now, if we load an image out of /system,
+ // ignore the check (as it would test for free space in /system instead).
+ if (!is_system && !CheckSpace(*image_filename, error_msg)) {
+ // No. Delete the generated image and try to run out of the dex files.
+ PruneDalvikCache(image_isa);
+ return nullptr;
+ }
return space;
}
@@ -1430,12 +1441,12 @@
image_header->GetImageMethod(ImageHeader::kImtConflictMethod));
CHECK_EQ(runtime->GetImtUnimplementedMethod(),
image_header->GetImageMethod(ImageHeader::kImtUnimplementedMethod));
- CHECK_EQ(runtime->GetCalleeSaveMethod(Runtime::kSaveAll),
- image_header->GetImageMethod(ImageHeader::kCalleeSaveMethod));
- CHECK_EQ(runtime->GetCalleeSaveMethod(Runtime::kRefsOnly),
- image_header->GetImageMethod(ImageHeader::kRefsOnlySaveMethod));
- CHECK_EQ(runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs),
- image_header->GetImageMethod(ImageHeader::kRefsAndArgsSaveMethod));
+ CHECK_EQ(runtime->GetCalleeSaveMethod(Runtime::kSaveAllCalleeSaves),
+ image_header->GetImageMethod(ImageHeader::kSaveAllCalleeSavesMethod));
+ CHECK_EQ(runtime->GetCalleeSaveMethod(Runtime::kSaveRefsOnly),
+ image_header->GetImageMethod(ImageHeader::kSaveRefsOnlyMethod));
+ CHECK_EQ(runtime->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs),
+ image_header->GetImageMethod(ImageHeader::kSaveRefsAndArgsMethod));
CHECK_EQ(runtime->GetCalleeSaveMethod(Runtime::kSaveEverything),
image_header->GetImageMethod(ImageHeader::kSaveEverythingMethod));
} else if (!runtime->HasResolutionMethod()) {
@@ -1445,11 +1456,13 @@
runtime->SetImtUnimplementedMethod(
image_header->GetImageMethod(ImageHeader::kImtUnimplementedMethod));
runtime->SetCalleeSaveMethod(
- image_header->GetImageMethod(ImageHeader::kCalleeSaveMethod), Runtime::kSaveAll);
+ image_header->GetImageMethod(ImageHeader::kSaveAllCalleeSavesMethod),
+ Runtime::kSaveAllCalleeSaves);
runtime->SetCalleeSaveMethod(
- image_header->GetImageMethod(ImageHeader::kRefsOnlySaveMethod), Runtime::kRefsOnly);
+ image_header->GetImageMethod(ImageHeader::kSaveRefsOnlyMethod), Runtime::kSaveRefsOnly);
runtime->SetCalleeSaveMethod(
- image_header->GetImageMethod(ImageHeader::kRefsAndArgsSaveMethod), Runtime::kRefsAndArgs);
+ image_header->GetImageMethod(ImageHeader::kSaveRefsAndArgsMethod),
+ Runtime::kSaveRefsAndArgs);
runtime->SetCalleeSaveMethod(
image_header->GetImageMethod(ImageHeader::kSaveEverythingMethod), Runtime::kSaveEverything);
}
diff --git a/runtime/gc/space/image_space_fs.h b/runtime/gc/space/image_space_fs.h
index eac52f7..4d539d2 100644
--- a/runtime/gc/space/image_space_fs.h
+++ b/runtime/gc/space/image_space_fs.h
@@ -223,8 +223,21 @@
file.reset(OS::CreateEmptyFile(file_name));
if (file.get() == nullptr) {
+ int saved_errno = errno;
PLOG(WARNING) << "Failed to create boot marker.";
- return;
+ if (saved_errno != ENOSPC) {
+ return;
+ }
+
+ LOG(WARNING) << "Pruning dalvik cache because of low-memory situation.";
+ impl::DeleteDirectoryContents(isa_subdir, false);
+
+ // Try once more.
+ file.reset(OS::OpenFileReadWrite(file_name));
+ if (file == nullptr) {
+ PLOG(WARNING) << "Failed to create boot marker.";
+ return;
+ }
}
} else {
if (!file->ReadFully(&num_failed_boots, sizeof(num_failed_boots))) {
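The boot-marker change above is an instance of a retry-after-cleanup pattern: save errno before logging (PLOG can clobber it), and only for ENOSPC reclaim space and retry the operation exactly once. A generic sketch of the shape, with hypothetical names (CreateWithOneRetry, ReclaimSpace are illustrative, not ART code):

#include <cerrno>
#include <cstdio>

// Hypothetical sketch: create a file, reclaim space on ENOSPC, retry once.
FILE* CreateWithOneRetry(const char* path, void (*ReclaimSpace)()) {
  FILE* f = std::fopen(path, "w");
  if (f == nullptr) {
    int saved_errno = errno;   // Save before any logging can clobber it.
    if (saved_errno != ENOSPC) {
      return nullptr;          // Only a full disk is worth retrying.
    }
    ReclaimSpace();            // e.g. prune generated files from the cache.
    f = std::fopen(path, "w"); // Second and final attempt.
  }
  return f;
}

Bounding the retry at one attempt keeps the failure path terminating even when pruning frees no space.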
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index 3734bcc..0304d0d 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -195,7 +195,8 @@
return root_.IsNull();
}
- ALWAYS_INLINE GcRoot(MirrorType* ref = nullptr) SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE GcRoot() {}
+ explicit ALWAYS_INLINE GcRoot(MirrorType* ref) SHARED_REQUIRES(Locks::mutator_lock_);
private:
// Root visitors take pointers to root_ and place them in CompressedReference** arrays. We use a
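Splitting the old defaulted one-argument constructor into a default constructor plus an explicit one-argument constructor closes off silent MirrorType* to GcRoot conversions at call sites. A minimal sketch of the difference:

struct Obj {};

struct RootBefore {
  RootBefore(Obj* ref = nullptr) : ref_(ref) {}  // Implicit conversion allowed.
  Obj* ref_;
};

struct RootAfter {
  RootAfter() : ref_(nullptr) {}
  explicit RootAfter(Obj* ref) : ref_(ref) {}    // Conversion must be spelled out.
  Obj* ref_;
};

void Consume(RootBefore) {}
void ConsumeAfter(RootAfter) {}

void Demo(Obj* p) {
  Consume(p);                  // Compiles: silently wraps the raw pointer.
  // ConsumeAfter(p);          // Would no longer compile.
  ConsumeAfter(RootAfter(p));  // Intent is now explicit at the call site.
}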
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index c66029d..716c23d 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -26,14 +26,14 @@
DEFINE_CHECK_EQ(static_cast<size_t>(COMPRESSED_REFERENCE_SIZE), (static_cast<size_t>(sizeof(art::mirror::CompressedReference<art::mirror::Object>))))
#define COMPRESSED_REFERENCE_SIZE_SHIFT 0x2
DEFINE_CHECK_EQ(static_cast<size_t>(COMPRESSED_REFERENCE_SIZE_SHIFT), (static_cast<size_t>(art::WhichPowerOf2(sizeof(art::mirror::CompressedReference<art::mirror::Object>)))))
-#define RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET 0
-DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: kSaveAll))))
-#define RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET 0x8
-DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: kRefsOnly))))
-#define RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET 0x10
-DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: kRefsAndArgs))))
-#define RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET 0x18
-DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: kSaveEverything))))
+#define RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET 0
+DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: kSaveAllCalleeSaves))))
+#define RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET 0x8
+DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: kSaveRefsOnly))))
+#define RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET 0x10
+DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: kSaveRefsAndArgs))))
+#define RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET 0x18
+DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: kSaveEverything))))
#define THREAD_FLAGS_OFFSET 0
DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_FLAGS_OFFSET), (static_cast<int32_t>(art::Thread:: ThreadFlagsOffset<art::kRuntimePointerSize>().Int32Value())))
#define THREAD_ID_OFFSET 12
@@ -70,6 +70,8 @@
DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_32), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k32).Int32Value())))
#define ART_METHOD_QUICK_CODE_OFFSET_64 48
DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_64), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k64).Int32Value())))
+#define MIN_LARGE_OBJECT_THRESHOLD 0x3000
+DEFINE_CHECK_EQ(static_cast<size_t>(MIN_LARGE_OBJECT_THRESHOLD), (static_cast<size_t>(art::gc::Heap::kMinLargeObjectThreshold)))
#define LOCK_WORD_STATE_SHIFT 30
DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kStateShift)))
#define LOCK_WORD_STATE_MASK 0xc0000000
@@ -96,6 +98,8 @@
DEFINE_CHECK_EQ(static_cast<size_t>(OBJECT_ALIGNMENT_MASK), (static_cast<size_t>(art::kObjectAlignment - 1)))
#define OBJECT_ALIGNMENT_MASK_TOGGLED 0xfffffff8
DEFINE_CHECK_EQ(static_cast<uint32_t>(OBJECT_ALIGNMENT_MASK_TOGGLED), (static_cast<uint32_t>(~static_cast<uint32_t>(art::kObjectAlignment - 1))))
+#define OBJECT_ALIGNMENT_MASK_TOGGLED64 0xfffffffffffffff8
+DEFINE_CHECK_EQ(static_cast<uint64_t>(OBJECT_ALIGNMENT_MASK_TOGGLED64), (static_cast<uint64_t>(~static_cast<uint64_t>(art::kObjectAlignment - 1))))
#define ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE 128
DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), (static_cast<int32_t>((art::gc::allocator::RosAlloc::kMaxThreadLocalBracketSize))))
#define ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT 3
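OBJECT_ALIGNMENT_MASK_TOGGLED64 is the 64-bit counterpart of the existing 32-bit toggled mask: both are the classic round-down/round-up masks for 8-byte object alignment, exported here so assembly fast paths can use them directly. In C++ terms (a self-contained sketch of the arithmetic, assuming 8-byte alignment and 4 KiB pages):

#include <cstdint>

constexpr uint64_t kObjectAlignment = 8;
constexpr uint64_t kMask = kObjectAlignment - 1;   // 0x7: the low bits.
constexpr uint64_t kMaskToggled64 = ~kMask;        // 0xfffffffffffffff8.

constexpr uint64_t RoundDown(uint64_t x) { return x & kMaskToggled64; }
constexpr uint64_t RoundUp(uint64_t x) { return (x + kMask) & kMaskToggled64; }

static_assert(RoundUp(13) == 16, "13 rounds up to the next 8-byte boundary");
static_assert(RoundDown(13) == 8, "13 rounds down to the previous boundary");
// MIN_LARGE_OBJECT_THRESHOLD above is 3 pages with 4 KiB pages assumed:
static_assert(0x3000 == 3 * 4096, "matches kMinLargeObjectThreshold = 3 * kPageSize");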
diff --git a/runtime/image.h b/runtime/image.h
index 207a818..9ff18d6 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -183,9 +183,9 @@
kResolutionMethod,
kImtConflictMethod,
kImtUnimplementedMethod,
- kCalleeSaveMethod,
- kRefsOnlySaveMethod,
- kRefsAndArgsSaveMethod,
+ kSaveAllCalleeSavesMethod,
+ kSaveRefsOnlyMethod,
+ kSaveRefsAndArgsMethod,
kSaveEverythingMethod,
kImageMethodsCount, // Number of elements in enum.
};
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 61ffe44..4a86e36 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -356,7 +356,7 @@
LOG(INFO) << " Removing exit stub in " << DescribeLocation();
}
if (instrumentation_frame.interpreter_entry_) {
- CHECK(m == Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
+ CHECK(m == Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs));
} else {
CHECK(m == instrumentation_frame.method_) << PrettyMethod(m);
}
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 11b7ef4..ac146b3 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -30,9 +30,6 @@
namespace art {
namespace interpreter {
-// All lambda closures have to be a consecutive pair of virtual registers.
-static constexpr size_t kLambdaVirtualRegisterWidth = 2;
-
void ThrowNullPointerExceptionFromInterpreter() {
ThrowNullPointerExceptionFromDexPC();
}
@@ -732,7 +729,6 @@
// Fast path: no extra checks.
if (is_range) {
- // TODO: Implement the range version of invoke-lambda
uint16_t first_src_reg = vregC;
for (size_t src_reg = first_src_reg, dest_reg = first_dest_reg; dest_reg < num_regs;
@@ -772,34 +768,6 @@
}
template<bool is_range, bool do_assignability_check>
-bool DoLambdaCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, uint16_t inst_data ATTRIBUTE_UNUSED, JValue* result) {
- const uint4_t num_additional_registers = inst->VRegB_25x();
- // Argument word count.
- const uint16_t number_of_inputs = num_additional_registers + kLambdaVirtualRegisterWidth;
- // The lambda closure register is always present and is not encoded in the count.
- // Furthermore, the lambda closure register is always wide, so it counts as 2 inputs.
-
- // TODO: find a cleaner way to separate non-range and range information without duplicating
- // code.
- uint32_t arg[Instruction::kMaxVarArgRegs25x]; // only used in invoke-XXX.
- uint32_t vregC = 0; // only used in invoke-XXX-range.
- if (is_range) {
- vregC = inst->VRegC_3rc();
- } else {
- // TODO(iam): See if it's possible to remove inst_data dependency from 35x to avoid this path
- inst->GetAllArgs25x(arg);
- }
-
- // TODO: if there's an assignability check, throw instead?
- DCHECK(called_method->IsStatic());
-
- return DoCallCommon<is_range, do_assignability_check>(
- called_method, self, shadow_frame,
- result, number_of_inputs, arg, vregC);
-}
-
-template<bool is_range, bool do_assignability_check>
bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, uint16_t inst_data, JValue* result) {
// Argument word count.
@@ -947,20 +915,6 @@
EXPLICIT_DO_CALL_TEMPLATE_DECL(true, true);
#undef EXPLICIT_DO_CALL_TEMPLATE_DECL
-// Explicit DoLambdaCall template function declarations.
-#define EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
- bool DoLambdaCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self, \
- ShadowFrame& shadow_frame, \
- const Instruction* inst, \
- uint16_t inst_data, \
- JValue* result)
-EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(false, false);
-EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(false, true);
-EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(true, false);
-EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(true, true);
-#undef EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL
-
// Explicit DoFilledNewArray template function declarations.
#define EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(_is_range_, _check, _transaction_active) \
template SHARED_REQUIRES(Locks::mutator_lock_) \
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 174d4e0..4fd1514 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -36,14 +36,7 @@
#include "entrypoints/entrypoint_utils-inl.h"
#include "handle_scope-inl.h"
#include "jit/jit.h"
-#include "lambda/art_lambda_method.h"
-#include "lambda/box_table.h"
-#include "lambda/closure.h"
-#include "lambda/closure_builder-inl.h"
-#include "lambda/leaking_allocator.h"
-#include "lambda/shorty_field_type.h"
#include "mirror/class-inl.h"
-#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
@@ -142,488 +135,7 @@
bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, uint16_t inst_data, JValue* result);
-// Invokes the given lambda closure. This is part of the invocation support and is used by
-// DoLambdaInvoke functions.
-// Returns true on success, otherwise throws an exception and returns false.
-template<bool is_range, bool do_assignability_check>
-bool DoLambdaCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, uint16_t inst_data, JValue* result);
-
-// Validates that the art method corresponding to a lambda method target
-// is semantically valid:
-//
-// Must be ACC_STATIC and ACC_LAMBDA. Must be a concrete managed implementation
-// (i.e. not native, not proxy, not abstract, ...).
-//
-// If the validation fails, return false and raise an exception.
-static inline bool IsValidLambdaTargetOrThrow(ArtMethod* called_method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- bool success = false;
-
- if (UNLIKELY(called_method == nullptr)) {
- // The shadow frame should already be pushed, so we don't need to update it.
- } else if (UNLIKELY(!called_method->IsInvokable())) {
- called_method->ThrowInvocationTimeError();
- // We got an error.
- // TODO(iam): Also handle the case when the method is non-static, what error do we throw?
- // TODO(iam): Also make sure that ACC_LAMBDA is set.
- } else if (UNLIKELY(called_method->GetCodeItem() == nullptr)) {
- // Method could be native, proxy method, etc. Lambda targets have to be concrete impls,
- // so don't allow this.
- } else {
- success = true;
- }
-
- return success;
-}
-
-// Write out the 'Closure*' into vreg and vreg+1, as if it was a jlong.
-static inline void WriteLambdaClosureIntoVRegs(ShadowFrame& shadow_frame,
- const lambda::Closure& lambda_closure,
- uint32_t vreg) {
- // Split the method into a lo and hi 32 bits so we can encode them into 2 virtual registers.
- uint32_t closure_lo = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(&lambda_closure));
- uint32_t closure_hi = static_cast<uint32_t>(reinterpret_cast<uint64_t>(&lambda_closure)
- >> BitSizeOf<uint32_t>());
- // Use uint64_t instead of uintptr_t to allow shifting past the max on 32-bit.
- static_assert(sizeof(uint64_t) >= sizeof(uintptr_t), "Impossible");
-
- DCHECK_NE(closure_lo | closure_hi, 0u);
-
- shadow_frame.SetVReg(vreg, closure_lo);
- shadow_frame.SetVReg(vreg + 1, closure_hi);
-}
-
-// Handles create-lambda instructions.
-// Returns true on success, otherwise throws an exception and returns false.
-// (Exceptions are thrown by creating a new exception and then being put in the thread TLS)
-//
-// The closure must be allocated big enough to hold the data, and should not be
-// pre-initialized. It is initialized with the actual captured variables as a side-effect,
-// although this should be unimportant to the caller since this function also handles storing it to
-// the ShadowFrame.
-//
-// As a work-in-progress implementation, this shoves the ArtMethod object corresponding
-// to the target dex method index into the target register vA and vA + 1.
-template<bool do_access_check>
-static inline bool DoCreateLambda(Thread* self,
- const Instruction* inst,
- /*inout*/ShadowFrame& shadow_frame,
- /*inout*/lambda::ClosureBuilder* closure_builder,
- /*inout*/lambda::Closure* uninitialized_closure) {
- DCHECK(closure_builder != nullptr);
- DCHECK(uninitialized_closure != nullptr);
- DCHECK_ALIGNED(uninitialized_closure, alignof(lambda::Closure));
-
- using lambda::ArtLambdaMethod;
- using lambda::LeakingAllocator;
-
- /*
- * create-lambda is opcode 0x21c
- * - vA is the target register where the closure will be stored into
- * (also stores into vA + 1)
- * - vB is the method index which will be the target for a later invoke-lambda
- */
- const uint32_t method_idx = inst->VRegB_21c();
- mirror::Object* receiver = nullptr; // Always static. (see 'kStatic')
- ArtMethod* sf_method = shadow_frame.GetMethod();
- ArtMethod* const called_method = FindMethodFromCode<kStatic, do_access_check>(
- method_idx, &receiver, sf_method, self);
-
- uint32_t vreg_dest_closure = inst->VRegA_21c();
-
- if (UNLIKELY(!IsValidLambdaTargetOrThrow(called_method))) {
- CHECK(self->IsExceptionPending());
- shadow_frame.SetVReg(vreg_dest_closure, 0u);
- shadow_frame.SetVReg(vreg_dest_closure + 1, 0u);
- return false;
- }
-
- ArtLambdaMethod* initialized_lambda_method;
- // Initialize the ArtLambdaMethod with the right data.
- {
- // Allocate enough memory to store a well-aligned ArtLambdaMethod.
- // This is not the final type yet since the data starts out uninitialized.
- LeakingAllocator::AlignedMemoryStorage<ArtLambdaMethod>* uninitialized_lambda_method =
- LeakingAllocator::AllocateMemory<ArtLambdaMethod>(self);
-
- std::string captured_variables_shorty = closure_builder->GetCapturedVariableShortyTypes();
- std::string captured_variables_long_type_desc;
-
- // Synthesize a long type descriptor from the short one.
- for (char shorty : captured_variables_shorty) {
- lambda::ShortyFieldType shorty_field_type(shorty);
- if (shorty_field_type.IsObject()) {
- // Not the true type, but good enough until we implement verifier support.
- captured_variables_long_type_desc += "Ljava/lang/Object;";
- UNIMPLEMENTED(FATAL) << "create-lambda with an object captured variable";
- } else if (shorty_field_type.IsLambda()) {
- // Not the true type, but good enough until we implement verifier support.
- captured_variables_long_type_desc += "Ljava/lang/Runnable;";
- UNIMPLEMENTED(FATAL) << "create-lambda with a lambda captured variable";
- } else {
- // The primitive types have the same length shorty or not, so this is always correct.
- DCHECK(shorty_field_type.IsPrimitive());
- captured_variables_long_type_desc += shorty_field_type;
- }
- }
-
- // Copy strings to dynamically allocated storage. This leaks, but that's ok. Fix it later.
- // TODO: Strings need to come from the DexFile, so they won't need their own allocations.
- char* captured_variables_type_desc = LeakingAllocator::MakeFlexibleInstance<char>(
- self,
- captured_variables_long_type_desc.size() + 1);
- strcpy(captured_variables_type_desc, captured_variables_long_type_desc.c_str());
- char* captured_variables_shorty_copy = LeakingAllocator::MakeFlexibleInstance<char>(
- self,
- captured_variables_shorty.size() + 1);
- strcpy(captured_variables_shorty_copy, captured_variables_shorty.c_str());
-
- // After initialization, the object at the storage is well-typed. Use strong type going forward.
- initialized_lambda_method =
- new (uninitialized_lambda_method) ArtLambdaMethod(called_method,
- captured_variables_type_desc,
- captured_variables_shorty_copy,
- true); // innate lambda
- }
-
- // Write all the closure captured variables and the closure header into the closure.
- lambda::Closure* initialized_closure =
- closure_builder->CreateInPlace(uninitialized_closure, initialized_lambda_method);
-
- WriteLambdaClosureIntoVRegs(/*inout*/shadow_frame, *initialized_closure, vreg_dest_closure);
- return true;
-}
-
-// Reads out the 'ArtMethod*' stored inside of vreg and vreg+1
-//
-// Validates that the art method points to a valid lambda function, otherwise throws
-// an exception and returns null.
-// (Exceptions are thrown by creating a new exception and then being put in the thread TLS)
-static inline lambda::Closure* ReadLambdaClosureFromVRegsOrThrow(ShadowFrame& shadow_frame,
- uint32_t vreg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- // Lambda closures take up a consecutive pair of 2 virtual registers.
- // On 32-bit the high bits are always 0.
- uint32_t vc_value_lo = shadow_frame.GetVReg(vreg);
- uint32_t vc_value_hi = shadow_frame.GetVReg(vreg + 1);
-
- uint64_t vc_value_ptr = (static_cast<uint64_t>(vc_value_hi) << BitSizeOf<uint32_t>())
- | vc_value_lo;
-
- // Use uint64_t instead of uintptr_t to allow left-shifting past the max on 32-bit.
- static_assert(sizeof(uint64_t) >= sizeof(uintptr_t), "Impossible");
- lambda::Closure* const lambda_closure = reinterpret_cast<lambda::Closure*>(vc_value_ptr);
- DCHECK_ALIGNED(lambda_closure, alignof(lambda::Closure));
-
- // Guard against the user passing a null closure, which is odd but (sadly) semantically valid.
- if (UNLIKELY(lambda_closure == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- return nullptr;
- } else if (UNLIKELY(!IsValidLambdaTargetOrThrow(lambda_closure->GetTargetMethod()))) {
- // Sanity check against data corruption.
- return nullptr;
- }
-
- return lambda_closure;
-}
-
-// Forward declaration for lock annotations. See below for documentation.
-template <bool do_access_check>
-static inline const char* GetStringDataByDexStringIndexOrThrow(ShadowFrame& shadow_frame,
- uint32_t string_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
-// Find the c-string data corresponding to a dex file's string index.
-// Otherwise, returns null if not found and throws a VerifyError.
-//
-// Note that with do_access_check=false, we never return null because the verifier
-// must guard against invalid string indices.
-// (Exceptions are thrown by creating a new exception and then being put in the thread TLS)
-template <bool do_access_check>
-static inline const char* GetStringDataByDexStringIndexOrThrow(ShadowFrame& shadow_frame,
- uint32_t string_idx) {
- ArtMethod* method = shadow_frame.GetMethod();
- const DexFile* dex_file = method->GetDexFile();
-
- mirror::Class* declaring_class = method->GetDeclaringClass();
- if (!do_access_check) {
- // MethodVerifier refuses methods with string_idx out of bounds.
- DCHECK_LT(string_idx, declaring_class->GetDexCache()->NumStrings());
- } else {
- // Access checks enabled: perform string index bounds ourselves.
- if (string_idx >= dex_file->GetHeader().string_ids_size_) {
- ThrowVerifyError(declaring_class, "String index '%" PRIu32 "' out of bounds",
- string_idx);
- return nullptr;
- }
- }
-
- const char* type_string = dex_file->StringDataByIdx(string_idx);
-
- if (UNLIKELY(type_string == nullptr)) {
- CHECK_EQ(false, do_access_check)
- << " verifier should've caught invalid string index " << string_idx;
- CHECK_EQ(true, do_access_check)
- << " string idx size check should've caught invalid string index " << string_idx;
- }
-
- return type_string;
-}
-
-// Handles capture-variable instructions.
-// Returns true on success, otherwise throws an exception and returns false.
-// (Exceptions are thrown by creating a new exception and then being put in the thread TLS)
-template<bool do_access_check>
-static inline bool DoCaptureVariable(Thread* self,
- const Instruction* inst,
- /*inout*/ShadowFrame& shadow_frame,
- /*inout*/lambda::ClosureBuilder* closure_builder) {
- DCHECK(closure_builder != nullptr);
- using lambda::ShortyFieldType;
- /*
- * capture-variable is opcode 0xf6, fmt 0x21c
- * - vA is the source register of the variable that will be captured
- * - vB is the string ID of the variable's type that will be captured
- */
- const uint32_t source_vreg = inst->VRegA_21c();
- const uint32_t string_idx = inst->VRegB_21c();
- // TODO: this should be a proper [type id] instead of a [string ID] pointing to a type.
-
- const char* type_string = GetStringDataByDexStringIndexOrThrow<do_access_check>(shadow_frame,
- string_idx);
- if (UNLIKELY(type_string == nullptr)) {
- CHECK(self->IsExceptionPending());
- return false;
- }
-
- char type_first_letter = type_string[0];
- ShortyFieldType shorty_type;
- if (do_access_check &&
- UNLIKELY(!ShortyFieldType::MaybeCreate(type_first_letter, /*out*/&shorty_type))) { // NOLINT: [whitespace/comma] [3]
- ThrowVerifyError(shadow_frame.GetMethod()->GetDeclaringClass(),
- "capture-variable vB must be a valid type");
- return false;
- } else {
- // Already verified that the type is valid.
- shorty_type = ShortyFieldType(type_first_letter);
- }
-
- const size_t captured_variable_count = closure_builder->GetCaptureCount();
-
- // Note: types are specified explicitly so that the closure is packed tightly.
- switch (shorty_type) {
- case ShortyFieldType::kBoolean: {
- uint32_t primitive_narrow_value = shadow_frame.GetVReg(source_vreg);
- closure_builder->CaptureVariablePrimitive<bool>(primitive_narrow_value);
- break;
- }
- case ShortyFieldType::kByte: {
- uint32_t primitive_narrow_value = shadow_frame.GetVReg(source_vreg);
- closure_builder->CaptureVariablePrimitive<int8_t>(primitive_narrow_value);
- break;
- }
- case ShortyFieldType::kChar: {
- uint32_t primitive_narrow_value = shadow_frame.GetVReg(source_vreg);
- closure_builder->CaptureVariablePrimitive<uint16_t>(primitive_narrow_value);
- break;
- }
- case ShortyFieldType::kShort: {
- uint32_t primitive_narrow_value = shadow_frame.GetVReg(source_vreg);
- closure_builder->CaptureVariablePrimitive<int16_t>(primitive_narrow_value);
- break;
- }
- case ShortyFieldType::kInt: {
- uint32_t primitive_narrow_value = shadow_frame.GetVReg(source_vreg);
- closure_builder->CaptureVariablePrimitive<int32_t>(primitive_narrow_value);
- break;
- }
- case ShortyFieldType::kDouble: {
- closure_builder->CaptureVariablePrimitive(shadow_frame.GetVRegDouble(source_vreg));
- break;
- }
- case ShortyFieldType::kFloat: {
- closure_builder->CaptureVariablePrimitive(shadow_frame.GetVRegFloat(source_vreg));
- break;
- }
- case ShortyFieldType::kLambda: {
- UNIMPLEMENTED(FATAL) << " capture-variable with type kLambda";
- // TODO: Capturing lambdas recursively will be done at a later time.
- UNREACHABLE();
- }
- case ShortyFieldType::kLong: {
- closure_builder->CaptureVariablePrimitive(shadow_frame.GetVRegLong(source_vreg));
- break;
- }
- case ShortyFieldType::kObject: {
- closure_builder->CaptureVariableObject(shadow_frame.GetVRegReference(source_vreg));
- UNIMPLEMENTED(FATAL) << " capture-variable with type kObject";
- // TODO: finish implementing this. disabled for now since we can't track lambda refs for GC.
- UNREACHABLE();
- }
-
- default:
- LOG(FATAL) << "Invalid shorty type value " << shorty_type;
- UNREACHABLE();
- }
-
- DCHECK_EQ(captured_variable_count + 1, closure_builder->GetCaptureCount());
-
- return true;
-}
-
-// Handles capture-variable instructions.
-// Returns true on success, otherwise throws an exception and returns false.
-// (Exceptions are thrown by creating a new exception and then being put in the thread TLS)
-template<bool do_access_check>
-static inline bool DoLiberateVariable(Thread* self,
- const Instruction* inst,
- size_t captured_variable_index,
- /*inout*/ShadowFrame& shadow_frame) {
- using lambda::ShortyFieldType;
- /*
- * liberate-variable is opcode 0xf7, fmt 0x22c
- * - vA is the destination register
- * - vB is the register with the lambda closure in it
- * - vC is the string ID which needs to be a valid field type descriptor
- */
-
- const uint32_t dest_vreg = inst->VRegA_22c();
- const uint32_t closure_vreg = inst->VRegB_22c();
- const uint32_t string_idx = inst->VRegC_22c();
- // TODO: this should be a proper [type id] instead of a [string ID] pointing to a type.
-
-
- // Synthesize a long type descriptor from a shorty type descriptor list.
- // TODO: Fix the dex encoding to contain the long and short type descriptors.
- const char* type_string = GetStringDataByDexStringIndexOrThrow<do_access_check>(shadow_frame,
- string_idx);
- if (UNLIKELY(do_access_check && type_string == nullptr)) {
- CHECK(self->IsExceptionPending());
- shadow_frame.SetVReg(dest_vreg, 0);
- return false;
- }
-
- char type_first_letter = type_string[0];
- ShortyFieldType shorty_type;
- if (do_access_check &&
- UNLIKELY(!ShortyFieldType::MaybeCreate(type_first_letter, /*out*/&shorty_type))) { // NOLINT: [whitespace/comma] [3]
- ThrowVerifyError(shadow_frame.GetMethod()->GetDeclaringClass(),
- "liberate-variable vC must be a valid type");
- shadow_frame.SetVReg(dest_vreg, 0);
- return false;
- } else {
- // Already verified that the type is valid.
- shorty_type = ShortyFieldType(type_first_letter);
- }
-
- // Check for closure being null *after* the type check.
- // This way we can access the type info in case we fail later, to know how many vregs to clear.
- const lambda::Closure* lambda_closure =
- ReadLambdaClosureFromVRegsOrThrow(/*inout*/shadow_frame, closure_vreg);
-
- // Failed lambda target runtime check, an exception was raised.
- if (UNLIKELY(lambda_closure == nullptr)) {
- CHECK(self->IsExceptionPending());
-
- // Clear the destination vreg(s) to be safe.
- shadow_frame.SetVReg(dest_vreg, 0);
- if (shorty_type.IsPrimitiveWide() || shorty_type.IsLambda()) {
- shadow_frame.SetVReg(dest_vreg + 1, 0);
- }
- return false;
- }
-
- if (do_access_check &&
- UNLIKELY(captured_variable_index >= lambda_closure->GetNumberOfCapturedVariables())) {
- ThrowVerifyError(shadow_frame.GetMethod()->GetDeclaringClass(),
- "liberate-variable captured variable index %zu out of bounds",
- lambda_closure->GetNumberOfCapturedVariables());
- // Clear the destination vreg(s) to be safe.
- shadow_frame.SetVReg(dest_vreg, 0);
- if (shorty_type.IsPrimitiveWide() || shorty_type.IsLambda()) {
- shadow_frame.SetVReg(dest_vreg + 1, 0);
- }
- return false;
- }
-
- // Verify that the runtime type of the captured-variable matches the requested dex type.
- if (do_access_check) {
- ShortyFieldType actual_type = lambda_closure->GetCapturedShortyType(captured_variable_index);
- if (actual_type != shorty_type) {
- ThrowVerifyError(shadow_frame.GetMethod()->GetDeclaringClass(),
- "cannot liberate-variable of runtime type '%c' to dex type '%c'",
- static_cast<char>(actual_type),
- static_cast<char>(shorty_type));
-
- shadow_frame.SetVReg(dest_vreg, 0);
- if (shorty_type.IsPrimitiveWide() || shorty_type.IsLambda()) {
- shadow_frame.SetVReg(dest_vreg + 1, 0);
- }
- return false;
- }
-
- if (actual_type.IsLambda() || actual_type.IsObject()) {
- UNIMPLEMENTED(FATAL) << "liberate-variable type checks needs to "
- << "parse full type descriptor for objects and lambdas";
- }
- }
-
- // Unpack the captured variable from the closure into the correct type, then save it to the vreg.
- if (shorty_type.IsPrimitiveNarrow()) {
- uint32_t primitive_narrow_value =
- lambda_closure->GetCapturedPrimitiveNarrow(captured_variable_index);
- shadow_frame.SetVReg(dest_vreg, primitive_narrow_value);
- } else if (shorty_type.IsPrimitiveWide()) {
- uint64_t primitive_wide_value =
- lambda_closure->GetCapturedPrimitiveWide(captured_variable_index);
- shadow_frame.SetVRegLong(dest_vreg, static_cast<int64_t>(primitive_wide_value));
- } else if (shorty_type.IsObject()) {
- mirror::Object* unpacked_object =
- lambda_closure->GetCapturedObject(captured_variable_index);
- shadow_frame.SetVRegReference(dest_vreg, unpacked_object);
-
- UNIMPLEMENTED(FATAL) << "liberate-variable cannot unpack objects yet";
- } else if (shorty_type.IsLambda()) {
- UNIMPLEMENTED(FATAL) << "liberate-variable cannot unpack lambdas yet";
- } else {
- LOG(FATAL) << "unreachable";
- UNREACHABLE();
- }
-
- return true;
-}
-
-template<bool do_access_check>
-static inline bool DoInvokeLambda(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
- uint16_t inst_data, JValue* result) {
- /*
- * invoke-lambda is opcode 0x25
- *
- * - vC is the closure register (both vC and vC + 1 will be used to store the closure).
- * - vB is the number of additional registers up to |{vD,vE,vF,vG}| (4)
- * - the rest of the registers are always var-args
- *
- * - reading var-args for 0x25 gets us vD,vE,vF,vG (but not vB)
- */
- uint32_t vreg_closure = inst->VRegC_25x();
- const lambda::Closure* lambda_closure =
- ReadLambdaClosureFromVRegsOrThrow(shadow_frame, vreg_closure);
-
- // Failed lambda target runtime check, an exception was raised.
- if (UNLIKELY(lambda_closure == nullptr)) {
- CHECK(self->IsExceptionPending());
- result->SetJ(0);
- return false;
- }
-
- ArtMethod* const called_method = lambda_closure->GetTargetMethod();
- // Invoke a non-range lambda
- return DoLambdaCall<false, do_access_check>(called_method, self, shadow_frame, inst, inst_data,
- result);
-}
-
-// Handles invoke-XXX/range instructions (other than invoke-lambda[-range]).
+// Handles invoke-XXX/range instructions.
// Returns true on success, otherwise throws an exception and returns false.
template<InvokeType type, bool is_range, bool do_access_check>
static inline bool DoInvoke(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
@@ -904,74 +416,6 @@
return 3;
}
-template <bool _do_check>
-static inline bool DoBoxLambda(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
- uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_) {
- /*
- * box-lambda vA, vB /// opcode 0xf8, format 22x
- * - vA is the target register where the Object representation of the closure will be stored into
- * - vB is a closure (made by create-lambda)
- * (also reads vB + 1)
- */
- uint32_t vreg_target_object = inst->VRegA_22x(inst_data);
- uint32_t vreg_source_closure = inst->VRegB_22x();
-
- lambda::Closure* lambda_closure = ReadLambdaClosureFromVRegsOrThrow(shadow_frame,
- vreg_source_closure);
-
- // Failed lambda target runtime check, an exception was raised.
- if (UNLIKELY(lambda_closure == nullptr)) {
- CHECK(self->IsExceptionPending());
- return false;
- }
-
- mirror::Object* closure_as_object =
- Runtime::Current()->GetLambdaBoxTable()->BoxLambda(lambda_closure);
-
- // Failed to box the lambda, an exception was raised.
- if (UNLIKELY(closure_as_object == nullptr)) {
- CHECK(self->IsExceptionPending());
- return false;
- }
-
- shadow_frame.SetVRegReference(vreg_target_object, closure_as_object);
- return true;
-}
-
-template <bool _do_check> SHARED_REQUIRES(Locks::mutator_lock_)
-static inline bool DoUnboxLambda(Thread* self,
- ShadowFrame& shadow_frame,
- const Instruction* inst,
- uint16_t inst_data) {
- /*
- * unbox-lambda vA, vB, [type id] /// opcode 0xf9, format 22c
- * - vA is the target register where the closure will be written into
- * (also writes vA + 1)
- * - vB is the Object representation of the closure (made by box-lambda)
- */
- uint32_t vreg_target_closure = inst->VRegA_22c(inst_data);
- uint32_t vreg_source_object = inst->VRegB_22c();
-
- // Raise NullPointerException if object is null
- mirror::Object* boxed_closure_object = shadow_frame.GetVRegReference(vreg_source_object);
- if (UNLIKELY(boxed_closure_object == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- return false;
- }
-
- lambda::Closure* unboxed_closure = nullptr;
- // Raise an exception if unboxing fails.
- if (!Runtime::Current()->GetLambdaBoxTable()->UnboxLambda(boxed_closure_object,
- /*out*/&unboxed_closure)) {
- CHECK(self->IsExceptionPending());
- return false;
- }
-
- DCHECK(unboxed_closure != nullptr);
- WriteLambdaClosureIntoVRegs(/*inout*/shadow_frame, *unboxed_closure, vreg_target_closure);
- return true;
-}
-
uint32_t FindNextInstructionFollowingException(Thread* self, ShadowFrame& shadow_frame,
uint32_t dex_pc, const instrumentation::Instrumentation* instrumentation)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -1058,72 +502,6 @@
EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(true); // invoke-virtual-quick-range.
#undef EXPLICIT_INSTANTIATION_DO_INVOKE_VIRTUAL_QUICK
-// Explicitly instantiate all DoCreateLambda functions.
-#define EXPLICIT_DO_CREATE_LAMBDA_DECL(_do_check) \
-template SHARED_REQUIRES(Locks::mutator_lock_) \
-bool DoCreateLambda<_do_check>(Thread* self, \
- const Instruction* inst, \
- /*inout*/ShadowFrame& shadow_frame, \
- /*inout*/lambda::ClosureBuilder* closure_builder, \
- /*inout*/lambda::Closure* uninitialized_closure);
-
-EXPLICIT_DO_CREATE_LAMBDA_DECL(false); // create-lambda
-EXPLICIT_DO_CREATE_LAMBDA_DECL(true); // create-lambda
-#undef EXPLICIT_DO_CREATE_LAMBDA_DECL
-
-// Explicitly instantiate all DoInvokeLambda functions.
-#define EXPLICIT_DO_INVOKE_LAMBDA_DECL(_do_check) \
-template SHARED_REQUIRES(Locks::mutator_lock_) \
-bool DoInvokeLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \
- uint16_t inst_data, JValue* result);
-
-EXPLICIT_DO_INVOKE_LAMBDA_DECL(false); // invoke-lambda
-EXPLICIT_DO_INVOKE_LAMBDA_DECL(true); // invoke-lambda
-#undef EXPLICIT_DO_INVOKE_LAMBDA_DECL
-
-// Explicitly instantiate all DoBoxLambda functions.
-#define EXPLICIT_DO_BOX_LAMBDA_DECL(_do_check) \
-template SHARED_REQUIRES(Locks::mutator_lock_) \
-bool DoBoxLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \
- uint16_t inst_data);
-
-EXPLICIT_DO_BOX_LAMBDA_DECL(false); // box-lambda
-EXPLICIT_DO_BOX_LAMBDA_DECL(true); // box-lambda
-#undef EXPLICIT_DO_BOX_LAMBDA_DECL
-
-// Explicitly instantiate all DoUnBoxLambda functions.
-#define EXPLICIT_DO_UNBOX_LAMBDA_DECL(_do_check) \
-template SHARED_REQUIRES(Locks::mutator_lock_) \
-bool DoUnboxLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \
- uint16_t inst_data);
-
-EXPLICIT_DO_UNBOX_LAMBDA_DECL(false); // unbox-lambda
-EXPLICIT_DO_UNBOX_LAMBDA_DECL(true); // unbox-lambda
-#undef EXPLICIT_DO_BOX_LAMBDA_DECL
-
-// Explicitly instantiate all DoCaptureVariable functions.
-#define EXPLICIT_DO_CAPTURE_VARIABLE_DECL(_do_check) \
-template SHARED_REQUIRES(Locks::mutator_lock_) \
-bool DoCaptureVariable<_do_check>(Thread* self, \
- const Instruction* inst, \
- ShadowFrame& shadow_frame, \
- lambda::ClosureBuilder* closure_builder);
-
-EXPLICIT_DO_CAPTURE_VARIABLE_DECL(false); // capture-variable
-EXPLICIT_DO_CAPTURE_VARIABLE_DECL(true); // capture-variable
-#undef EXPLICIT_DO_CREATE_LAMBDA_DECL
-
-// Explicitly instantiate all DoLiberateVariable functions.
-#define EXPLICIT_DO_LIBERATE_VARIABLE_DECL(_do_check) \
-template SHARED_REQUIRES(Locks::mutator_lock_) \
-bool DoLiberateVariable<_do_check>(Thread* self, \
- const Instruction* inst, \
- size_t captured_variable_index, \
- ShadowFrame& shadow_frame); \
-
-EXPLICIT_DO_LIBERATE_VARIABLE_DECL(false); // liberate-variable
-EXPLICIT_DO_LIBERATE_VARIABLE_DECL(true); // liberate-variable
-#undef EXPLICIT_DO_LIBERATE_LAMBDA_DECL
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index 3b6e015..43b2778 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -18,14 +18,11 @@
// Clang 3.4 fails to build the goto interpreter implementation.
-#include "base/stl_util.h" // MakeUnique
#include "experimental_flags.h"
#include "interpreter_common.h"
#include "jit/jit.h"
#include "safe_math.h"
-#include <memory> // std::unique_ptr
-
namespace art {
namespace interpreter {
@@ -93,16 +90,6 @@
#define HANDLE_INSTRUCTION_START(opcode) op_##opcode: // NOLINT(whitespace/labels)
#define HANDLE_INSTRUCTION_END() UNREACHABLE_CODE_CHECK()
-// Use with instructions labeled with kExperimental flag:
-#define HANDLE_EXPERIMENTAL_INSTRUCTION_START(opcode) \
- HANDLE_INSTRUCTION_START(opcode); \
- DCHECK(inst->IsExperimental()); \
- if (Runtime::Current()->AreExperimentalFlagsEnabled(ExperimentalFlags::kLambdas)) {
-#define HANDLE_EXPERIMENTAL_INSTRUCTION_END() \
- } else { \
- UnexpectedOpcode(inst, shadow_frame); \
- } HANDLE_INSTRUCTION_END();
-
#define HANDLE_MONITOR_CHECKS() \
if (!DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame)) { \
HANDLE_PENDING_EXCEPTION(); \
@@ -190,8 +177,6 @@
uint16_t inst_data;
const void* const* currentHandlersTable;
UPDATE_HANDLER_TABLE();
- std::unique_ptr<lambda::ClosureBuilder> lambda_closure_builder;
- size_t lambda_captured_variable_index = 0;
const auto* const instrumentation = Runtime::Current()->GetInstrumentation();
ArtMethod* method = shadow_frame.GetMethod();
jit::Jit* jit = Runtime::Current()->GetJit();
@@ -1668,14 +1653,6 @@
}
HANDLE_INSTRUCTION_END();
- HANDLE_EXPERIMENTAL_INSTRUCTION_START(INVOKE_LAMBDA) {
- bool success = DoInvokeLambda<do_access_check>(self, shadow_frame, inst, inst_data,
- &result_register);
- UPDATE_HANDLER_TABLE();
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_EXPERIMENTAL_INSTRUCTION_END();
-
HANDLE_INSTRUCTION_START(NEG_INT)
shadow_frame.SetVReg(
inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
@@ -2457,62 +2434,6 @@
ADVANCE(2);
HANDLE_INSTRUCTION_END();
- HANDLE_EXPERIMENTAL_INSTRUCTION_START(CREATE_LAMBDA) {
- if (lambda_closure_builder == nullptr) {
- // DoCreateLambda always needs a ClosureBuilder, even if it has 0 captured variables.
- lambda_closure_builder = MakeUnique<lambda::ClosureBuilder>();
- }
-
- // TODO: these allocations should not leak, and the lambda method should not be local.
- lambda::Closure* lambda_closure =
- reinterpret_cast<lambda::Closure*>(alloca(lambda_closure_builder->GetSize()));
- bool success = DoCreateLambda<do_access_check>(self,
- inst,
- /*inout*/shadow_frame,
- /*inout*/lambda_closure_builder.get(),
- /*inout*/lambda_closure);
- lambda_closure_builder.reset(nullptr); // reset state of variables captured
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_EXPERIMENTAL_INSTRUCTION_END();
-
- HANDLE_EXPERIMENTAL_INSTRUCTION_START(BOX_LAMBDA) {
- bool success = DoBoxLambda<do_access_check>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_EXPERIMENTAL_INSTRUCTION_END();
-
- HANDLE_EXPERIMENTAL_INSTRUCTION_START(UNBOX_LAMBDA) {
- bool success = DoUnboxLambda<do_access_check>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_EXPERIMENTAL_INSTRUCTION_END();
-
- HANDLE_EXPERIMENTAL_INSTRUCTION_START(CAPTURE_VARIABLE) {
- if (lambda_closure_builder == nullptr) {
- lambda_closure_builder = MakeUnique<lambda::ClosureBuilder>();
- }
-
- bool success = DoCaptureVariable<do_access_check>(self,
- inst,
- /*inout*/shadow_frame,
- /*inout*/lambda_closure_builder.get());
-
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_EXPERIMENTAL_INSTRUCTION_END();
-
- HANDLE_EXPERIMENTAL_INSTRUCTION_START(LIBERATE_VARIABLE) {
- bool success = DoLiberateVariable<do_access_check>(self,
- inst,
- lambda_captured_variable_index,
- /*inout*/shadow_frame);
- // Temporarily only allow sequences of 'liberate-variable, liberate-variable, ...'
- lambda_captured_variable_index++;
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_EXPERIMENTAL_INSTRUCTION_END();
-
HANDLE_INSTRUCTION_START(UNUSED_3E)
UnexpectedOpcode(inst, shadow_frame);
HANDLE_INSTRUCTION_END();
@@ -2545,10 +2466,34 @@
UnexpectedOpcode(inst, shadow_frame);
HANDLE_INSTRUCTION_END();
+ HANDLE_INSTRUCTION_START(UNUSED_F3)
+ UnexpectedOpcode(inst, shadow_frame);
+ HANDLE_INSTRUCTION_END();
+
HANDLE_INSTRUCTION_START(UNUSED_F4)
UnexpectedOpcode(inst, shadow_frame);
HANDLE_INSTRUCTION_END();
+ HANDLE_INSTRUCTION_START(UNUSED_F5)
+ UnexpectedOpcode(inst, shadow_frame);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F6)
+ UnexpectedOpcode(inst, shadow_frame);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F7)
+ UnexpectedOpcode(inst, shadow_frame);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F8)
+ UnexpectedOpcode(inst, shadow_frame);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F9)
+ UnexpectedOpcode(inst, shadow_frame);
+ HANDLE_INSTRUCTION_END();
+
HANDLE_INSTRUCTION_START(UNUSED_FA)
UnexpectedOpcode(inst, shadow_frame);
HANDLE_INSTRUCTION_END();
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 8bfc10c..a6349fc 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -15,14 +15,11 @@
*/
#include "base/enums.h"
-#include "base/stl_util.h" // MakeUnique
#include "experimental_flags.h"
#include "interpreter_common.h"
#include "jit/jit.h"
#include "safe_math.h"
-#include <memory> // std::unique_ptr
-
namespace art {
namespace interpreter {
@@ -92,11 +89,6 @@
} \
} while (false)
-static bool IsExperimentalInstructionEnabled(const Instruction *inst) {
- DCHECK(inst->IsExperimental());
- return Runtime::Current()->AreExperimentalFlagsEnabled(ExperimentalFlags::kLambdas);
-}
-
template<bool do_access_check, bool transaction_active>
JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register,
@@ -116,10 +108,6 @@
ArtMethod* method = shadow_frame.GetMethod();
jit::Jit* jit = Runtime::Current()->GetJit();
- // TODO: collapse capture-variable+create-lambda into one opcode, then we won't need
- // to keep this live for the scope of the entire function call.
- std::unique_ptr<lambda::ClosureBuilder> lambda_closure_builder;
- size_t lambda_captured_variable_index = 0;
do {
dex_pc = inst->GetDexPc(insns);
shadow_frame.SetDexPC(dex_pc);
@@ -2333,105 +2321,13 @@
(inst->VRegC_22b() & 0x1f));
inst = inst->Next_2xx();
break;
- case Instruction::INVOKE_LAMBDA: {
- if (!IsExperimentalInstructionEnabled(inst)) {
- UnexpectedOpcode(inst, shadow_frame);
- }
-
- PREAMBLE();
- bool success = DoInvokeLambda<do_access_check>(self, shadow_frame, inst, inst_data,
- &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::CAPTURE_VARIABLE: {
- if (!IsExperimentalInstructionEnabled(inst)) {
- UnexpectedOpcode(inst, shadow_frame);
- }
-
- if (lambda_closure_builder == nullptr) {
- lambda_closure_builder = MakeUnique<lambda::ClosureBuilder>();
- }
-
- PREAMBLE();
- bool success = DoCaptureVariable<do_access_check>(self,
- inst,
- /*inout*/shadow_frame,
- /*inout*/lambda_closure_builder.get());
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::CREATE_LAMBDA: {
- if (!IsExperimentalInstructionEnabled(inst)) {
- UnexpectedOpcode(inst, shadow_frame);
- }
-
- PREAMBLE();
-
- if (lambda_closure_builder == nullptr) {
- // DoCreateLambda always needs a ClosureBuilder, even if it has 0 captured variables.
- lambda_closure_builder = MakeUnique<lambda::ClosureBuilder>();
- }
-
- // TODO: these allocations should not leak, and the lambda method should not be local.
- lambda::Closure* lambda_closure =
- reinterpret_cast<lambda::Closure*>(alloca(lambda_closure_builder->GetSize()));
- bool success = DoCreateLambda<do_access_check>(self,
- inst,
- /*inout*/shadow_frame,
- /*inout*/lambda_closure_builder.get(),
- /*inout*/lambda_closure);
- lambda_closure_builder.reset(nullptr); // reset state of variables captured
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::LIBERATE_VARIABLE: {
- if (!IsExperimentalInstructionEnabled(inst)) {
- UnexpectedOpcode(inst, shadow_frame);
- }
-
- PREAMBLE();
- bool success = DoLiberateVariable<do_access_check>(self,
- inst,
- lambda_captured_variable_index,
- /*inout*/shadow_frame);
- // Temporarily only allow sequences of 'liberate-variable, liberate-variable, ...'
- lambda_captured_variable_index++;
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::UNUSED_F4: {
- if (!IsExperimentalInstructionEnabled(inst)) {
- UnexpectedOpcode(inst, shadow_frame);
- }
-
- CHECK(false); // TODO(iam): Implement opcodes for lambdas
- break;
- }
- case Instruction::BOX_LAMBDA: {
- if (!IsExperimentalInstructionEnabled(inst)) {
- UnexpectedOpcode(inst, shadow_frame);
- }
-
- PREAMBLE();
- bool success = DoBoxLambda<do_access_check>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::UNBOX_LAMBDA: {
- if (!IsExperimentalInstructionEnabled(inst)) {
- UnexpectedOpcode(inst, shadow_frame);
- }
-
- PREAMBLE();
- bool success = DoUnboxLambda<do_access_check>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
case Instruction::UNUSED_3E ... Instruction::UNUSED_43:
- case Instruction::UNUSED_FA ... Instruction::UNUSED_FF:
+ case Instruction::UNUSED_F3 ... Instruction::UNUSED_F9:
+ case Instruction::UNUSED_FC ... Instruction::UNUSED_FF:
case Instruction::UNUSED_79:
case Instruction::UNUSED_7A:
+ case Instruction::INVOKE_POLYMORPHIC:
+ case Instruction::INVOKE_POLYMORPHIC_RANGE:
UnexpectedOpcode(inst, shadow_frame);
}
} while (!interpret_one_instruction);
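
The switch interpreter's counterpart groups all dead opcodes into GNU case ranges ("case A ... B:"), which is why retiring the lambda opcodes only required widening those ranges. A simplified sketch, with illustrative opcode values and an assumed UnexpectedOpcode helper:

    #include <cstdint>
    #include <cstdlib>

    // Opcode values are illustrative; the range syntax is a GCC/Clang extension.
    enum Opcode : uint8_t { NOP = 0x00, UNUSED_F3 = 0xf3, UNUSED_F9 = 0xf9,
                            UNUSED_FC = 0xfc, UNUSED_FF = 0xff };

    void UnexpectedOpcode(Opcode) { std::abort(); }  // stand-in: throws/aborts in ART

    void Dispatch(Opcode op) {
      switch (op) {
        case NOP:
          break;                         // real handlers live here
        case UNUSED_F3 ... UNUSED_F9:    // one range instead of seven cases
        case UNUSED_FC ... UNUSED_FF:
          UnexpectedOpcode(op);
          break;
        default:
          break;
      }
    }
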
diff --git a/runtime/interpreter/mterp/config_arm b/runtime/interpreter/mterp/config_arm
index 436dcd2..b6caf11 100644
--- a/runtime/interpreter/mterp/config_arm
+++ b/runtime/interpreter/mterp/config_arm
@@ -279,13 +279,13 @@
# op op_iget_byte_quick FALLBACK
# op op_iget_char_quick FALLBACK
# op op_iget_short_quick FALLBACK
- op op_invoke_lambda FALLBACK
+ # op op_unused_f3 FALLBACK
# op op_unused_f4 FALLBACK
- op op_capture_variable FALLBACK
- op op_create_lambda FALLBACK
- op op_liberate_variable FALLBACK
- op op_box_lambda FALLBACK
- op op_unbox_lambda FALLBACK
+ # op op_unused_f5 FALLBACK
+ # op op_unused_f6 FALLBACK
+ # op op_unused_f7 FALLBACK
+ # op op_unused_f8 FALLBACK
+ # op op_unused_f9 FALLBACK
# op op_unused_fa FALLBACK
# op op_unused_fb FALLBACK
# op op_unused_fc FALLBACK
diff --git a/runtime/interpreter/mterp/config_arm64 b/runtime/interpreter/mterp/config_arm64
index 6427ead..c5e06c7 100644
--- a/runtime/interpreter/mterp/config_arm64
+++ b/runtime/interpreter/mterp/config_arm64
@@ -277,13 +277,13 @@
# op op_iget_byte_quick FALLBACK
# op op_iget_char_quick FALLBACK
# op op_iget_short_quick FALLBACK
- op op_invoke_lambda FALLBACK
+ # op op_unused_f3 FALLBACK
# op op_unused_f4 FALLBACK
- op op_capture_variable FALLBACK
- op op_create_lambda FALLBACK
- op op_liberate_variable FALLBACK
- op op_box_lambda FALLBACK
- op op_unbox_lambda FALLBACK
+ # op op_unused_f5 FALLBACK
+ # op op_unused_f6 FALLBACK
+ # op op_unused_f7 FALLBACK
+ # op op_unused_f8 FALLBACK
+ # op op_unused_f9 FALLBACK
# op op_unused_fa FALLBACK
# op op_unused_fb FALLBACK
# op op_unused_fc FALLBACK
diff --git a/runtime/interpreter/mterp/config_mips b/runtime/interpreter/mterp/config_mips
index c6292c3..515cb0b 100644
--- a/runtime/interpreter/mterp/config_mips
+++ b/runtime/interpreter/mterp/config_mips
@@ -279,13 +279,13 @@
# op op_iget_byte_quick FALLBACK
# op op_iget_char_quick FALLBACK
# op op_iget_short_quick FALLBACK
- op op_invoke_lambda FALLBACK
+ # op op_unused_f3 FALLBACK
# op op_unused_f4 FALLBACK
- op op_capture_variable FALLBACK
- op op_create_lambda FALLBACK
- op op_liberate_variable FALLBACK
- op op_box_lambda FALLBACK
- op op_unbox_lambda FALLBACK
+ # op op_unused_f5 FALLBACK
+ # op op_unused_f6 FALLBACK
+ # op op_unused_f7 FALLBACK
+ # op op_unused_f8 FALLBACK
+ # op op_unused_f9 FALLBACK
# op op_unused_fa FALLBACK
# op op_unused_fb FALLBACK
# op op_unused_fc FALLBACK
diff --git a/runtime/interpreter/mterp/config_mips64 b/runtime/interpreter/mterp/config_mips64
index c40c007..aafd248 100644
--- a/runtime/interpreter/mterp/config_mips64
+++ b/runtime/interpreter/mterp/config_mips64
@@ -279,13 +279,13 @@
# op op_iget_byte_quick FALLBACK
# op op_iget_char_quick FALLBACK
# op op_iget_short_quick FALLBACK
- op op_invoke_lambda FALLBACK
+ # op op_unused_f3 FALLBACK
# op op_unused_f4 FALLBACK
- op op_capture_variable FALLBACK
- op op_create_lambda FALLBACK
- op op_liberate_variable FALLBACK
- op op_box_lambda FALLBACK
- op op_unbox_lambda FALLBACK
+ # op op_unused_f5 FALLBACK
+ # op op_unused_f6 FALLBACK
+ # op op_unused_f7 FALLBACK
+ # op op_unused_f8 FALLBACK
+ # op op_unused_f9 FALLBACK
# op op_unused_fa FALLBACK
# op op_unused_fb FALLBACK
# op op_unused_fc FALLBACK
diff --git a/runtime/interpreter/mterp/config_x86 b/runtime/interpreter/mterp/config_x86
index f1501e1..64d8ee8 100644
--- a/runtime/interpreter/mterp/config_x86
+++ b/runtime/interpreter/mterp/config_x86
@@ -283,13 +283,13 @@
# op op_iget_byte_quick FALLBACK
# op op_iget_char_quick FALLBACK
# op op_iget_short_quick FALLBACK
- op op_invoke_lambda FALLBACK
+ # op op_unused_f3 FALLBACK
# op op_unused_f4 FALLBACK
- op op_capture_variable FALLBACK
- op op_create_lambda FALLBACK
- op op_liberate_variable FALLBACK
- op op_box_lambda FALLBACK
- op op_unbox_lambda FALLBACK
+ # op op_unused_f5 FALLBACK
+ # op op_unused_f6 FALLBACK
+ # op op_unused_f7 FALLBACK
+ # op op_unused_f8 FALLBACK
+ # op op_unused_f9 FALLBACK
# op op_unused_fa FALLBACK
# op op_unused_fb FALLBACK
# op op_unused_fc FALLBACK
diff --git a/runtime/interpreter/mterp/config_x86_64 b/runtime/interpreter/mterp/config_x86_64
index 1d7eb03..7c357db 100644
--- a/runtime/interpreter/mterp/config_x86_64
+++ b/runtime/interpreter/mterp/config_x86_64
@@ -283,13 +283,13 @@
# op op_iget_byte_quick FALLBACK
# op op_iget_char_quick FALLBACK
# op op_iget_short_quick FALLBACK
- op op_invoke_lambda FALLBACK
+ # op op_unused_f3 FALLBACK
# op op_unused_f4 FALLBACK
- op op_capture_variable FALLBACK
- op op_create_lambda FALLBACK
- op op_liberate_variable FALLBACK
- op op_box_lambda FALLBACK
- op op_unbox_lambda FALLBACK
+ # op op_unused_f5 FALLBACK
+ # op op_unused_f6 FALLBACK
+ # op op_unused_f7 FALLBACK
+ # op op_unused_f8 FALLBACK
+ # op op_unused_f9 FALLBACK
# op op_unused_fa FALLBACK
# op op_unused_fb FALLBACK
# op op_unused_fc FALLBACK
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f3.S b/runtime/interpreter/mterp/mips64/op_unused_f3.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_f3.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f5.S b/runtime/interpreter/mterp/mips64/op_unused_f5.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_f5.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f6.S b/runtime/interpreter/mterp/mips64/op_unused_f6.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_f6.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f7.S b/runtime/interpreter/mterp/mips64/op_unused_f7.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_f7.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f8.S b/runtime/interpreter/mterp/mips64/op_unused_f8.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_f8.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f9.S b/runtime/interpreter/mterp/mips64/op_unused_f9.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_f9.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
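
Two mechanics drive the config and .S churn above. A config line "op op_X FALLBACK" forces mterp to emit a hand-written bail-out stub, while a commented "# op op_unused_f3 FALLBACK" lets the generator expand the per-opcode template instead; each new op_unused_*.S file is just a one-line %include of the shared unused.S. The net runtime effect, sketched as a hedged C++ analogy (the generated code is really position-aligned assembly, not a function-pointer table):

    using Handler = void (*)();

    void MterpFallback() { /* stub: transfer control to the reference interpreter */ }

    void BuildTable(Handler table[256]) {
      for (int op = 0; op < 256; ++op) {
        table[op] = &MterpFallback;  // every unused opcode shares one bail-out path
      }
      // Implemented opcodes then overwrite their slots, e.g.:
      // table[0x00] = &NopHandler;
    }
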
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
index 02b462f..c33df6d 100644
--- a/runtime/interpreter/mterp/out/mterp_arm.S
+++ b/runtime/interpreter/mterp/out/mterp_arm.S
@@ -7228,9 +7228,13 @@
/* ------------------------------ */
.balign 128
-.L_op_invoke_lambda: /* 0xf3 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f3: /* 0xf3 */
+/* File: arm/op_unused_f3.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
@@ -7246,37 +7250,57 @@
/* ------------------------------ */
.balign 128
-.L_op_capture_variable: /* 0xf5 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f5: /* 0xf5 */
+/* File: arm/op_unused_f5.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_create_lambda: /* 0xf6 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f6: /* 0xf6 */
+/* File: arm/op_unused_f6.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_liberate_variable: /* 0xf7 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f7: /* 0xf7 */
+/* File: arm/op_unused_f7.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_box_lambda: /* 0xf8 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f8: /* 0xf8 */
+/* File: arm/op_unused_f8.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_unbox_lambda: /* 0xf9 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f9: /* 0xf9 */
+/* File: arm/op_unused_f9.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
@@ -11591,7 +11615,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_invoke_lambda: /* 0xf3 */
+.L_ALT_op_unused_f3: /* 0xf3 */
/* File: arm/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11625,7 +11649,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_capture_variable: /* 0xf5 */
+.L_ALT_op_unused_f5: /* 0xf5 */
/* File: arm/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11642,7 +11666,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_create_lambda: /* 0xf6 */
+.L_ALT_op_unused_f6: /* 0xf6 */
/* File: arm/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11659,7 +11683,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_liberate_variable: /* 0xf7 */
+.L_ALT_op_unused_f7: /* 0xf7 */
/* File: arm/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11676,7 +11700,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_box_lambda: /* 0xf8 */
+.L_ALT_op_unused_f8: /* 0xf8 */
/* File: arm/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11693,7 +11717,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_unbox_lambda: /* 0xf9 */
+.L_ALT_op_unused_f9: /* 0xf9 */
/* File: arm/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
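
The .L_ALT_* labels renamed above belong to mterp's second, instrumentation-aware handler table: each ALT stub calls out to MterpCheckBefore (debugger/tracing hooks) and then transfers to the primary handler, and the interpreter swaps the active table when instrumentation toggles (the UPDATE_HANDLER_TABLE() calls in the C++ hunks). A simplified sketch with assumed names; real mterp has one handler per opcode rather than a shared pair:

    using Handler = void (*)(int);

    void RealHandler(int op) { /* execute the bytecode for `op` */ }
    void MterpCheckBefore(int op) { /* notify debugger/tracing hooks */ }
    void AltStub(int op) { MterpCheckBefore(op); RealHandler(op); }

    Handler main_table[256];
    Handler alt_table[256];
    Handler* active_table = main_table;

    void InitTables() {
      for (int i = 0; i < 256; ++i) {
        main_table[i] = &RealHandler;
        alt_table[i] = &AltStub;
      }
    }

    void UpdateHandlerTable(bool instrumented) {
      active_table = instrumented ? alt_table : main_table;
    }
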
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
index 0a99802..c7303b9 100644
--- a/runtime/interpreter/mterp/out/mterp_arm64.S
+++ b/runtime/interpreter/mterp/out/mterp_arm64.S
@@ -6785,9 +6785,13 @@
/* ------------------------------ */
.balign 128
-.L_op_invoke_lambda: /* 0xf3 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f3: /* 0xf3 */
+/* File: arm64/op_unused_f3.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
@@ -6803,37 +6807,57 @@
/* ------------------------------ */
.balign 128
-.L_op_capture_variable: /* 0xf5 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f5: /* 0xf5 */
+/* File: arm64/op_unused_f5.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_create_lambda: /* 0xf6 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f6: /* 0xf6 */
+/* File: arm64/op_unused_f6.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_liberate_variable: /* 0xf7 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f7: /* 0xf7 */
+/* File: arm64/op_unused_f7.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_box_lambda: /* 0xf8 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f8: /* 0xf8 */
+/* File: arm64/op_unused_f8.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_unbox_lambda: /* 0xf9 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f9: /* 0xf9 */
+/* File: arm64/op_unused_f9.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
@@ -11376,7 +11400,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_invoke_lambda: /* 0xf3 */
+.L_ALT_op_unused_f3: /* 0xf3 */
/* File: arm64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11410,7 +11434,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_capture_variable: /* 0xf5 */
+.L_ALT_op_unused_f5: /* 0xf5 */
/* File: arm64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11427,7 +11451,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_create_lambda: /* 0xf6 */
+.L_ALT_op_unused_f6: /* 0xf6 */
/* File: arm64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11444,7 +11468,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_liberate_variable: /* 0xf7 */
+.L_ALT_op_unused_f7: /* 0xf7 */
/* File: arm64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11461,7 +11485,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_box_lambda: /* 0xf8 */
+.L_ALT_op_unused_f8: /* 0xf8 */
/* File: arm64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11478,7 +11502,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_unbox_lambda: /* 0xf9 */
+.L_ALT_op_unused_f9: /* 0xf9 */
/* File: arm64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
index 5e0c19f..fef7dc6 100644
--- a/runtime/interpreter/mterp/out/mterp_mips.S
+++ b/runtime/interpreter/mterp/out/mterp_mips.S
@@ -7547,9 +7547,14 @@
/* ------------------------------ */
.balign 128
-.L_op_invoke_lambda: /* 0xf3 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f3: /* 0xf3 */
+/* File: mips/op_unused_f3.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
/* ------------------------------ */
.balign 128
@@ -7564,33 +7569,58 @@
/* ------------------------------ */
.balign 128
-.L_op_capture_variable: /* 0xf5 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f5: /* 0xf5 */
+/* File: mips/op_unused_f5.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
/* ------------------------------ */
.balign 128
-.L_op_create_lambda: /* 0xf6 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f6: /* 0xf6 */
+/* File: mips/op_unused_f6.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
/* ------------------------------ */
.balign 128
-.L_op_liberate_variable: /* 0xf7 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f7: /* 0xf7 */
+/* File: mips/op_unused_f7.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
/* ------------------------------ */
.balign 128
-.L_op_box_lambda: /* 0xf8 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f8: /* 0xf8 */
+/* File: mips/op_unused_f8.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
/* ------------------------------ */
.balign 128
-.L_op_unbox_lambda: /* 0xf9 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f9: /* 0xf9 */
+/* File: mips/op_unused_f9.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
/* ------------------------------ */
.balign 128
@@ -12381,7 +12411,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_invoke_lambda: /* 0xf3 */
+.L_ALT_op_unused_f3: /* 0xf3 */
/* File: mips/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12417,7 +12447,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_capture_variable: /* 0xf5 */
+.L_ALT_op_unused_f5: /* 0xf5 */
/* File: mips/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12435,7 +12465,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_create_lambda: /* 0xf6 */
+.L_ALT_op_unused_f6: /* 0xf6 */
/* File: mips/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12453,7 +12483,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_liberate_variable: /* 0xf7 */
+.L_ALT_op_unused_f7: /* 0xf7 */
/* File: mips/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12471,7 +12501,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_box_lambda: /* 0xf8 */
+.L_ALT_op_unused_f8: /* 0xf8 */
/* File: mips/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12489,7 +12519,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_unbox_lambda: /* 0xf9 */
+.L_ALT_op_unused_f9: /* 0xf9 */
/* File: mips/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
index 35fbe94..a061f1e 100644
--- a/runtime/interpreter/mterp/out/mterp_mips64.S
+++ b/runtime/interpreter/mterp/out/mterp_mips64.S
@@ -7003,10 +7003,15 @@
/* ------------------------------ */
.balign 128
-.L_op_invoke_lambda: /* 0xf3 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f3: /* 0xf3 */
+/* File: mips64/op_unused_f3.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
b MterpFallback
+
/* ------------------------------ */
.balign 128
.L_op_unused_f4: /* 0xf4 */
@@ -7020,34 +7025,59 @@
/* ------------------------------ */
.balign 128
-.L_op_capture_variable: /* 0xf5 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f5: /* 0xf5 */
+/* File: mips64/op_unused_f5.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
b MterpFallback
+
/* ------------------------------ */
.balign 128
-.L_op_create_lambda: /* 0xf6 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f6: /* 0xf6 */
+/* File: mips64/op_unused_f6.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
b MterpFallback
+
/* ------------------------------ */
.balign 128
-.L_op_liberate_variable: /* 0xf7 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f7: /* 0xf7 */
+/* File: mips64/op_unused_f7.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
b MterpFallback
+
/* ------------------------------ */
.balign 128
-.L_op_box_lambda: /* 0xf8 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f8: /* 0xf8 */
+/* File: mips64/op_unused_f8.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
b MterpFallback
+
/* ------------------------------ */
.balign 128
-.L_op_unbox_lambda: /* 0xf9 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f9: /* 0xf9 */
+/* File: mips64/op_unused_f9.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
b MterpFallback
+
/* ------------------------------ */
.balign 128
.L_op_unused_fa: /* 0xfa */
@@ -11799,7 +11829,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_invoke_lambda: /* 0xf3 */
+.L_ALT_op_unused_f3: /* 0xf3 */
/* File: mips64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11837,7 +11867,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_capture_variable: /* 0xf5 */
+.L_ALT_op_unused_f5: /* 0xf5 */
/* File: mips64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11856,7 +11886,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_create_lambda: /* 0xf6 */
+.L_ALT_op_unused_f6: /* 0xf6 */
/* File: mips64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11875,7 +11905,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_liberate_variable: /* 0xf7 */
+.L_ALT_op_unused_f7: /* 0xf7 */
/* File: mips64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11894,7 +11924,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_box_lambda: /* 0xf8 */
+.L_ALT_op_unused_f8: /* 0xf8 */
/* File: mips64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11913,7 +11943,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_unbox_lambda: /* 0xf9 */
+.L_ALT_op_unused_f9: /* 0xf9 */
/* File: mips64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S
index 5caaa80..29ee248 100644
--- a/runtime/interpreter/mterp/out/mterp_x86.S
+++ b/runtime/interpreter/mterp/out/mterp_x86.S
@@ -6201,8 +6201,12 @@
/* ------------------------------ */
.balign 128
-.L_op_invoke_lambda: /* 0xf3 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f3: /* 0xf3 */
+/* File: x86/op_unused_f3.S */
+/* File: x86/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
@@ -6219,36 +6223,56 @@
/* ------------------------------ */
.balign 128
-.L_op_capture_variable: /* 0xf5 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f5: /* 0xf5 */
+/* File: x86/op_unused_f5.S */
+/* File: x86/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_create_lambda: /* 0xf6 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f6: /* 0xf6 */
+/* File: x86/op_unused_f6.S */
+/* File: x86/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_liberate_variable: /* 0xf7 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f7: /* 0xf7 */
+/* File: x86/op_unused_f7.S */
+/* File: x86/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_box_lambda: /* 0xf8 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f8: /* 0xf8 */
+/* File: x86/op_unused_f8.S */
+/* File: x86/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_unbox_lambda: /* 0xf9 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f9: /* 0xf9 */
+/* File: x86/op_unused_f9.S */
+/* File: x86/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
@@ -12178,7 +12202,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_invoke_lambda: /* 0xf3 */
+.L_ALT_op_unused_f3: /* 0xf3 */
/* File: x86/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12226,7 +12250,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_capture_variable: /* 0xf5 */
+.L_ALT_op_unused_f5: /* 0xf5 */
/* File: x86/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12250,7 +12274,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_create_lambda: /* 0xf6 */
+.L_ALT_op_unused_f6: /* 0xf6 */
/* File: x86/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12274,7 +12298,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_liberate_variable: /* 0xf7 */
+.L_ALT_op_unused_f7: /* 0xf7 */
/* File: x86/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12298,7 +12322,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_box_lambda: /* 0xf8 */
+.L_ALT_op_unused_f8: /* 0xf8 */
/* File: x86/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12322,7 +12346,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_unbox_lambda: /* 0xf9 */
+.L_ALT_op_unused_f9: /* 0xf9 */
/* File: x86/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S
index 2f7b854..bc1abcc 100644
--- a/runtime/interpreter/mterp/out/mterp_x86_64.S
+++ b/runtime/interpreter/mterp/out/mterp_x86_64.S
@@ -5966,8 +5966,12 @@
/* ------------------------------ */
.balign 128
-.L_op_invoke_lambda: /* 0xf3 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f3: /* 0xf3 */
+/* File: x86_64/op_unused_f3.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
@@ -5984,36 +5988,56 @@
/* ------------------------------ */
.balign 128
-.L_op_capture_variable: /* 0xf5 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f5: /* 0xf5 */
+/* File: x86_64/op_unused_f5.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_create_lambda: /* 0xf6 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f6: /* 0xf6 */
+/* File: x86_64/op_unused_f6.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_liberate_variable: /* 0xf7 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f7: /* 0xf7 */
+/* File: x86_64/op_unused_f7.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_box_lambda: /* 0xf8 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f8: /* 0xf8 */
+/* File: x86_64/op_unused_f8.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_unbox_lambda: /* 0xf9 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f9: /* 0xf9 */
+/* File: x86_64/op_unused_f9.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
@@ -11457,7 +11481,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_invoke_lambda: /* 0xf3 */
+.L_ALT_op_unused_f3: /* 0xf3 */
/* File: x86_64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11501,7 +11525,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_capture_variable: /* 0xf5 */
+.L_ALT_op_unused_f5: /* 0xf5 */
/* File: x86_64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11523,7 +11547,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_create_lambda: /* 0xf6 */
+.L_ALT_op_unused_f6: /* 0xf6 */
/* File: x86_64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11545,7 +11569,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_liberate_variable: /* 0xf7 */
+.L_ALT_op_unused_f7: /* 0xf7 */
/* File: x86_64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11567,7 +11591,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_box_lambda: /* 0xf8 */
+.L_ALT_op_unused_f8: /* 0xf8 */
/* File: x86_64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11589,7 +11613,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_unbox_lambda: /* 0xf9 */
+.L_ALT_op_unused_f9: /* 0xf9 */
/* File: x86_64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/x86/op_unused_f3.S b/runtime/interpreter/mterp/x86/op_unused_f3.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/op_unused_f3.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f5.S b/runtime/interpreter/mterp/x86/op_unused_f5.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/op_unused_f5.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f6.S b/runtime/interpreter/mterp/x86/op_unused_f6.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/op_unused_f6.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f7.S b/runtime/interpreter/mterp/x86/op_unused_f7.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/op_unused_f7.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f8.S b/runtime/interpreter/mterp/x86/op_unused_f8.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/op_unused_f8.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f9.S b/runtime/interpreter/mterp/x86/op_unused_f9.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/op_unused_f9.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f3.S b/runtime/interpreter/mterp/x86_64/op_unused_f3.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_f3.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f5.S b/runtime/interpreter/mterp/x86_64/op_unused_f5.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_f5.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f6.S b/runtime/interpreter/mterp/x86_64/op_unused_f6.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_f6.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f7.S b/runtime/interpreter/mterp/x86_64/op_unused_f7.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_f7.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f8.S b/runtime/interpreter/mterp/x86_64/op_unused_f8.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_f8.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f9.S b/runtime/interpreter/mterp/x86_64/op_unused_f9.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_f9.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 57443f1..a0e0e62 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -41,6 +41,7 @@
#include "mirror/array-inl.h"
#include "mirror/class.h"
#include "mirror/field-inl.h"
+#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index d52030f..cff2354 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -692,9 +692,6 @@
DCHECK(this_object != nullptr);
ProfilingInfo* info = caller->GetProfilingInfo(kRuntimePointerSize);
if (info != nullptr) {
- // Since the instrumentation is marked from the declaring class we need to mark the card so
- // that mod-union tables and card rescanning know about the update.
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(caller->GetDeclaringClass());
info->AddInvokeInfo(dex_pc, this_object->GetClass());
}
}
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 6dc1578..1938221 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -146,7 +146,6 @@
// Remove all methods in our cache that were allocated by 'alloc'.
void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
REQUIRES(!lock_)
- REQUIRES(Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
void ClearGcRootsInInlineCaches(Thread* self) REQUIRES(!lock_);
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 5a469e5..b35c958 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -176,14 +176,13 @@
MutexLock wait_mutex(Thread::Current(), wait_lock_);
if ((NanoTime() - last_time_ns_saver_woke_up_) > MsToNs(options_.GetMinSavePeriodMs())) {
WakeUpSaver();
+ } else if (jit_activity_notifications_ > options_.GetMaxNotificationBeforeWake()) {
+ // Make sure to wake up the saver if we see a spike in the number of notifications.
+ // This is a precaution to avoid losing a large number of methods in case
+ // the spike is not followed by further JIT activity.
+ total_number_of_hot_spikes_++;
+ WakeUpSaver();
}
- } else if (jit_activity_notifications_ > options_.GetMaxNotificationBeforeWake()) {
- // Make sure to wake up the saver if we see a spike in the number of notifications.
- // This is a precaution to avoid "loosing" a big number of methods in case
- // this is a spike with no jit after.
- total_number_of_hot_spikes_++;
- MutexLock wait_mutex(Thread::Current(), wait_lock_);
- WakeUpSaver();
}
}
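
The profile_saver.cc hunk is a correctness fix rather than part of the revert: previously the spike branch hung off an outer condition, outside the wait_mutex scope, so it had to take wait_lock_ a second time and could fire regardless of the minimum save period. Folding it into the else-if keeps both checks under one lock acquisition and makes the spike path apply only when the period has not yet elapsed. The shape of the fix, sketched with a standard mutex (field names mirror the diff; timing plumbing is simplified):

    #include <cstdint>
    #include <mutex>

    std::mutex wait_lock_;
    uint64_t last_time_ns_saver_woke_up_ = 0;
    uint32_t jit_activity_notifications_ = 0;

    void WakeUpSaver() { /* stub: signal the saver thread's condition variable */ }

    void MaybeWakeSaver(uint64_t now_ns, uint64_t min_period_ns, uint32_t max_notifications) {
      std::lock_guard<std::mutex> guard(wait_lock_);  // one critical section for both checks
      if (now_ns - last_time_ns_saver_woke_up_ > min_period_ns) {
        WakeUpSaver();
      } else if (jit_activity_notifications_ > max_notifications) {
        // Spike: wake early rather than risk losing a batch of hot methods.
        WakeUpSaver();
      }
    }
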
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 07c8051..216df2f 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -25,10 +25,33 @@
namespace art {
+ProfilingInfo::ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries)
+ : number_of_inline_caches_(entries.size()),
+ method_(method),
+ is_method_being_compiled_(false),
+ is_osr_method_being_compiled_(false),
+ current_inline_uses_(0),
+ saved_entry_point_(nullptr) {
+ memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
+ for (size_t i = 0; i < number_of_inline_caches_; ++i) {
+ cache_[i].dex_pc_ = entries[i];
+ }
+ if (method->IsCopied()) {
+ // GetHoldingClassOfCopiedMethod is expensive, but creating a profiling info for a copied method
+ // appears to happen very rarely in practice.
+ holding_class_ = GcRoot<mirror::Class>(
+ Runtime::Current()->GetClassLinker()->GetHoldingClassOfCopiedMethod(method));
+ } else {
+ holding_class_ = GcRoot<mirror::Class>(method->GetDeclaringClass());
+ }
+ DCHECK(!holding_class_.IsNull());
+}
+
bool ProfilingInfo::Create(Thread* self, ArtMethod* method, bool retry_allocation) {
// Walk over the dex instructions of the method and keep track of
// instructions we are interested in profiling.
DCHECK(!method->IsNative());
+
const DexFile::CodeItem& code_item = *method->GetCodeItem();
const uint16_t* code_ptr = code_item.insns_;
const uint16_t* code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
@@ -93,6 +116,14 @@
--i;
} else {
// We successfully set `cls`, just return.
+ // Since the instrumentation is marked from the declaring class we need to mark the card so
+ // that mod-union tables and card rescanning know about the update.
+ // Note that the declaring class is not necessarily the holding class if the method is
+ // copied. We need the card mark to be in the holding class since that is from where we
+ // will visit the profiling info.
+ if (!holding_class_.IsNull()) {
+ Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(holding_class_.Read());
+ }
return;
}
}
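
The write barrier dropped from jit.cc resurfaces here, now keyed to the holding class: AddInvokeInfo stores a class pointer inside the ProfilingInfo, and since the GC reaches profiling infos through the class that holds the method (which differs from the declaring class for copied methods), the card covering that class must be dirtied or a concurrent/generational collection could miss the new reference. A minimal sketch of what a card-marking barrier like WriteBarrierEveryFieldOf does; the card size and dirty value below are illustrative assumptions, not ART's exact constants:

    #include <cstdint>

    constexpr uintptr_t kCardShift = 10;   // assume 1 KiB cards
    constexpr uint8_t kCardDirty = 0x70;   // assumed dirty-marker byte

    // Dirty the card covering `holder` so the next card scan revisits its fields.
    void WriteBarrierEveryFieldOf(const void* holder, uint8_t* card_table) {
      card_table[reinterpret_cast<uintptr_t>(holder) >> kCardShift] = kCardDirty;
    }
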
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index d04d2de..a890fbb 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -105,6 +105,7 @@
// NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
template<typename RootVisitorType>
void VisitRoots(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS {
+ visitor.VisitRootIfNonNull(holding_class_.AddressWithoutBarrier());
for (size_t i = 0; i < number_of_inline_caches_; ++i) {
InlineCache* cache = &cache_[i];
for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
@@ -166,18 +167,7 @@
}
private:
- ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries)
- : number_of_inline_caches_(entries.size()),
- method_(method),
- is_method_being_compiled_(false),
- is_osr_method_being_compiled_(false),
- current_inline_uses_(0),
- saved_entry_point_(nullptr) {
- memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
- for (size_t i = 0; i < number_of_inline_caches_; ++i) {
- cache_[i].dex_pc_ = entries[i];
- }
- }
+ ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries);
// Number of instructions we are profiling in the ArtMethod.
const uint32_t number_of_inline_caches_;
@@ -185,6 +175,9 @@
// Method this profiling info is for.
ArtMethod* const method_;
+ // Holding class for the method in case method is a copied method.
+ GcRoot<mirror::Class> holding_class_;
+
// Whether the ArtMethod is currently being compiled. This flag
// is implicitly guarded by the JIT code cache lock.
// TODO: Make the JIT code cache lock global.
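
Storing holding_class_ as a GcRoot lets the collector both keep the class alive via VisitRoots and rewrite the slot when a moving GC relocates it; VisitRootIfNonNull skips empty roots. A sketch of that pattern with simplified stand-in types (ART's real types are GcRoot<mirror::Class> and the templated visitor):

    struct Object { /* ... */ };

    Object* Forward(Object* old_ref) {
      return old_ref;  // stub: a real moving GC returns the relocated address
    }

    struct GcRootSketch {
      Object* ref = nullptr;
      Object** AddressWithoutBarrier() { return &ref; }
    };

    struct RelocatingVisitor {
      void VisitRootIfNonNull(Object** root) const {
        if (*root != nullptr) *root = Forward(*root);
      }
    };

    // Usage, mirroring ProfilingInfo::VisitRoots:
    void VisitRoots(GcRootSketch& holding_class, const RelocatingVisitor& v) {
      v.VisitRootIfNonNull(holding_class.AddressWithoutBarrier());
    }
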
diff --git a/runtime/lambda/art_lambda_method.cc b/runtime/lambda/art_lambda_method.cc
deleted file mode 100644
index 6f9f8bb..0000000
--- a/runtime/lambda/art_lambda_method.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "lambda/art_lambda_method.h"
-
-#include "base/logging.h"
-#include "lambda/shorty_field_type.h"
-
-namespace art {
-namespace lambda {
-
-ArtLambdaMethod::ArtLambdaMethod(ArtMethod* target_method,
- const char* captured_variables_type_descriptor,
- const char* captured_variables_shorty,
- bool innate_lambda)
- : method_(target_method),
- captured_variables_type_descriptor_(captured_variables_type_descriptor),
- captured_variables_shorty_(captured_variables_shorty),
- innate_lambda_(innate_lambda) {
- DCHECK(target_method != nullptr);
- DCHECK(captured_variables_type_descriptor != nullptr);
- DCHECK(captured_variables_shorty != nullptr);
-
- // Calculate the static closure size from the captured variables.
- size_t size = sizeof(ArtLambdaMethod*); // Initial size is just this method.
- bool static_size = true;
- const char* shorty = captured_variables_shorty_;
- while (shorty != nullptr && *shorty != '\0') {
- // Each captured variable also appends to the size.
- ShortyFieldType shorty_field{*shorty}; // NOLINT [readability/braces] [4]
- size += shorty_field.GetStaticSize();
- static_size &= shorty_field.IsStaticSize();
- ++shorty;
- }
- closure_size_ = size;
-
- // We determine whether or not the size is dynamic by checking for nested lambdas.
- //
- // This is conservative, since in theory an optimization could determine the size
- // of the nested lambdas recursively. In practice it's probably better to flatten out
- // nested lambdas and inline all their code if they are known statically.
- dynamic_size_ = !static_size;
-
- if (kIsDebugBuild) {
- // Double check that the number of captured variables match in both strings.
- size_t shorty_count = strlen(captured_variables_shorty);
-
- size_t long_count = 0;
- const char* long_type = captured_variables_type_descriptor;
- ShortyFieldType out;
- while ((long_type = ShortyFieldType::ParseFromFieldTypeDescriptor(long_type, &out))
- != nullptr) {
- ++long_count;
- }
-
- DCHECK_EQ(shorty_count, long_count)
- << "number of captured variables in long type '" << captured_variables_type_descriptor
- << "' (" << long_count << ")" << " did not match short type '"
- << captured_variables_shorty << "' (" << shorty_count << ")";
- }
-}
-
-} // namespace lambda
-} // namespace art
diff --git a/runtime/lambda/art_lambda_method.h b/runtime/lambda/art_lambda_method.h
deleted file mode 100644
index ea13eb7..0000000
--- a/runtime/lambda/art_lambda_method.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ART_RUNTIME_LAMBDA_ART_LAMBDA_METHOD_H_
-#define ART_RUNTIME_LAMBDA_ART_LAMBDA_METHOD_H_
-
-#include "base/macros.h"
-#include "art_method.h"
-
-#include <stdint.h>
-
-namespace art {
-namespace lambda {
-
-class ArtLambdaMethod {
- public:
- // Construct an art lambda method.
- // The target method is the one invoked by invoke-lambda.
- // The type descriptor describes the types of variables captured, e.g. "ZFLObject;\FI;[Z"
- // The shorty drops the object name and treats arrays as objects, e.g. "ZFL\L"
- // Innate lambda means that the lambda was originally created via invoke-lambda.
- // -- Non-innate lambdas (learned lambdas) come from a regular class that was boxed to lambda.
- // (Ownership of strings is retained by the caller and the lifetime should exceed this class).
- ArtLambdaMethod(ArtMethod* target_method,
- const char* captured_variables_type_descriptor,
- const char* captured_variables_shorty,
- bool innate_lambda = true);
-
- // Get the target method for this lambda that would be used by the invoke-lambda dex instruction.
- ArtMethod* GetArtMethod() const {
- return method_;
- }
-
- // Get the compile-time size of lambda closures for this method in bytes.
- // This is circular (that is, it includes the size of the ArtLambdaMethod pointer).
- // One should also check if the size is dynamic since nested lambdas have a runtime size.
- size_t GetStaticClosureSize() const {
- return closure_size_;
- }
-
- // Get the type descriptor for the list of captured variables.
- // e.g. "ZFLObject;\FI;[Z" means a captured int, float, class Object, lambda FI, array of ints
- const char* GetCapturedVariablesTypeDescriptor() const {
- return captured_variables_type_descriptor_;
- }
-
- // Get the shorty 'field' type descriptor list of captured variables.
- // This follows the same rules as a string of ShortyFieldType in the dex specification.
- // Every captured variable is represented by exactly one character.
- // - Objects become 'L'.
- // - Arrays become 'L'.
- // - Lambdas become '\'.
- const char* GetCapturedVariablesShortyTypeDescriptor() const {
- return captured_variables_shorty_;
- }
-
- // Will the size of this lambda change at runtime?
- // Only returns true if there is a nested lambda that we can't determine statically the size of.
- bool IsDynamicSize() const {
- return dynamic_size_;
- }
-
- // Will the size of this lambda always be constant at runtime?
- // This generally means there's no nested lambdas, or we were able to successfully determine
- // their size statically at compile time.
- bool IsStaticSize() const {
- return !IsDynamicSize();
- }
- // Is this a lambda that was originally created via invoke-lambda?
- // -- Non-innate lambdas (learned lambdas) come from a regular class that was boxed to lambda.
- bool IsInnateLambda() const {
- return innate_lambda_;
- }
-
- // How many variables were captured?
- // (Each nested lambda counts as 1 captured var regardless of how many captures it itself has).
- size_t GetNumberOfCapturedVariables() const {
- return strlen(captured_variables_shorty_);
- }
-
- private:
- // TODO: ArtMethod, or at least the entry points should be inlined into this struct
- // to avoid an extra indirect load when doing invokes.
- // Target method that invoke-lambda will jump to.
- ArtMethod* method_;
- // How big the closure is (in bytes). Only includes the constant size.
- size_t closure_size_;
- // The type descriptor for the captured variables, e.g. "IS" for [int, short]
- const char* captured_variables_type_descriptor_;
- // The shorty type descriptor for captured vars, (e.g. using 'L' instead of 'LObject;')
- const char* captured_variables_shorty_;
- // Whether or not the size is dynamic. If it is, copiers need to read the Closure size at runtime.
- bool dynamic_size_;
- // True if this lambda was originally made with create-lambda,
- // false if it came from a class instance (through new-instance and then unbox-lambda).
- bool innate_lambda_;
-
- DISALLOW_COPY_AND_ASSIGN(ArtLambdaMethod);
-};
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_ART_LAMBDA_METHOD_H_
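
For the record, the deleted ArtLambdaMethod computed closure sizes by walking the captured-variables shorty: each character contributes its static size, and a nested-lambda character makes the whole closure dynamically sized. A simplified reconstruction; the per-type sizes are assumptions standing in for ShortyFieldType::GetStaticSize():

    #include <cstddef>
    #include <utility>

    // Returns {closure_size_, !dynamic_size_} in the deleted class's terms.
    std::pair<size_t, bool> ClosureSize(const char* shorty) {
      size_t size = sizeof(void*);  // leading ArtLambdaMethod* slot
      bool is_static = true;
      for (; *shorty != '\0'; ++shorty) {
        switch (*shorty) {
          case 'J': case 'D': size += 8; break;              // long, double
          case '\\': size += sizeof(void*); is_static = false; break;  // nested lambda
          default: size += 4; break;                         // assumed 4-byte slots
        }
      }
      return {size, is_static};
    }
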
diff --git a/runtime/lambda/box_table.cc b/runtime/lambda/box_table.cc
deleted file mode 100644
index 9918bb7..0000000
--- a/runtime/lambda/box_table.cc
+++ /dev/null
@@ -1,315 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "lambda/box_table.h"
-
-#include "base/mutex.h"
-#include "common_throws.h"
-#include "gc_root-inl.h"
-#include "lambda/closure.h"
-#include "lambda/leaking_allocator.h"
-#include "mirror/method.h"
-#include "mirror/object-inl.h"
-#include "thread.h"
-
-#include <vector>
-
-namespace art {
-namespace lambda {
-// Temporarily represent the lambda Closure as its raw bytes in an array.
-// TODO: Generate a proxy class for the closure when boxing the first time.
-using BoxedClosurePointerType = mirror::ByteArray*;
-
-static mirror::Class* GetBoxedClosureClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return mirror::ByteArray::GetArrayClass();
-}
-
-namespace {
- // Convenience functions to allocating/deleting box table copies of the closures.
- struct ClosureAllocator {
- // Deletes a Closure that was allocated through ::Allocate.
- static void Delete(Closure* ptr) {
- delete[] reinterpret_cast<char*>(ptr);
- }
-
- // Returns a well-aligned pointer to a newly allocated Closure on the 'new' heap.
- static Closure* Allocate(size_t size) {
- DCHECK_GE(size, sizeof(Closure));
-
- // TODO: Maybe point to the interior of the boxed closure object after we add proxy support?
- Closure* closure = reinterpret_cast<Closure*>(new char[size]);
- DCHECK_ALIGNED(closure, alignof(Closure));
- return closure;
- }
- };
-} // namespace
-
-BoxTable::BoxTable()
- : allow_new_weaks_(true),
- new_weaks_condition_("lambda box table allowed weaks", *Locks::lambda_table_lock_) {}
-
-BoxTable::~BoxTable() {
- // Free all the copies of our closures.
- for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
- std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
-
- Closure* closure = key_value_pair.first;
-
- // Remove from the map first, so that it doesn't try to access dangling pointer.
- map_iterator = map_.Erase(map_iterator);
-
- // Safe to delete, no dangling pointers.
- ClosureAllocator::Delete(closure);
- }
-}
-
-mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
- Thread* self = Thread::Current();
-
- {
- // TODO: Switch to ReaderMutexLock if ConditionVariable ever supports RW Mutexes
- /*Reader*/MutexLock mu(self, *Locks::lambda_table_lock_);
- BlockUntilWeaksAllowed();
-
- // Attempt to look up this object, it's possible it was already boxed previously.
- // If this is the case we *must* return the same object as before to maintain
- // referential equality.
- //
- // In managed code:
- // Functional f = () -> 5; // vF = create-lambda
- // Object a = f; // vA = box-lambda vA
- // Object b = f; // vB = box-lambda vB
- // assert(a == f)
- ValueType value = FindBoxedLambda(closure);
- if (!value.IsNull()) {
- return value.Read();
- }
-
- // Otherwise we need to box ourselves and insert it into the hash map
- }
-
- // Release the lambda table lock here, so that thread suspension is allowed.
-
- // Convert the Closure into a managed byte[] which will serve
- // as the temporary 'boxed' version of the lambda. This is good enough
- // to check all the basic object identities that a boxed lambda must retain.
- // It's also good enough to contain all the captured primitive variables.
-
- // TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class
- // TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
- BoxedClosurePointerType closure_as_array_object =
- mirror::ByteArray::Alloc(self, closure->GetSize());
-
- // There are no thread suspension points after this, so we don't need to put it into a handle.
-
- if (UNLIKELY(closure_as_array_object == nullptr)) {
- // Most likely an OOM has occurred.
- CHECK(self->IsExceptionPending());
- return nullptr;
- }
-
- // Write the raw closure data into the byte[].
- closure->CopyTo(closure_as_array_object->GetRawData(sizeof(uint8_t), // component size
- 0 /*index*/), // index
- closure_as_array_object->GetLength());
-
- // The method has been successfully boxed into an object, now insert it into the hash map.
- {
- MutexLock mu(self, *Locks::lambda_table_lock_);
- BlockUntilWeaksAllowed();
-
- // Lookup the object again, it's possible another thread already boxed it while
- // we were allocating the object before.
- ValueType value = FindBoxedLambda(closure);
- if (UNLIKELY(!value.IsNull())) {
- // Let the GC clean up method_as_object at a later time.
- return value.Read();
- }
-
- // Otherwise we need to insert it into the hash map in this thread.
-
- // Make a copy for the box table to keep, in case the closure gets collected from the stack.
- // TODO: GC may need to sweep for roots in the box table's copy of the closure.
- Closure* closure_table_copy = ClosureAllocator::Allocate(closure->GetSize());
- closure->CopyTo(closure_table_copy, closure->GetSize());
-
- // The closure_table_copy needs to be deleted by us manually when we erase it from the map.
-
- // Actually insert into the table.
- map_.Insert({closure_table_copy, ValueType(closure_as_array_object)});
- }
-
- return closure_as_array_object;
-}
-
-bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
- DCHECK(object != nullptr);
- *out_closure = nullptr;
-
- Thread* self = Thread::Current();
-
- // Note that we do not need to access lambda_table_lock_ here
- // since we don't need to look at the map.
-
- mirror::Object* boxed_closure_object = object;
-
- // Raise ClassCastException if object is not instanceof byte[]
- if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureClass()))) {
- ThrowClassCastException(GetBoxedClosureClass(), boxed_closure_object->GetClass());
- return false;
- }
-
- // TODO(iam): We must check that the closure object extends/implements the type
- // specified in [type id]. This is not currently implemented since it's always a byte[].
-
- // If we got this far, the inputs are valid.
- // Shuffle the byte[] back into a raw closure, then allocate it, copy, and return it.
- BoxedClosurePointerType boxed_closure_as_array =
- down_cast<BoxedClosurePointerType>(boxed_closure_object);
-
- const int8_t* unaligned_interior_closure = boxed_closure_as_array->GetData();
-
- // Allocate a copy that can "escape" and copy the closure data into that.
- Closure* unboxed_closure =
- LeakingAllocator::MakeFlexibleInstance<Closure>(self, boxed_closure_as_array->GetLength());
- // TODO: don't just memcpy the closure, it's unsafe when we add references to the mix.
- memcpy(unboxed_closure, unaligned_interior_closure, boxed_closure_as_array->GetLength());
-
- DCHECK_EQ(unboxed_closure->GetSize(), static_cast<size_t>(boxed_closure_as_array->GetLength()));
-
- *out_closure = unboxed_closure;
- return true;
-}
-
-BoxTable::ValueType BoxTable::FindBoxedLambda(const ClosureType& closure) const {
- auto map_iterator = map_.Find(closure);
- if (map_iterator != map_.end()) {
- const std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
- const ValueType& value = key_value_pair.second;
-
- DCHECK(!value.IsNull()); // Never store null boxes.
- return value;
- }
-
- return ValueType(nullptr);
-}
-
-void BoxTable::BlockUntilWeaksAllowed() {
- Thread* self = Thread::Current();
- while (UNLIKELY((!kUseReadBarrier && !allow_new_weaks_) ||
- (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
- new_weaks_condition_.WaitHoldingLocks(self); // wait while holding mutator lock
- }
-}
-
-void BoxTable::SweepWeakBoxedLambdas(IsMarkedVisitor* visitor) {
- DCHECK(visitor != nullptr);
-
- Thread* self = Thread::Current();
- MutexLock mu(self, *Locks::lambda_table_lock_);
-
- /*
- * Visit every weak root in our lambda box table.
- * Remove unmarked objects, update marked objects to new address.
- */
- for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
- std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
-
- const ValueType& old_value = key_value_pair.second;
-
- // This does not need a read barrier because this is called by GC.
- mirror::Object* old_value_raw = old_value.Read<kWithoutReadBarrier>();
- mirror::Object* new_value = visitor->IsMarked(old_value_raw);
-
- if (new_value == nullptr) {
- // The object has been swept away.
- const ClosureType& closure = key_value_pair.first;
-
- // Delete the entry from the map.
- map_iterator = map_.Erase(map_iterator);
-
- // Clean up the memory by deleting the closure.
- ClosureAllocator::Delete(closure);
-
- } else {
- // The object has been moved.
- // Update the map.
- key_value_pair.second = ValueType(new_value);
- ++map_iterator;
- }
- }
-
- // Occasionally shrink the map to avoid growing very large.
- if (map_.CalculateLoadFactor() < kMinimumLoadFactor) {
- map_.ShrinkToMaximumLoad();
- }
-}
-
-void BoxTable::DisallowNewWeakBoxedLambdas() {
- CHECK(!kUseReadBarrier);
- Thread* self = Thread::Current();
- MutexLock mu(self, *Locks::lambda_table_lock_);
-
- allow_new_weaks_ = false;
-}
-
-void BoxTable::AllowNewWeakBoxedLambdas() {
- CHECK(!kUseReadBarrier);
- Thread* self = Thread::Current();
- MutexLock mu(self, *Locks::lambda_table_lock_);
-
- allow_new_weaks_ = true;
- new_weaks_condition_.Broadcast(self);
-}
-
-void BoxTable::BroadcastForNewWeakBoxedLambdas() {
- CHECK(kUseReadBarrier);
- Thread* self = Thread::Current();
- MutexLock mu(self, *Locks::lambda_table_lock_);
- new_weaks_condition_.Broadcast(self);
-}
-
-void BoxTable::EmptyFn::MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const {
- item.first = nullptr;
-
- Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
- item.second = ValueType(); // Also clear the GC root.
-}
-
-bool BoxTable::EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
- return item.first == nullptr;
-}
-
-bool BoxTable::EqualsFn::operator()(const UnorderedMapKeyType& lhs,
- const UnorderedMapKeyType& rhs) const {
- // Nothing needs this right now, but leave this assertion for later when
- // we need to look at the references inside of the closure.
- Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
-
- return lhs->ReferenceEquals(rhs);
-}
-
-size_t BoxTable::HashFn::operator()(const UnorderedMapKeyType& key) const {
- const lambda::Closure* closure = key;
- DCHECK_ALIGNED(closure, alignof(lambda::Closure));
-
- // Need to hold mutator_lock_ before calling into Closure::GetHashCode.
- Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
- return closure->GetHashCode();
-}
-
-} // namespace lambda
-} // namespace art
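For readers following the removal: BoxTable::BoxLambda above uses a check/allocate/re-check pattern so that allocation (which may suspend the thread for GC) happens outside the table lock, while referential equality of boxes is still preserved. A minimal self-contained sketch of that pattern, with std::mutex and std::unordered_map standing in for lambda_table_lock_ and the GC-aware HashMap (all names here are illustrative, not the real API):

#include <mutex>
#include <unordered_map>

struct Closure {};
struct Object {};

class BoxTableSketch {
 public:
  Object* BoxLambda(Closure* closure) {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      auto it = map_.find(closure);
      if (it != map_.end()) {
        return it->second;  // Already boxed: reuse the box for referential equality.
      }
    }
    // Allocate outside the lock so a GC-induced suspension cannot block other readers.
    Object* boxed = new Object();
    std::lock_guard<std::mutex> lock(mutex_);
    auto it = map_.find(closure);
    if (it != map_.end()) {
      delete boxed;  // Another thread boxed it first; discard ours (the real code lets the GC reclaim it).
      return it->second;
    }
    map_.emplace(closure, boxed);
    return boxed;
  }

 private:
  std::mutex mutex_;
  std::unordered_map<Closure*, Object*> map_;
};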
diff --git a/runtime/lambda/box_table.h b/runtime/lambda/box_table.h
deleted file mode 100644
index adb7332..0000000
--- a/runtime/lambda/box_table.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ART_RUNTIME_LAMBDA_BOX_TABLE_H_
-#define ART_RUNTIME_LAMBDA_BOX_TABLE_H_
-
-#include "base/allocator.h"
-#include "base/hash_map.h"
-#include "gc_root.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "object_callbacks.h"
-
-#include <stdint.h>
-
-namespace art {
-
-class ArtMethod; // forward declaration
-
-namespace mirror {
-class Object; // forward declaration
-} // namespace mirror
-
-namespace lambda {
-struct Closure; // forward declaration
-
-/*
- * Store a table of boxed lambdas. This is required to maintain object referential equality
- * when a lambda is re-boxed.
- *
- * Conceptually, we store a mapping of Closures -> Weak Reference<Boxed Lambda Object>.
- * When too many objects get GCd, we shrink the underlying table to use less space.
- */
-class BoxTable FINAL {
- public:
- using ClosureType = art::lambda::Closure*;
-
- // Boxes a closure into an object. Returns null and throws an exception on failure.
- mirror::Object* BoxLambda(const ClosureType& closure)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);
-
- // Unboxes an object back into the lambda. Returns false and throws an exception on failure.
- bool UnboxLambda(mirror::Object* object, ClosureType* out_closure)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Sweep weak references to lambda boxes. Update the addresses if the objects have been
- // moved, and delete them from the table if the objects have been cleaned up.
- void SweepWeakBoxedLambdas(IsMarkedVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);
-
- // GC callback: Temporarily block anyone from touching the map.
- void DisallowNewWeakBoxedLambdas()
- REQUIRES(!Locks::lambda_table_lock_);
-
- // GC callback: Unblock any readers who have been queued waiting to touch the map.
- void AllowNewWeakBoxedLambdas()
- REQUIRES(!Locks::lambda_table_lock_);
-
- // GC callback: Unblock any readers who have been queued waiting to touch the map.
- void BroadcastForNewWeakBoxedLambdas()
- REQUIRES(!Locks::lambda_table_lock_);
-
- BoxTable();
- ~BoxTable();
-
- private:
- // Explanation:
- // - After all threads are suspended (exclusive mutator lock),
- // the concurrent-copying GC can move objects from the "from" space to the "to" space.
-  //     If an object is moved at that time and *before* SweepSystemWeaks is called, then
- // we don't know if the move has happened yet.
- // Successive reads will then (incorrectly) look at the objects in the "from" space,
-  //     which is a problem since the objects have already been forwarded and mutations
- // would not be visible in the right space.
- // Instead, use a GcRoot here which will be automatically updated by the GC.
- //
- // Also, any reads should be protected by a read barrier to always give us the "to" space address.
- using ValueType = GcRoot<mirror::Object>;
-
- // Attempt to look up the lambda in the map, or return null if it's not there yet.
- ValueType FindBoxedLambda(const ClosureType& closure) const
- SHARED_REQUIRES(Locks::lambda_table_lock_);
-
-  // If the GC has come in and temporarily disallowed touching weaks, block until it is allowed.
- void BlockUntilWeaksAllowed()
- SHARED_REQUIRES(Locks::lambda_table_lock_);
-
-  // The map key is a raw Closure pointer; its memory is freed manually when an entry is erased.
- using UnorderedMapKeyType = ClosureType;
-
- // EmptyFn implementation for art::HashMap
- struct EmptyFn {
- void MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const
- NO_THREAD_SAFETY_ANALYSIS; // SHARED_REQUIRES(Locks::mutator_lock_)
-
- bool IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const;
- };
-
- // HashFn implementation for art::HashMap
- struct HashFn {
- size_t operator()(const UnorderedMapKeyType& key) const
- NO_THREAD_SAFETY_ANALYSIS; // SHARED_REQUIRES(Locks::mutator_lock_)
- };
-
- // EqualsFn implementation for art::HashMap
- struct EqualsFn {
- bool operator()(const UnorderedMapKeyType& lhs, const UnorderedMapKeyType& rhs) const
- NO_THREAD_SAFETY_ANALYSIS; // SHARED_REQUIRES(Locks::mutator_lock_)
- };
-
- using UnorderedMap = art::HashMap<UnorderedMapKeyType,
- ValueType,
- EmptyFn,
- HashFn,
- EqualsFn,
- TrackingAllocator<std::pair<ClosureType, ValueType>,
- kAllocatorTagLambdaBoxTable>>;
-
- UnorderedMap map_ GUARDED_BY(Locks::lambda_table_lock_);
- bool allow_new_weaks_ GUARDED_BY(Locks::lambda_table_lock_);
- ConditionVariable new_weaks_condition_ GUARDED_BY(Locks::lambda_table_lock_);
-
- // Shrink the map when we get below this load factor.
- // (This is an arbitrary value that should be large enough to prevent aggressive map erases
- // from shrinking the table too often.)
- static constexpr double kMinimumLoadFactor = UnorderedMap::kDefaultMinLoadFactor / 2;
-
- DISALLOW_COPY_AND_ASSIGN(BoxTable);
-};
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_BOX_TABLE_H_
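The SweepWeakBoxedLambdas contract declared above (erase dead boxes, rewrite moved addresses) reduces to a single erase-or-update loop. A simplified sketch under the same assumptions as before, with a callback in place of IsMarkedVisitor:

#include <functional>
#include <unordered_map>

struct Closure {};
struct Object {};

// 'is_marked' models IsMarkedVisitor::IsMarked: it returns nullptr for a dead
// object, or the (possibly forwarded) address for a live one.
void SweepSketch(std::unordered_map<Closure*, Object*>& map,
                 const std::function<Object*(Object*)>& is_marked) {
  for (auto it = map.begin(); it != map.end();) {
    Object* new_address = is_marked(it->second);
    if (new_address == nullptr) {
      delete it->first;    // The box died: free the table's closure copy...
      it = map.erase(it);  // ...and drop the entry.
    } else {
      it->second = new_address;  // A moving GC may have relocated the box.
      ++it;
    }
  }
}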
diff --git a/runtime/lambda/closure.cc b/runtime/lambda/closure.cc
deleted file mode 100644
index 179e4ee..0000000
--- a/runtime/lambda/closure.cc
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "lambda/closure.h"
-
-#include "base/logging.h"
-#include "lambda/art_lambda_method.h"
-#include "runtime/mirror/object_reference.h"
-
-static constexpr const bool kClosureSupportsReferences = false;
-static constexpr const bool kClosureSupportsGarbageCollection = false;
-
-namespace art {
-namespace lambda {
-
-// TODO: can I return T __attribute__((__aligned__(1)))* here instead?
-template <typename T>
-const uint8_t* Closure::GetUnsafeAtOffset(size_t offset) const {
- // Do not DCHECK here with existing helpers since most of them will call into this function.
- return reinterpret_cast<const uint8_t*>(captured_) + offset;
-}
-
-size_t Closure::GetCapturedVariableSize(ShortyFieldType variable_type, size_t offset) const {
- switch (variable_type) {
- case ShortyFieldType::kLambda:
- {
- return GetClosureSize(GetUnsafeAtOffset<Closure>(offset));
- }
- default:
- DCHECK(variable_type.IsStaticSize());
- return variable_type.GetStaticSize();
- }
-}
-
-// Templatize the flags to give the compiler a fighting chance to eliminate
-// any unnecessary code through different uses of this function.
-template <Closure::VariableInfo::Flags flags>
-inline Closure::VariableInfo Closure::ParseTypeDescriptor(const char* type_descriptor,
- size_t upto_index) const {
- DCHECK(type_descriptor != nullptr);
-
- VariableInfo result;
-
- ShortyFieldType last_type;
- size_t offset = (flags & VariableInfo::kOffset) ? GetStartingOffset() : 0;
- size_t prev_offset = 0;
- size_t count = 0;
-
- while ((type_descriptor =
- ShortyFieldType::ParseFromFieldTypeDescriptor(type_descriptor, &last_type)) != nullptr) {
- count++;
-
- if (flags & VariableInfo::kOffset) {
-      // Accumulate the sizes of all preceding captured variables into the current offset.
- offset += prev_offset;
- prev_offset = GetCapturedVariableSize(last_type, offset);
- }
-
-    if (count > upto_index) {
- break;
- }
- }
-
- if (flags & VariableInfo::kVariableType) {
- result.variable_type_ = last_type;
- }
-
- if (flags & VariableInfo::kIndex) {
- result.index_ = count;
- }
-
- if (flags & VariableInfo::kCount) {
- result.count_ = count;
- }
-
- if (flags & VariableInfo::kOffset) {
- result.offset_ = offset;
- }
-
- // TODO: We should probably store the result of this in the ArtLambdaMethod,
- // to avoid re-computing the data every single time for static closures.
- return result;
-}
-
-size_t Closure::GetCapturedVariablesSize() const {
- const size_t captured_variable_offset = offsetof(Closure, captured_);
- DCHECK_GE(GetSize(), captured_variable_offset); // Prevent underflows.
- return GetSize() - captured_variable_offset;
-}
-
-size_t Closure::GetSize() const {
- const size_t static_closure_size = lambda_info_->GetStaticClosureSize();
- if (LIKELY(lambda_info_->IsStaticSize())) {
- return static_closure_size;
- }
-
- DCHECK_GE(static_closure_size, sizeof(captured_[0].dynamic_.size_));
- const size_t dynamic_closure_size = captured_[0].dynamic_.size_;
- // The dynamic size better be at least as big as the static size.
- DCHECK_GE(dynamic_closure_size, static_closure_size);
-
- return dynamic_closure_size;
-}
-
-void Closure::CopyTo(void* target, size_t target_size) const {
- DCHECK_GE(target_size, GetSize());
-
- // TODO: using memcpy is unsafe with read barriers, fix this once we add reference support
- static_assert(kClosureSupportsReferences == false,
- "Do not use memcpy with readbarrier references");
- memcpy(target, this, GetSize());
-}
-
-ArtMethod* Closure::GetTargetMethod() const {
- return const_cast<ArtMethod*>(lambda_info_->GetArtMethod());
-}
-
-uint32_t Closure::GetHashCode() const {
- // Start with a non-zero constant, a prime number.
- uint32_t result = 17;
-
- // Include the hash with the ArtMethod.
- {
- uintptr_t method = reinterpret_cast<uintptr_t>(GetTargetMethod());
- result = 31 * result + Low32Bits(method);
- if (sizeof(method) == sizeof(uint64_t)) {
- result = 31 * result + High32Bits(method);
- }
- }
-
- // Include a hash for each captured variable.
- for (size_t i = 0; i < GetCapturedVariablesSize(); ++i) {
- // TODO: not safe for GC-able values since the address can move and the hash code would change.
- uint8_t captured_variable_raw_value;
- CopyUnsafeAtOffset<uint8_t>(i, /*out*/&captured_variable_raw_value); // NOLINT: [whitespace/comma] [3]
-
- result = 31 * result + captured_variable_raw_value;
- }
-
- // TODO: Fix above loop to work for objects and lambdas.
- static_assert(kClosureSupportsGarbageCollection == false,
- "Need to update above loop to read the hash code from the "
- "objects and lambdas recursively");
-
- return result;
-}
-
-bool Closure::ReferenceEquals(const Closure* other) const {
- DCHECK(other != nullptr);
-
- // TODO: Need rework to use read barriers once closures have references inside of them that can
- // move. Until then, it's safe to just compare the data inside of it directly.
- static_assert(kClosureSupportsReferences == false,
- "Unsafe to use memcmp in read barrier collector");
-
- if (GetSize() != other->GetSize()) {
- return false;
- }
-
-  return memcmp(this, other, GetSize()) == 0;
-}
-
-size_t Closure::GetNumberOfCapturedVariables() const {
- // TODO: refactor into art_lambda_method.h. Parsing should only be required here as a DCHECK.
- VariableInfo variable_info =
- ParseTypeDescriptor<VariableInfo::kCount>(GetCapturedVariablesTypeDescriptor(),
- VariableInfo::kUpToIndexMax);
- size_t count = variable_info.count_;
-  // Assuming each variable was 1 byte, the size should always be greater than or equal to the count.
- DCHECK_LE(count, GetCapturedVariablesSize());
- return count;
-}
-
-const char* Closure::GetCapturedVariablesTypeDescriptor() const {
- return lambda_info_->GetCapturedVariablesTypeDescriptor();
-}
-
-ShortyFieldType Closure::GetCapturedShortyType(size_t index) const {
- DCHECK_LT(index, GetNumberOfCapturedVariables());
-
- VariableInfo variable_info =
- ParseTypeDescriptor<VariableInfo::kVariableType>(GetCapturedVariablesTypeDescriptor(),
- index);
-
- return variable_info.variable_type_;
-}
-
-uint32_t Closure::GetCapturedPrimitiveNarrow(size_t index) const {
- DCHECK(GetCapturedShortyType(index).IsPrimitiveNarrow());
-
- ShortyFieldType variable_type;
- size_t offset;
- GetCapturedVariableTypeAndOffset(index, &variable_type, &offset);
-
- // TODO: Restructure to use template specialization, e.g. GetCapturedPrimitive<T>
- // so that we can avoid this nonsense regarding memcpy always overflowing.
- // Plus, this additional switching seems redundant since the interpreter
- // would've done it already, and knows the exact type.
- uint32_t result = 0;
- static_assert(ShortyFieldTypeTraits::IsPrimitiveNarrowType<decltype(result)>(),
- "result must be a primitive narrow type");
- switch (variable_type) {
- case ShortyFieldType::kBoolean:
- CopyUnsafeAtOffset<bool>(offset, &result);
- break;
- case ShortyFieldType::kByte:
- CopyUnsafeAtOffset<uint8_t>(offset, &result);
- break;
- case ShortyFieldType::kChar:
- CopyUnsafeAtOffset<uint16_t>(offset, &result);
- break;
- case ShortyFieldType::kShort:
- CopyUnsafeAtOffset<int16_t>(offset, &result);
- break;
- case ShortyFieldType::kInt:
- CopyUnsafeAtOffset<int32_t>(offset, &result);
- break;
- case ShortyFieldType::kFloat:
- // XX: Maybe there should just be a GetCapturedPrimitive<T> to avoid this shuffle?
- // The interpreter's invoke seems to only special case references and wides,
- // everything else is treated as a generic 32-bit pattern.
- CopyUnsafeAtOffset<float>(offset, &result);
- break;
- default:
- LOG(FATAL)
- << "expected a valid narrow primitive shorty type but got "
- << static_cast<char>(variable_type);
- UNREACHABLE();
- }
-
- return result;
-}
-
-uint64_t Closure::GetCapturedPrimitiveWide(size_t index) const {
- DCHECK(GetCapturedShortyType(index).IsPrimitiveWide());
-
- ShortyFieldType variable_type;
- size_t offset;
- GetCapturedVariableTypeAndOffset(index, &variable_type, &offset);
-
- // TODO: Restructure to use template specialization, e.g. GetCapturedPrimitive<T>
- // so that we can avoid this nonsense regarding memcpy always overflowing.
- // Plus, this additional switching seems redundant since the interpreter
- // would've done it already, and knows the exact type.
- uint64_t result = 0;
- static_assert(ShortyFieldTypeTraits::IsPrimitiveWideType<decltype(result)>(),
- "result must be a primitive wide type");
- switch (variable_type) {
- case ShortyFieldType::kLong:
- CopyUnsafeAtOffset<int64_t>(offset, &result);
- break;
- case ShortyFieldType::kDouble:
- CopyUnsafeAtOffset<double>(offset, &result);
- break;
- default:
- LOG(FATAL)
- << "expected a valid primitive wide shorty type but got "
- << static_cast<char>(variable_type);
- UNREACHABLE();
- }
-
- return result;
-}
-
-mirror::Object* Closure::GetCapturedObject(size_t index) const {
- DCHECK(GetCapturedShortyType(index).IsObject());
-
- ShortyFieldType variable_type;
- size_t offset;
- GetCapturedVariableTypeAndOffset(index, &variable_type, &offset);
-
- // TODO: Restructure to use template specialization, e.g. GetCapturedPrimitive<T>
- // so that we can avoid this nonsense regarding memcpy always overflowing.
- // Plus, this additional switching seems redundant since the interpreter
- // would've done it already, and knows the exact type.
- mirror::Object* result = nullptr;
- static_assert(ShortyFieldTypeTraits::IsObjectType<decltype(result)>(),
- "result must be an object type");
- switch (variable_type) {
- case ShortyFieldType::kObject:
- // TODO: This seems unsafe. This may need to use gcroots.
- static_assert(kClosureSupportsGarbageCollection == false,
- "May need GcRoots and definitely need mutator locks");
- {
- mirror::CompressedReference<mirror::Object> compressed_result;
- CopyUnsafeAtOffset<uint32_t>(offset, &compressed_result);
- result = compressed_result.AsMirrorPtr();
- }
- break;
- default:
- CHECK(false)
- << "expected a valid shorty type but got " << static_cast<char>(variable_type);
- UNREACHABLE();
- }
-
- return result;
-}
-
-size_t Closure::GetCapturedClosureSize(size_t index) const {
- DCHECK(GetCapturedShortyType(index).IsLambda());
- size_t offset = GetCapturedVariableOffset(index);
-
- auto* captured_ptr = reinterpret_cast<const uint8_t*>(&captured_);
- size_t closure_size = GetClosureSize(captured_ptr + offset);
-
- return closure_size;
-}
-
-void Closure::CopyCapturedClosure(size_t index, void* destination, size_t destination_room) const {
- DCHECK(GetCapturedShortyType(index).IsLambda());
- size_t offset = GetCapturedVariableOffset(index);
-
- auto* captured_ptr = reinterpret_cast<const uint8_t*>(&captured_);
- size_t closure_size = GetClosureSize(captured_ptr + offset);
-
- static_assert(ShortyFieldTypeTraits::IsLambdaType<Closure*>(),
- "result must be a lambda type");
-
- CopyUnsafeAtOffset<Closure>(offset, destination, closure_size, destination_room);
-}
-
-size_t Closure::GetCapturedVariableOffset(size_t index) const {
- VariableInfo variable_info =
- ParseTypeDescriptor<VariableInfo::kOffset>(GetCapturedVariablesTypeDescriptor(),
- index);
-
- size_t offset = variable_info.offset_;
-
- return offset;
-}
-
-void Closure::GetCapturedVariableTypeAndOffset(size_t index,
- ShortyFieldType* out_type,
- size_t* out_offset) const {
- DCHECK(out_type != nullptr);
- DCHECK(out_offset != nullptr);
-
- static constexpr const VariableInfo::Flags kVariableTypeAndOffset =
- static_cast<VariableInfo::Flags>(VariableInfo::kVariableType | VariableInfo::kOffset);
- VariableInfo variable_info =
- ParseTypeDescriptor<kVariableTypeAndOffset>(GetCapturedVariablesTypeDescriptor(),
- index);
-
- ShortyFieldType variable_type = variable_info.variable_type_;
- size_t offset = variable_info.offset_;
-
- *out_type = variable_type;
- *out_offset = offset;
-}
-
-template <typename T>
-void Closure::CopyUnsafeAtOffset(size_t offset,
- void* destination,
- size_t src_size,
- size_t destination_room) const {
- DCHECK_GE(destination_room, src_size);
- const uint8_t* data_ptr = GetUnsafeAtOffset<T>(offset);
-  memcpy(destination, data_ptr, src_size);
-}
-
-// TODO: This is kind of ugly. I would prefer an unaligned_ptr<Closure> here.
-// Unfortunately C++ doesn't let you lower the alignment (i.e. 'alignas(1) Closure*' is not legal).
-size_t Closure::GetClosureSize(const uint8_t* closure) {
- DCHECK(closure != nullptr);
-
- static_assert(!std::is_base_of<mirror::Object, Closure>::value,
- "It might be unsafe to call memcpy on a managed object");
-
- // Safe as long as it's not a mirror Object.
- // TODO: Should probably wrap this in like MemCpyNative or some such which statically asserts
- // we aren't trying to copy mirror::Object data around.
- ArtLambdaMethod* closure_info;
- memcpy(&closure_info, closure + offsetof(Closure, lambda_info_), sizeof(closure_info));
-
- if (LIKELY(closure_info->IsStaticSize())) {
- return closure_info->GetStaticClosureSize();
- }
-
- // The size is dynamic, so we need to read it from captured_variables_ portion.
- size_t dynamic_size;
- memcpy(&dynamic_size,
- closure + offsetof(Closure, captured_[0].dynamic_.size_),
- sizeof(dynamic_size));
- static_assert(sizeof(dynamic_size) == sizeof(captured_[0].dynamic_.size_),
- "Dynamic size type must match the structural type of the size");
-
- DCHECK_GE(dynamic_size, closure_info->GetStaticClosureSize());
- return dynamic_size;
-}
-
-size_t Closure::GetStartingOffset() const {
- static constexpr const size_t captured_offset = offsetof(Closure, captured_);
- if (LIKELY(lambda_info_->IsStaticSize())) {
- return offsetof(Closure, captured_[0].static_variables_) - captured_offset;
- } else {
- return offsetof(Closure, captured_[0].dynamic_.variables_) - captured_offset;
- }
-}
-
-} // namespace lambda
-} // namespace art
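GetHashCode above is an ordinary 17/31 accumulation over the target method pointer and the raw captured bytes. The same scheme in a standalone sketch (illustrative only; as the TODO in the removed code notes, hashing raw bytes becomes unsafe once captured references can move, and here the high pointer bits are always mixed in for simplicity):

#include <cstddef>
#include <cstdint>

uint32_t HashClosureSketch(const void* target_method,
                           const uint8_t* captured, size_t captured_size) {
  uint32_t result = 17;  // Non-zero prime seed.
  uint64_t method_bits = reinterpret_cast<uintptr_t>(target_method);
  result = 31 * result + static_cast<uint32_t>(method_bits);        // Low 32 bits.
  result = 31 * result + static_cast<uint32_t>(method_bits >> 32);  // High 32 bits (zero on 32-bit).
  for (size_t i = 0; i < captured_size; ++i) {
    result = 31 * result + captured[i];  // One accumulation step per captured byte.
  }
  return result;
}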
diff --git a/runtime/lambda/closure.h b/runtime/lambda/closure.h
deleted file mode 100644
index 31ff194..0000000
--- a/runtime/lambda/closure.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ART_RUNTIME_LAMBDA_CLOSURE_H_
-#define ART_RUNTIME_LAMBDA_CLOSURE_H_
-
-#include "base/macros.h"
-#include "base/mutex.h" // For Locks::mutator_lock_.
-#include "lambda/shorty_field_type.h"
-
-#include <stdint.h>
-
-namespace art {
-class ArtMethod; // forward declaration
-
-namespace mirror {
-class Object; // forward declaration
-} // namespace mirror
-
-namespace lambda {
-class ArtLambdaMethod; // forward declaration
-class ClosureBuilder; // forward declaration
-
-// Inline representation of a lambda closure.
-// Contains the target method and the set of packed captured variables as a copy.
-//
-// The closure itself is logically immutable, although in practice any object references
-// it (recursively) contains can be moved and updated by the GC.
-struct PACKED(sizeof(ArtLambdaMethod*)) Closure {
- // Get the size of the Closure in bytes.
- // This is necessary in order to allocate a large enough area to copy the Closure into.
- // Do *not* copy the closure with memcpy, since references also need to get moved.
- size_t GetSize() const;
-
- // Copy this closure into the target, whose memory size is specified by target_size.
- // Any object references are fixed up during the copy (if there was a read barrier).
- // The target_size must be at least as large as GetSize().
- void CopyTo(void* target, size_t target_size) const;
-
- // Get the target method, i.e. the method that will be dispatched into with invoke-lambda.
- ArtMethod* GetTargetMethod() const;
-
- // Calculates the hash code. Value is recomputed each time.
- uint32_t GetHashCode() const SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Is this the same closure as other? e.g. same target method, same variables captured.
- //
- // Determines whether the two Closures are interchangeable instances.
- // Does *not* call Object#equals recursively. If two Closures compare ReferenceEquals true that
- // means that they are interchangeable values (usually for the purpose of boxing/unboxing).
- bool ReferenceEquals(const Closure* other) const SHARED_REQUIRES(Locks::mutator_lock_);
-
- // How many variables were captured?
- size_t GetNumberOfCapturedVariables() const;
-
- // Returns a type descriptor string that represents each captured variable.
- // e.g. "Ljava/lang/Object;ZB" would mean a capture tuple of (Object, boolean, byte)
- const char* GetCapturedVariablesTypeDescriptor() const;
-
- // Returns the short type for the captured variable at index.
- // Index must be less than the number of captured variables.
- ShortyFieldType GetCapturedShortyType(size_t index) const;
-
- // Returns the 32-bit representation of a non-wide primitive at the captured variable index.
- // Smaller types are zero extended.
- // Index must be less than the number of captured variables.
- uint32_t GetCapturedPrimitiveNarrow(size_t index) const;
- // Returns the 64-bit representation of a wide primitive at the captured variable index.
- // Smaller types are zero extended.
- // Index must be less than the number of captured variables.
- uint64_t GetCapturedPrimitiveWide(size_t index) const;
- // Returns the object reference at the captured variable index.
- // The type at the index *must* be an object reference or a CHECK failure will occur.
- // Index must be less than the number of captured variables.
- mirror::Object* GetCapturedObject(size_t index) const SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Gets the size of a nested capture closure in bytes, at the captured variable index.
- // The type at the index *must* be a lambda closure or a CHECK failure will occur.
- size_t GetCapturedClosureSize(size_t index) const;
-
- // Copies a nested lambda closure at the captured variable index.
- // The destination must have enough room for the closure (see GetCapturedClosureSize).
- void CopyCapturedClosure(size_t index, void* destination, size_t destination_room) const;
-
- private:
- // Read out any non-lambda value as a copy.
- template <typename T>
- T GetCapturedVariable(size_t index) const;
-
- // Reconstruct the closure's captured variable info at runtime.
- struct VariableInfo {
- size_t index_;
- ShortyFieldType variable_type_;
- size_t offset_;
- size_t count_;
-
- enum Flags {
- kIndex = 0x1,
- kVariableType = 0x2,
- kOffset = 0x4,
- kCount = 0x8,
- };
-
- // Traverse to the end of the type descriptor list instead of stopping at some particular index.
- static constexpr size_t kUpToIndexMax = static_cast<size_t>(-1);
- };
-
- // Parse a type descriptor, stopping at index "upto_index".
- // Returns only the information requested in flags. All other fields are indeterminate.
- template <VariableInfo::Flags flags>
- inline VariableInfo ALWAYS_INLINE ParseTypeDescriptor(const char* type_descriptor,
- size_t upto_index) const;
-
- // Convenience function to call ParseTypeDescriptor with just the type and offset.
- void GetCapturedVariableTypeAndOffset(size_t index,
- ShortyFieldType* out_type,
- size_t* out_offset) const;
-
- // How many bytes do the captured variables take up? Runtime sizeof(captured_variables).
- size_t GetCapturedVariablesSize() const;
- // Get the size in bytes of the variable_type which is potentially stored at offset.
- size_t GetCapturedVariableSize(ShortyFieldType variable_type, size_t offset) const;
- // Get the starting offset (in bytes) for the 0th captured variable.
- // All offsets are relative to 'captured_'.
- size_t GetStartingOffset() const;
- // Get the offset for this index.
-  // All offsets are relative to 'captured_'.
- size_t GetCapturedVariableOffset(size_t index) const;
-
- // Cast the data at '(char*)captured_[offset]' into T, returning its address.
-  // This value should not be dereferenced directly since it's unaligned.
- template <typename T>
- inline const uint8_t* GetUnsafeAtOffset(size_t offset) const;
-
- // Copy the data at the offset into the destination. DCHECKs that
- // the destination_room is large enough (in bytes) to fit the data.
- template <typename T>
- inline void CopyUnsafeAtOffset(size_t offset,
- void* destination,
- size_t src_size = sizeof(T),
- size_t destination_room = sizeof(T)) const;
-
- // Get the closure size from an unaligned (i.e. interior) closure pointer.
- static size_t GetClosureSize(const uint8_t* closure);
-
- ///////////////////////////////////////////////////////////////////////////////////
-
- // Compile-time known lambda information such as the type descriptor and size.
- ArtLambdaMethod* lambda_info_;
-
- // A contiguous list of captured variables, and possibly the closure size.
- // The runtime size can always be determined through GetSize().
- union {
- // Read from here if the closure size is static (ArtLambdaMethod::IsStatic)
- uint8_t static_variables_[0];
- struct {
- // Read from here if the closure size is dynamic (ArtLambdaMethod::IsDynamic)
-      size_t size_; // The lambda_info_ and the size_ itself are also included as part of the size.
- uint8_t variables_[0];
- } dynamic_;
- } captured_[0];
- // captured_ will always consist of one array element at runtime.
- // Set to [0] so that 'size_' is not counted in sizeof(Closure).
-
- friend class ClosureBuilder;
- friend class ClosureTest;
-};
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_CLOSURE_H_
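The layout above makes the closure size a two-way decision: statically sized closures take their size from the ArtLambdaMethod metadata, while dynamically sized ones (those capturing nested lambdas) carry it inline as the first captured slot. A simplified model of that decision, with a stand-in info struct in place of ArtLambdaMethod:

#include <cstddef>

// Stand-in for the compile-time metadata held by ArtLambdaMethod.
struct LambdaInfoSketch {
  bool is_static_size;
  size_t static_closure_size;
};

struct ClosureSketch {
  const LambdaInfoSketch* lambda_info;
  size_t dynamic_size;  // Valid only when !lambda_info->is_static_size.

  size_t GetSize() const {
    // Nested lambdas make the size dynamic, so it lives in the payload;
    // otherwise the compile-time size is authoritative.
    return lambda_info->is_static_size ? lambda_info->static_closure_size
                                       : dynamic_size;
  }
};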
diff --git a/runtime/lambda/closure_builder-inl.h b/runtime/lambda/closure_builder-inl.h
deleted file mode 100644
index 3cec21f..0000000
--- a/runtime/lambda/closure_builder-inl.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_INL_H_
-#define ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_INL_H_
-
-#include "lambda/closure_builder.h"
-#include <string.h>
-
-namespace art {
-namespace lambda {
-
-template <typename T, ClosureBuilder::ShortyTypeEnum kShortyType>
-void ClosureBuilder::CaptureVariablePrimitive(T value) {
- static_assert(ShortyFieldTypeTraits::IsPrimitiveType<T>(), "T must be a primitive type");
- const size_t type_size = ShortyFieldType(kShortyType).GetStaticSize();
- DCHECK_EQ(type_size, sizeof(T));
-
- // Copy the data while retaining the bit pattern. Strict-aliasing safe.
- ShortyFieldTypeTraits::MaxType value_storage = 0;
- memcpy(&value_storage, &value, sizeof(T));
-
- values_.push_back(value_storage);
- size_ += sizeof(T);
-
- shorty_types_ += kShortyType;
-}
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_INL_H_
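CaptureVariablePrimitive above widens every primitive into ShortyFieldTypeTraits::MaxType via memcpy so the exact bit pattern survives without strict-aliasing violations. The same idiom in isolation, with MaxType approximated as uint64_t (names are illustrative):

#include <cstdint>
#include <cstring>
#include <vector>

using MaxTypeSketch = uint64_t;  // Wide enough to hold any captured primitive.

template <typename T>
void CapturePrimitiveSketch(std::vector<MaxTypeSketch>* values, T value) {
  static_assert(sizeof(T) <= sizeof(MaxTypeSketch), "primitive must fit in storage");
  MaxTypeSketch storage = 0;
  std::memcpy(&storage, &value, sizeof(T));  // Preserve the bit pattern exactly.
  values->push_back(storage);
}

// Usage: CapturePrimitiveSketch(&values, 0.123f); stores the float's raw bits,
// zero-extended, rather than a value-converted integer.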
diff --git a/runtime/lambda/closure_builder.cc b/runtime/lambda/closure_builder.cc
deleted file mode 100644
index 739e965..0000000
--- a/runtime/lambda/closure_builder.cc
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "lambda/closure_builder.h"
-
-#include "base/macros.h"
-#include "base/value_object.h"
-#include "lambda/art_lambda_method.h"
-#include "lambda/closure.h"
-#include "lambda/shorty_field_type.h"
-#include "runtime/mirror/object_reference.h"
-
-#include <stdint.h>
-#include <vector>
-
-namespace art {
-namespace lambda {
-
-/*
- * GC support TODOs:
- * (Although there's some code for storing objects, it is UNIMPLEMENTED(FATAL) because it is
- * incomplete).
- *
- * 1) GC needs to be able to traverse the Closure and visit any references.
- * It might be possible to get away with global roots in the short term.
- *
- * 2) Add brooks read barrier support. We can store the black/gray/white bits
- * in the lower 2 bits of the lambda art method pointer. Whenever a closure is copied
- * [to the stack] we'd need to add a cold path to turn it black.
- *    (since there are only 3 colors, I can use the 4th value to indicate no-refs).
- * e.g. 0x0 = gray, 0x1 = white, 0x2 = black, 0x3 = no-nested-references
- * - Alternatively the GC can mark reference-less closures as always-black,
- * although it would need extra work to check for references.
- */
-
-void ClosureBuilder::CaptureVariableObject(mirror::Object* object) {
- auto compressed_reference = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(object);
- ShortyFieldTypeTraits::MaxType storage = 0;
-
- static_assert(sizeof(storage) >= sizeof(compressed_reference),
- "not enough room to store a compressed reference");
- memcpy(&storage, &compressed_reference, sizeof(compressed_reference));
-
- values_.push_back(storage);
- size_ += kObjectReferenceSize;
-
- static_assert(kObjectReferenceSize == sizeof(compressed_reference), "reference size mismatch");
-
- // TODO: needs more work to support concurrent GC
- if (kIsDebugBuild) {
- if (kUseReadBarrier) {
- UNIMPLEMENTED(FATAL) << "can't yet safely capture objects with read barrier";
- }
- }
-
- shorty_types_ += ShortyFieldType::kObject;
-}
-
-void ClosureBuilder::CaptureVariableLambda(Closure* closure) {
- DCHECK(closure != nullptr); // null closures not allowed, target method must be null instead.
- values_.push_back(reinterpret_cast<ShortyFieldTypeTraits::MaxType>(closure));
-
- if (LIKELY(is_dynamic_size_ == false)) {
- // Write in the extra bytes to store the dynamic size the first time.
- is_dynamic_size_ = true;
- size_ += sizeof(Closure::captured_[0].dynamic_.size_);
- }
-
- // A closure may be sized dynamically, so always query it for the true size.
- size_ += closure->GetSize();
-
- shorty_types_ += ShortyFieldType::kLambda;
-}
-
-size_t ClosureBuilder::GetSize() const {
- return size_;
-}
-
-size_t ClosureBuilder::GetCaptureCount() const {
- DCHECK_EQ(values_.size(), shorty_types_.size());
- return values_.size();
-}
-
-const std::string& ClosureBuilder::GetCapturedVariableShortyTypes() const {
- DCHECK_EQ(values_.size(), shorty_types_.size());
- return shorty_types_;
-}
-
-Closure* ClosureBuilder::CreateInPlace(void* memory, ArtLambdaMethod* target_method) const {
- DCHECK(memory != nullptr);
- DCHECK(target_method != nullptr);
- DCHECK_EQ(is_dynamic_size_, target_method->IsDynamicSize());
-
- CHECK_EQ(target_method->GetNumberOfCapturedVariables(), values_.size())
- << "number of variables captured at runtime does not match "
- << "number of variables captured at compile time";
-
- Closure* closure = new (memory) Closure;
- closure->lambda_info_ = target_method;
-
- static_assert(offsetof(Closure, captured_) == kInitialSize, "wrong initial size");
-
- size_t written_size;
- if (UNLIKELY(is_dynamic_size_)) {
- // The closure size must be set dynamically (i.e. nested lambdas).
- closure->captured_[0].dynamic_.size_ = GetSize();
- size_t header_size = offsetof(Closure, captured_[0].dynamic_.variables_);
- DCHECK_LE(header_size, GetSize());
- size_t variables_size = GetSize() - header_size;
- written_size =
- WriteValues(target_method,
- closure->captured_[0].dynamic_.variables_,
- header_size,
- variables_size);
- } else {
- // The closure size is known statically (i.e. no nested lambdas).
- DCHECK(GetSize() == target_method->GetStaticClosureSize());
- size_t header_size = offsetof(Closure, captured_[0].static_variables_);
- DCHECK_LE(header_size, GetSize());
- size_t variables_size = GetSize() - header_size;
- written_size =
- WriteValues(target_method,
- closure->captured_[0].static_variables_,
- header_size,
- variables_size);
- }
-
- DCHECK_EQ(written_size, closure->GetSize());
-
- return closure;
-}
-
-size_t ClosureBuilder::WriteValues(ArtLambdaMethod* target_method,
- uint8_t variables[],
- size_t header_size,
- size_t variables_size) const {
- size_t total_size = header_size;
- const char* shorty_types = target_method->GetCapturedVariablesShortyTypeDescriptor();
- DCHECK_STREQ(shorty_types, shorty_types_.c_str());
-
- size_t variables_offset = 0;
- size_t remaining_size = variables_size;
-
- const size_t shorty_count = target_method->GetNumberOfCapturedVariables();
- DCHECK_EQ(shorty_count, GetCaptureCount());
-
- for (size_t i = 0; i < shorty_count; ++i) {
- ShortyFieldType shorty{shorty_types[i]}; // NOLINT [readability/braces] [4]
-
- size_t var_size;
- if (LIKELY(shorty.IsStaticSize())) {
- // TODO: needs more work to support concurrent GC, e.g. read barriers
-      if (kUseReadBarrier) {
-        if (UNLIKELY(shorty.IsObject())) {
-          UNIMPLEMENTED(FATAL) << "can't yet safely write objects with read barrier";
-        }
-      } else {
-        if (UNLIKELY(shorty.IsObject())) {
-          UNIMPLEMENTED(FATAL) << "writing objects not yet supported, no GC support";
-        }
-      }
-
- var_size = shorty.GetStaticSize();
- DCHECK_LE(var_size, sizeof(values_[i]));
-
- // Safe even for objects (non-read barrier case) if we never suspend
- // while the ClosureBuilder is live.
- // FIXME: Need to add GC support for references in a closure.
- memcpy(&variables[variables_offset], &values_[i], var_size);
- } else {
- DCHECK(shorty.IsLambda())
- << " don't support writing dynamically sized types other than lambda";
-
- ShortyFieldTypeTraits::MaxType closure_raw = values_[i];
- Closure* nested_closure = reinterpret_cast<Closure*>(closure_raw);
-
- DCHECK(nested_closure != nullptr);
- nested_closure->CopyTo(&variables[variables_offset], remaining_size);
-
- var_size = nested_closure->GetSize();
- }
-
- total_size += var_size;
- DCHECK_GE(remaining_size, var_size);
- remaining_size -= var_size;
-
- variables_offset += var_size;
- }
-
- DCHECK_EQ('\0', shorty_types[shorty_count]);
- DCHECK_EQ(variables_offset, variables_size);
-
- return total_size;
-}
-
-
-} // namespace lambda
-} // namespace art
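WriteValues above is, at its core, a packed sequential copy: each captured value lands at the running offset, which then advances by that variable's size. Stripped of the shorty-type dispatch and read-barrier checks, the loop looks like this sketch (SlotSketch is a hypothetical stand-in for the per-variable source and size):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct SlotSketch {
  const void* src;  // Where the captured value currently lives.
  size_t size;      // Its size in bytes (static, or queried for nested lambdas).
};

size_t WriteValuesSketch(const std::vector<SlotSketch>& slots, uint8_t* out) {
  size_t offset = 0;
  for (const SlotSketch& slot : slots) {
    std::memcpy(out + offset, slot.src, slot.size);
    offset += slot.size;  // Variables are packed back to back, no padding.
  }
  return offset;  // Total payload bytes written.
}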
diff --git a/runtime/lambda/closure_builder.h b/runtime/lambda/closure_builder.h
deleted file mode 100644
index 23eb484..0000000
--- a/runtime/lambda/closure_builder.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_H_
-#define ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_H_
-
-#include "base/macros.h"
-#include "base/mutex.h" // For Locks::mutator_lock_.
-#include "base/value_object.h"
-#include "lambda/shorty_field_type.h"
-
-#include <stdint.h>
-#include <vector>
-
-namespace art {
-class ArtMethod; // forward declaration
-
-namespace mirror {
-class Object; // forward declaration
-} // namespace mirror
-
-namespace lambda {
-class ArtLambdaMethod; // forward declaration
-
-// Build a closure by capturing variables one at a time.
-// When all variables have been marked captured, the closure can be created in-place into
-// a target memory address.
-//
-// The mutator lock must be held for the duration of the lifetime of this object,
-// since it needs to temporarily store heap references into an internal list.
-class ClosureBuilder {
- public:
- using ShortyTypeEnum = decltype(ShortyFieldType::kByte);
-
- // Mark this primitive value to be captured as the specified type.
- template <typename T, ShortyTypeEnum kShortyType = ShortyFieldTypeSelectEnum<T>::value>
- void CaptureVariablePrimitive(T value);
-
- // Mark this object reference to be captured.
- void CaptureVariableObject(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Mark this lambda closure to be captured.
- void CaptureVariableLambda(Closure* closure);
-
- // Get the size (in bytes) of the closure.
- // This size is used to be able to allocate memory large enough to write the closure into.
- // Call 'CreateInPlace' to actually write the closure out.
- size_t GetSize() const;
-
- // Returns how many variables have been captured so far.
- size_t GetCaptureCount() const;
-
- // Get the list of captured variables' shorty field types.
- const std::string& GetCapturedVariableShortyTypes() const;
-
- // Creates a closure in-place and writes out the data into 'memory'.
- // Memory must be at least 'GetSize' bytes large.
- // All previously marked data to be captured is now written out.
- Closure* CreateInPlace(void* memory, ArtLambdaMethod* target_method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Locks need to be held for entire lifetime of ClosureBuilder.
- ClosureBuilder() SHARED_REQUIRES(Locks::mutator_lock_)
- {}
-
- // Locks need to be held for entire lifetime of ClosureBuilder.
- ~ClosureBuilder() SHARED_REQUIRES(Locks::mutator_lock_)
- {}
-
- private:
- // Initial size a closure starts out before any variables are written.
- // Header size only.
- static constexpr size_t kInitialSize = sizeof(ArtLambdaMethod*);
-
- // Write a Closure's variables field from the captured variables.
- // variables_size specified in bytes, and only includes enough room to write variables into.
- // Returns the calculated actual size of the closure.
- size_t WriteValues(ArtLambdaMethod* target_method,
- uint8_t variables[],
- size_t header_size,
- size_t variables_size) const SHARED_REQUIRES(Locks::mutator_lock_);
-
- size_t size_ = kInitialSize;
- bool is_dynamic_size_ = false;
- std::vector<ShortyFieldTypeTraits::MaxType> values_;
- std::string shorty_types_;
-};
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_H_
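The builder contract above (capture everything, ask GetSize(), then CreateInPlace into caller-owned memory) can be exercised end to end with a toy builder. This mirrors the flow only, not the real API; every name below is hypothetical:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <vector>

class BuilderSketch {
 public:
  template <typename T>
  void Capture(T value) {  // Toy stand-in for CaptureVariablePrimitive.
    size_t old_size = bytes_.size();
    bytes_.resize(old_size + sizeof(T));
    std::memcpy(bytes_.data() + old_size, &value, sizeof(T));
  }
  size_t GetSize() const { return bytes_.size(); }
  void CreateInPlace(void* memory) const {  // Caller provides >= GetSize() bytes.
    std::memcpy(memory, bytes_.data(), bytes_.size());
  }

 private:
  std::vector<uint8_t> bytes_;
};

int main() {
  BuilderSketch builder;
  builder.Capture<int32_t>(42);
  builder.Capture<double>(3.5);
  std::unique_ptr<uint8_t[]> memory(new uint8_t[builder.GetSize()]);
  builder.CreateInPlace(memory.get());
  return 0;
}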
diff --git a/runtime/lambda/closure_test.cc b/runtime/lambda/closure_test.cc
deleted file mode 100644
index 7c1bd0d..0000000
--- a/runtime/lambda/closure_test.cc
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "art_method.h"
-#include "lambda/art_lambda_method.h"
-#include "lambda/closure.h"
-#include "lambda/closure_builder.h"
-#include "lambda/closure_builder-inl.h"
-#include "utils.h"
-
-#include <numeric>
-#include <stdint.h>
-#include <type_traits>
-#include "gtest/gtest.h"
-
-// Turn this on for some extra printfs to help with debugging, since some code is optimized out.
-static constexpr const bool kDebuggingClosureTest = true;
-
-namespace std {
- using Closure = art::lambda::Closure;
-
- // Specialize std::default_delete so it knows how to properly delete closures
- // through the way we allocate them in this test.
- //
- // This is test-only because we don't want the rest of Art to do this.
- template <>
- struct default_delete<Closure> {
- void operator()(Closure* closure) const {
- delete[] reinterpret_cast<char*>(closure);
- }
- };
-} // namespace std
-
-namespace art {
-
-// Fake lock acquisition to please clang lock checker.
-// This doesn't actually acquire any locks because we don't need multiple threads in this gtest.
-struct SCOPED_CAPABILITY ScopedFakeLock {
- explicit ScopedFakeLock(MutatorMutex& mu) ACQUIRE(mu)
- : mu_(mu) {
- }
-
- ~ScopedFakeLock() RELEASE()
- {}
-
- MutatorMutex& mu_;
-};
-
-namespace lambda {
-
-class ClosureTest : public ::testing::Test {
- public:
- ClosureTest() = default;
- ~ClosureTest() = default;
-
- protected:
- static void SetUpTestCase() {
- }
-
- virtual void SetUp() {
- // Create a completely dummy method here.
- // It's "OK" because the Closure never needs to look inside of the ArtMethod
- // (it just needs to be non-null).
- uintptr_t ignore = 0xbadbad;
- fake_method_ = reinterpret_cast<ArtMethod*>(ignore);
- }
-
- static ::testing::AssertionResult IsResultSuccessful(bool result) {
- if (result) {
- return ::testing::AssertionSuccess();
- } else {
- return ::testing::AssertionFailure();
- }
- }
-
- // Create a closure that captures the static variables from 'args' by-value.
- // The lambda method's captured variables types must match the ones in 'args'.
- // -- This creates the closure directly in-memory by using memcpy.
- template <typename ... Args>
- static std::unique_ptr<Closure> CreateClosureStaticVariables(ArtLambdaMethod* lambda_method,
- Args&& ... args) {
- constexpr size_t header_size = sizeof(ArtLambdaMethod*);
- const size_t static_size = GetArgsSize(args ...) + header_size;
- EXPECT_GE(static_size, sizeof(Closure));
-
- // Can't just 'new' the Closure since we don't know the size up front.
- char* closure_as_char_array = new char[static_size];
- Closure* closure_ptr = new (closure_as_char_array) Closure;
-
- // Set up the data
- closure_ptr->lambda_info_ = lambda_method;
- CopyArgs(closure_ptr->captured_[0].static_variables_, args ...);
-
- // Make sure the entire thing is deleted once the unique_ptr goes out of scope.
- return std::unique_ptr<Closure>(closure_ptr); // NOLINT [whitespace/braces] [5]
- }
-
- // Copy variadic arguments into the destination array with memcpy.
- template <typename T, typename ... Args>
- static void CopyArgs(uint8_t destination[], T&& arg, Args&& ... args) {
- memcpy(destination, &arg, sizeof(arg));
- CopyArgs(destination + sizeof(arg), args ...);
- }
-
- // Base case: Done.
- static void CopyArgs(uint8_t destination[]) {
- UNUSED(destination);
- }
-
- // Create a closure that captures the static variables from 'args' by-value.
- // The lambda method's captured variables types must match the ones in 'args'.
- // -- This uses ClosureBuilder interface to set up the closure indirectly.
- template <typename ... Args>
- static std::unique_ptr<Closure> CreateClosureStaticVariablesFromBuilder(
- ArtLambdaMethod* lambda_method,
- Args&& ... args) {
- // Acquire a fake lock since closure_builder needs it.
- ScopedFakeLock fake_lock(*Locks::mutator_lock_);
-
- ClosureBuilder closure_builder;
- CaptureVariableFromArgsList(/*out*/closure_builder, args ...);
-
- EXPECT_EQ(sizeof...(args), closure_builder.GetCaptureCount());
-
- constexpr size_t header_size = sizeof(ArtLambdaMethod*);
- const size_t static_size = GetArgsSize(args ...) + header_size;
- EXPECT_GE(static_size, sizeof(Closure));
-
- // For static variables, no nested closure, so size must match exactly.
- EXPECT_EQ(static_size, closure_builder.GetSize());
-
- // Can't just 'new' the Closure since we don't know the size up front.
- char* closure_as_char_array = new char[static_size];
- Closure* closure_ptr = new (closure_as_char_array) Closure;
-
- // The closure builder packs the captured variables into a Closure.
- closure_builder.CreateInPlace(closure_ptr, lambda_method);
-
- // Make sure the entire thing is deleted once the unique_ptr goes out of scope.
- return std::unique_ptr<Closure>(closure_ptr); // NOLINT [whitespace/braces] [5]
- }
-
- // Call the correct ClosureBuilder::CaptureVariableXYZ function based on the type of args.
- // Invokes for each arg in args.
- template <typename ... Args>
- static void CaptureVariableFromArgsList(/*out*/ClosureBuilder& closure_builder, Args ... args) {
- int ignore[] = {
- (CaptureVariableFromArgs(/*out*/closure_builder, args),0)... // NOLINT [whitespace/comma] [3]
- };
- UNUSED(ignore);
- }
-
- // ClosureBuilder::CaptureVariablePrimitive for types that are primitive only.
- template <typename T>
- typename std::enable_if<ShortyFieldTypeTraits::IsPrimitiveType<T>()>::type
- static CaptureVariableFromArgs(/*out*/ClosureBuilder& closure_builder, T value) {
- static_assert(ShortyFieldTypeTraits::IsPrimitiveType<T>(), "T must be a shorty primitive");
- closure_builder.CaptureVariablePrimitive<T, ShortyFieldTypeSelectEnum<T>::value>(value);
- }
-
- // ClosureBuilder::CaptureVariableObject for types that are objects only.
- template <typename T>
- typename std::enable_if<ShortyFieldTypeTraits::IsObjectType<T>()>::type
- static CaptureVariableFromArgs(/*out*/ClosureBuilder& closure_builder, const T* object) {
- ScopedFakeLock fake_lock(*Locks::mutator_lock_);
- closure_builder.CaptureVariableObject(object);
- }
-
- // Sum of sizeof(Args...).
- template <typename T, typename ... Args>
- static constexpr size_t GetArgsSize(T&& arg, Args&& ... args) {
- return sizeof(arg) + GetArgsSize(args ...);
- }
-
- // Base case: Done.
- static constexpr size_t GetArgsSize() {
- return 0;
- }
-
- // Take "U" and memcpy it into a "T". T starts out as (T)0.
- template <typename T, typename U>
- static T ExpandingBitCast(const U& val) {
- static_assert(sizeof(T) >= sizeof(U), "U too large");
- T new_val = static_cast<T>(0);
- memcpy(&new_val, &val, sizeof(U));
- return new_val;
- }
-
- // Templatized extraction from closures by checking their type with enable_if.
- template <typename T>
- static typename std::enable_if<ShortyFieldTypeTraits::IsPrimitiveNarrowType<T>()>::type
- ExpectCapturedVariable(const Closure* closure, size_t index, T value) {
- EXPECT_EQ(ExpandingBitCast<uint32_t>(value), closure->GetCapturedPrimitiveNarrow(index))
- << " with index " << index;
- }
-
- template <typename T>
- static typename std::enable_if<ShortyFieldTypeTraits::IsPrimitiveWideType<T>()>::type
- ExpectCapturedVariable(const Closure* closure, size_t index, T value) {
- EXPECT_EQ(ExpandingBitCast<uint64_t>(value), closure->GetCapturedPrimitiveWide(index))
- << " with index " << index;
- }
-
- // Templatized SFINAE for Objects so we can get better error messages.
- template <typename T>
- static typename std::enable_if<ShortyFieldTypeTraits::IsObjectType<T>()>::type
- ExpectCapturedVariable(const Closure* closure, size_t index, const T* object) {
- EXPECT_EQ(object, closure->GetCapturedObject(index))
- << " with index " << index;
- }
-
- template <typename ... Args>
- void TestPrimitive(const char *descriptor, Args ... args) {
- const char* shorty = descriptor;
-
- SCOPED_TRACE(descriptor);
-
- ASSERT_EQ(strlen(shorty), sizeof...(args))
- << "test error: descriptor must have same # of types as the # of captured variables";
-
- // Important: This fake lambda method needs to out-live any Closures we create with it.
- ArtLambdaMethod lambda_method{fake_method_, // NOLINT [whitespace/braces] [5]
- descriptor, // NOLINT [whitespace/blank_line] [2]
- shorty,
- };
-
- std::unique_ptr<Closure> closure_a;
- std::unique_ptr<Closure> closure_b;
-
- // Test the closure twice when it's constructed in different ways.
- {
- // Create the closure in a "raw" manner, that is directly with memcpy
- // since we know the underlying data format.
- // This simulates how the compiler would lay out the data directly.
- SCOPED_TRACE("raw closure");
- std::unique_ptr<Closure> closure_raw = CreateClosureStaticVariables(&lambda_method, args ...);
-
- if (kDebuggingClosureTest) {
- std::cerr << "closure raw address: " << closure_raw.get() << std::endl;
- }
- TestPrimitiveWithClosure(closure_raw.get(), descriptor, shorty, args ...);
- closure_a = std::move(closure_raw);
- }
-
- {
- // Create the closure with the ClosureBuilder, which is done indirectly.
- // This simulates how the interpreter would create the closure dynamically at runtime.
- SCOPED_TRACE("closure from builder");
- std::unique_ptr<Closure> closure_built =
- CreateClosureStaticVariablesFromBuilder(&lambda_method, args ...);
- if (kDebuggingClosureTest) {
- std::cerr << "closure built address: " << closure_built.get() << std::endl;
- }
- TestPrimitiveWithClosure(closure_built.get(), descriptor, shorty, args ...);
- closure_b = std::move(closure_built);
- }
-
- // The closures should be identical memory-wise as well.
- EXPECT_EQ(closure_a->GetSize(), closure_b->GetSize());
- EXPECT_TRUE(memcmp(closure_a.get(),
- closure_b.get(),
- std::min(closure_a->GetSize(), closure_b->GetSize())) == 0);
- }
-
- template <typename ... Args>
- static void TestPrimitiveWithClosure(Closure* closure,
- const char* descriptor,
- const char* shorty,
- Args ... args) {
- EXPECT_EQ(sizeof(ArtLambdaMethod*) + GetArgsSize(args...), closure->GetSize());
- EXPECT_EQ(sizeof...(args), closure->GetNumberOfCapturedVariables());
- EXPECT_STREQ(descriptor, closure->GetCapturedVariablesTypeDescriptor());
- TestPrimitiveExpects(closure, shorty, /*index*/0, args ...);
- }
-
- // Call EXPECT_EQ for each argument in the closure's #GetCapturedX.
- template <typename T, typename ... Args>
- static void TestPrimitiveExpects(
- const Closure* closure, const char* shorty, size_t index, T arg, Args ... args) {
- ASSERT_EQ(ShortyFieldType(shorty[index]).GetStaticSize(), sizeof(T))
- << "Test error: Type mismatch at index " << index;
- ExpectCapturedVariable(closure, index, arg);
- EXPECT_EQ(ShortyFieldType(shorty[index]), closure->GetCapturedShortyType(index));
- TestPrimitiveExpects(closure, shorty, index + 1, args ...);
- }
-
- // Base case for EXPECT_EQ.
- static void TestPrimitiveExpects(const Closure* closure, const char* shorty, size_t index) {
- UNUSED(closure, shorty, index);
- }
-
- ArtMethod* fake_method_;
-};
-
-TEST_F(ClosureTest, TestTrivial) {
- ArtLambdaMethod lambda_method{fake_method_, // NOLINT [whitespace/braces] [5]
- "", // No captured variables // NOLINT [whitespace/blank_line] [2]
- "", // No captured variables
- };
-
- std::unique_ptr<Closure> closure = CreateClosureStaticVariables(&lambda_method);
-
- EXPECT_EQ(sizeof(ArtLambdaMethod*), closure->GetSize());
- EXPECT_EQ(0u, closure->GetNumberOfCapturedVariables());
-} // TEST_F
-
-TEST_F(ClosureTest, TestPrimitiveSingle) {
- TestPrimitive("Z", true);
- TestPrimitive("B", int8_t(0xde));
- TestPrimitive("C", uint16_t(0xbeef));
- TestPrimitive("S", int16_t(0xdead));
- TestPrimitive("I", int32_t(0xdeadbeef));
- TestPrimitive("F", 0.123f);
- TestPrimitive("J", int64_t(0xdeadbeef00c0ffee));
- TestPrimitive("D", 123.456);
-} // TEST_F
-
-TEST_F(ClosureTest, TestPrimitiveMany) {
- TestPrimitive("ZZ", true, false);
- TestPrimitive("ZZZ", true, false, true);
- TestPrimitive("BBBB", int8_t(0xde), int8_t(0xa0), int8_t(0xff), int8_t(0xcc));
- TestPrimitive("CC", uint16_t(0xbeef), uint16_t(0xdead));
- TestPrimitive("SSSS", int16_t(0xdead), int16_t(0xc0ff), int16_t(0xf000), int16_t(0xbaba));
- TestPrimitive("III", int32_t(0xdeadbeef), int32_t(0xc0ffee), int32_t(0xbeefdead));
- TestPrimitive("FF", 0.123f, 555.666f);
- TestPrimitive("JJJ", int64_t(0xdeadbeef00c0ffee), int64_t(0x123), int64_t(0xc0ffee));
- TestPrimitive("DD", 123.456, 777.888);
-} // TEST_F
-
-TEST_F(ClosureTest, TestPrimitiveMixed) {
- TestPrimitive("ZZBBCCSSIIFFJJDD",
- true, false,
- int8_t(0xde), int8_t(0xa0),
- uint16_t(0xbeef), uint16_t(0xdead),
- int16_t(0xdead), int16_t(0xc0ff),
- int32_t(0xdeadbeef), int32_t(0xc0ffee),
- 0.123f, 555.666f,
- int64_t(0xdeadbeef00c0ffee), int64_t(0x123),
- 123.456, 777.888);
-} // TEST_F
-
-} // namespace lambda
-} // namespace art
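
The deleted test above leans on the pre-C++17 pack-expansion idiom
"int ignore[] = { (f(args), 0)... };" to call a function once per variadic
argument, in order. A minimal standalone sketch of that trick (helper names
invented here; this is not part of the patch):

    #include <iostream>

    template <typename T>
    void PrintOne(const T& value) {
      std::cout << value << " ";
    }

    template <typename... Args>
    void PrintAll(Args... args) {
      // Each element of the initializer evaluates (PrintOne(args), 0), so
      // PrintOne runs left-to-right; the leading 0 keeps the array non-empty
      // even when the pack is empty.
      int ignore[] = { 0, (PrintOne(args), 0)... };
      (void)ignore;  // Only the side effects matter.
    }

    int main() {
      PrintAll(true, 42, 1.5);  // Prints: 1 42 1.5
      return 0;
    }
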
diff --git a/runtime/lambda/leaking_allocator.cc b/runtime/lambda/leaking_allocator.cc
deleted file mode 100644
index 22bb294..0000000
--- a/runtime/lambda/leaking_allocator.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/bit_utils.h"
-#include "lambda/leaking_allocator.h"
-#include "linear_alloc.h"
-#include "runtime.h"
-
-namespace art {
-namespace lambda {
-
-void* LeakingAllocator::AllocateMemoryImpl(Thread* self, size_t byte_size, size_t align_size) {
- // TODO: use GetAllocatorForClassLoader to allocate lambda ArtMethod data.
- void* mem = Runtime::Current()->GetLinearAlloc()->Alloc(self, byte_size);
- DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(mem), align_size);
- return mem;
-}
-
-} // namespace lambda
-} // namespace art
diff --git a/runtime/lambda/leaking_allocator.h b/runtime/lambda/leaking_allocator.h
deleted file mode 100644
index cb5a1bf..0000000
--- a/runtime/lambda/leaking_allocator.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ART_RUNTIME_LAMBDA_LEAKING_ALLOCATOR_H_
-#define ART_RUNTIME_LAMBDA_LEAKING_ALLOCATOR_H_
-
-#include <utility> // std::forward
-#include <type_traits> // std::aligned_storage
-
-namespace art {
-class Thread; // forward declaration
-
-namespace lambda {
-
-// Temporary class to centralize all the leaking allocations.
-// Allocations made through this class are never freed. This is a placeholder
-// marking that the calling code needs to be rewritten to properly:
-//
-// (a) Have a lifetime scoped to some other entity.
-// (b) Not be allocated over and over again if it was already allocated once (immutable data).
-//
-// TODO: do all of the above a/b for each callsite, and delete this class.
-class LeakingAllocator {
- public:
- // An opaque type which is guaranteed to:
- // * a) be large enough to hold T (e.g. for in-place new)
- // * b) be well-aligned with respect to T (so that reads/writes are well-defined)
- // * c) be strict-aliasing compatible with T*
- //
- // Nominally used to allocate memory for yet unconstructed instances of T.
- template <typename T>
- using AlignedMemoryStorage = typename std::aligned_storage<sizeof(T), alignof(T)>::type;
-
- // Allocate byte_size bytes worth of memory. Never freed.
- template <typename T>
- static AlignedMemoryStorage<T>* AllocateMemory(Thread* self, size_t byte_size = sizeof(T)) {
- return reinterpret_cast<AlignedMemoryStorage<T>*>(
- AllocateMemoryImpl(self, byte_size, alignof(T)));
- }
-
- // Make a new instance of T, flexibly sized, in-place at newly allocated memory. Never freed.
- template <typename T, typename... Args>
- static T* MakeFlexibleInstance(Thread* self, size_t byte_size, Args&&... args) {
- return new (AllocateMemory<T>(self, byte_size)) T(std::forward<Args>(args)...);
- }
-
- // Make a new instance of T in-place at newly allocated memory. Never freed.
- template <typename T, typename... Args>
- static T* MakeInstance(Thread* self, Args&&... args) {
- return new (AllocateMemory<T>(self, sizeof(T))) T(std::forward<Args>(args)...);
- }
-
- private:
- static void* AllocateMemoryImpl(Thread* self, size_t byte_size, size_t align_size);
-};
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_LEAKING_ALLOCATOR_H_
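
The aligned-storage plus placement-new pattern the deleted LeakingAllocator
wrapped is easy to demonstrate in isolation. A small sketch (names invented,
not part of the patch) that, unlike the allocator above, also destroys the
object instead of leaking it:

    #include <new>
    #include <type_traits>
    #include <utility>

    // One suitably sized and aligned slot for a yet-unconstructed T,
    // mirroring LeakingAllocator::AlignedMemoryStorage.
    template <typename T>
    struct Slot {
      typename std::aligned_storage<sizeof(T), alignof(T)>::type storage;

      // In-place construction, as in LeakingAllocator::MakeInstance.
      template <typename... Args>
      T* Construct(Args&&... args) {
        return new (&storage) T(std::forward<Args>(args)...);
      }

      // Placement-new has no matching delete; invoke the destructor by hand.
      static void Destroy(T* ptr) { ptr->~T(); }
    };

    int main() {
      Slot<std::pair<int, double>> slot;
      std::pair<int, double>* p = slot.Construct(1, 2.5);
      Slot<std::pair<int, double>>::Destroy(p);
      return 0;
    }
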
diff --git a/runtime/lambda/shorty_field_type.h b/runtime/lambda/shorty_field_type.h
deleted file mode 100644
index c314fd2..0000000
--- a/runtime/lambda/shorty_field_type.h
+++ /dev/null
@@ -1,475 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ART_RUNTIME_LAMBDA_SHORTY_FIELD_TYPE_H_
-#define ART_RUNTIME_LAMBDA_SHORTY_FIELD_TYPE_H_
-
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/value_object.h"
-#include "globals.h"
-#include "runtime/primitive.h"
-
-#include <ostream>
-
-namespace art {
-
-namespace mirror {
-class Object; // forward declaration
-} // namespace mirror
-
-namespace lambda {
-
-struct Closure; // forward declaration
-
-// TODO: Refactor together with primitive.h
-
-// The short form of a field type descriptor. Corresponds to ShortyFieldType in dex specification.
-// Only types usable by a field (and locals) are allowed (i.e. no void type).
-// Note that arrays and objects are treated both as 'L'.
-//
-// This is effectively a 'char' enum-like zero-cost type-safe wrapper with extra helper functions.
-struct ShortyFieldType : ValueObject {
- // Use as if this was an enum class, e.g. 'ShortyFieldType::kBoolean'.
- enum : char {
- // Primitives (Narrow):
- kBoolean = 'Z',
- kByte = 'B',
- kChar = 'C',
- kShort = 'S',
- kInt = 'I',
- kFloat = 'F',
- // Primitives (Wide):
- kLong = 'J',
- kDouble = 'D',
- // Managed types:
- kObject = 'L', // This can also be an array (which is otherwise '[' in a non-shorty).
- kLambda = '\\',
- }; // NOTE: This is an anonymous enum so we can get exhaustive switch checking from the compiler.
-
- // Implicitly construct from the enum above. Value must be one of the enum list members above.
- // Always safe to use, does not do any DCHECKs.
- inline constexpr ShortyFieldType(decltype(kByte) c) : value_(c) {
- }
-
- // Default constructor. The initial value is undefined. Initialize before calling methods.
- // This is very unsafe, but exists as a convenience for code that wants an uninitialized value.
- explicit ShortyFieldType() : value_(StaticCastValue(0)) {
- }
-
- // Explicitly construct from a char. Value must be one of the enum list members above.
- // Conversion is potentially unsafe, so DCHECKing is performed.
- explicit inline ShortyFieldType(char c) : value_(StaticCastValue(c)) {
- if (kIsDebugBuild) {
- // Verify at debug-time that our conversion is safe.
- ShortyFieldType ignored;
- DCHECK(MaybeCreate(c, &ignored)) << "unknown shorty field type '" << c << "'";
- }
- }
-
- // Attempts to parse the character in 'shorty_field_type' into its strongly typed version.
- // Returns false if the character was out of range of the grammar.
- static bool MaybeCreate(char shorty_field_type, ShortyFieldType* out) {
- DCHECK(out != nullptr);
- switch (shorty_field_type) {
- case kBoolean:
- case kByte:
- case kChar:
- case kShort:
- case kInt:
- case kFloat:
- case kLong:
- case kDouble:
- case kObject:
- case kLambda:
- *out = ShortyFieldType(static_cast<decltype(kByte)>(shorty_field_type));
- return true;
- default:
- break;
- }
-
- return false;
- }
-
- // Convert the first type in a field type descriptor string into a shorty.
- // Arrays are converted into objects.
- // Does not work for 'void' types (as they are illegal in a field type descriptor).
- static ShortyFieldType CreateFromFieldTypeDescriptor(const char* field_type_descriptor) {
- DCHECK(field_type_descriptor != nullptr);
- char c = *field_type_descriptor;
- if (UNLIKELY(c == kArray)) { // Arrays are treated as object references.
- c = kObject;
- }
- return ShortyFieldType{c}; // NOLINT [readability/braces] [4]
- }
-
- // Parse the first type in the field type descriptor string into a shorty.
- // See CreateFromFieldTypeDescriptor for more details.
- //
- // Returns a pointer into the middle of field_type_descriptor that points at
- // the next shorty type, or null if there are no more types.
- //
- // DCHECKs that each of the nested types is a valid shorty field type. This
- // means the type descriptor must be already valid.
- static const char* ParseFromFieldTypeDescriptor(const char* field_type_descriptor,
- ShortyFieldType* out_type) {
- DCHECK(field_type_descriptor != nullptr);
-
- if (UNLIKELY(field_type_descriptor[0] == '\0')) {
- // Handle empty strings by immediately returning null.
- return nullptr;
- }
-
- // All non-empty strings must be a valid list of field type descriptors, otherwise
- // the DCHECKs will kick in and the program will crash.
- const char shorter_type = *field_type_descriptor;
-
- ShortyFieldType safe_type;
- bool type_set = MaybeCreate(shorter_type, &safe_type);
-
- // Lambda that keeps skipping characters until it sees ';'.
- // Stops one character -after- the ';'.
- auto skip_until_semicolon = [&field_type_descriptor]() {
- while (*field_type_descriptor != ';' && *field_type_descriptor != '\0') {
- ++field_type_descriptor;
- }
- DCHECK_NE(*field_type_descriptor, '\0')
- << " type descriptor terminated too early: " << field_type_descriptor;
- ++field_type_descriptor; // Skip the ';'
- };
-
- ++field_type_descriptor;
- switch (shorter_type) {
- case kObject:
- skip_until_semicolon();
-
- DCHECK(type_set);
- DCHECK(safe_type == kObject);
- break;
- case kArray:
- // Strip out all of the leading [[[[[s; we don't care if it's a multi-dimensional array.
- while (*field_type_descriptor == '[' && *field_type_descriptor != '\0') {
- ++field_type_descriptor;
- }
- DCHECK_NE(*field_type_descriptor, '\0')
- << " type descriptor terminated too early: " << field_type_descriptor;
- // Either a primitive, object, or closure left. No more arrays.
- {
- // Now skip all the characters that form the array's interior-most element type
- // (which itself is guaranteed not to be an array).
- ShortyFieldType array_interior_type;
- type_set = MaybeCreate(*field_type_descriptor, &array_interior_type);
- DCHECK(type_set) << " invalid remaining type descriptor " << field_type_descriptor;
-
- // Handle array-of-objects case like [[[[[LObject; and array-of-closures like [[[[[\Foo;
- if (*field_type_descriptor == kObject || *field_type_descriptor == kLambda) {
- skip_until_semicolon();
- } else {
- // Handle primitives which are exactly one character we can skip.
- DCHECK(array_interior_type.IsPrimitive());
- ++field_type_descriptor;
- }
- }
-
- safe_type = kObject;
- type_set = true;
- break;
- case kLambda:
- skip_until_semicolon();
-
- DCHECK(safe_type == kLambda);
- DCHECK(type_set);
- break;
- default:
- DCHECK_NE(kVoid, shorter_type) << "cannot make a ShortyFieldType from a void type";
- break;
- }
-
- DCHECK(type_set) << "invalid shorty type descriptor " << shorter_type;
-
- *out_type = safe_type;
- return type_set ? field_type_descriptor : nullptr;
- }
-
- // Explicitly convert to a char.
- inline explicit operator char() const {
- return value_;
- }
-
- // Is this a primitive?
- inline bool IsPrimitive() const {
- return IsPrimitiveNarrow() || IsPrimitiveWide();
- }
-
- // Is this a narrow primitive (i.e. can fit into 1 virtual register)?
- inline bool IsPrimitiveNarrow() const {
- switch (value_) {
- case kBoolean:
- case kByte:
- case kChar:
- case kShort:
- case kInt:
- case kFloat:
- return true;
- default:
- return false;
- }
- }
-
- // Is this a wide primitive (i.e. needs exactly 2 virtual registers)?
- inline bool IsPrimitiveWide() const {
- switch (value_) {
- case kLong:
- case kDouble:
- return true;
- default:
- return false;
- }
- }
-
- // Is this an object reference (which can also be an array)?
- inline bool IsObject() const {
- return value_ == kObject;
- }
-
- // Is this a lambda?
- inline bool IsLambda() const {
- return value_ == kLambda;
- }
-
- // Is the size of this (to store inline as a field) always known at compile-time?
- inline bool IsStaticSize() const {
- return !IsLambda();
- }
-
- // Get the compile-time size (to be able to store it inline as a field or on stack).
- // Dynamically-sized values such as lambdas return the guaranteed lower bound.
- inline size_t GetStaticSize() const {
- switch (value_) {
- case kBoolean:
- return sizeof(bool);
- case kByte:
- return sizeof(uint8_t);
- case kChar:
- return sizeof(uint16_t); // Java chars are unsigned 16-bit.
- case kShort:
- return sizeof(int16_t); // Java shorts are signed 16-bit.
- case kInt:
- return sizeof(int32_t);
- case kLong:
- return sizeof(int64_t);
- case kFloat:
- return sizeof(float);
- case kDouble:
- return sizeof(double);
- case kObject:
- return kObjectReferenceSize;
- case kLambda:
- return sizeof(void*); // Large enough to store a pointer to the ArtLambdaMethod.
- default:
- DCHECK(false) << "unknown shorty field type '" << static_cast<char>(value_) << "'";
- UNREACHABLE();
- }
- }
-
- // Implicitly convert to the anonymous nested inner type. Used for exhaustive switch detection.
- inline operator decltype(kByte)() const {
- return value_;
- }
-
- // Returns a read-only static string representing the enum name, useful for printing/debug only.
- inline const char* ToString() const {
- switch (value_) {
- case kBoolean:
- return "kBoolean";
- case kByte:
- return "kByte";
- case kChar:
- return "kChar";
- case kShort:
- return "kShort";
- case kInt:
- return "kInt";
- case kLong:
- return "kLong";
- case kFloat:
- return "kFloat";
- case kDouble:
- return "kDouble";
- case kObject:
- return "kObject";
- case kLambda:
- return "kLambda";
- default:
- // Undefined behavior if we get this far. Pray the compiler gods are merciful.
- return "<undefined>";
- }
- }
-
- private:
- static constexpr const char kArray = '[';
- static constexpr const char kVoid = 'V';
-
- // Helper to statically cast anything into our nested anonymous enum type.
- template <typename T>
- inline static decltype(kByte) StaticCastValue(const T& anything) {
- return static_cast<decltype(value_)>(anything);
- }
-
- // The only field in this struct.
- decltype(kByte) value_;
-};
-
-
-// Print to an output stream.
-inline std::ostream& operator<<(std::ostream& ostream, ShortyFieldType shorty) {
- return ostream << shorty.ToString();
-}
-
-static_assert(sizeof(ShortyFieldType) == sizeof(char),
- "ShortyFieldType must be lightweight just like a char");
-
-// Compile-time trait information regarding the ShortyFieldType.
-// Used by static_asserts to verify that the templates are correctly used at compile-time.
-//
-// For example,
-// ShortyFieldTypeTraits::IsPrimitiveNarrowType<int64_t>() == true
-// ShortyFieldTypeTraits::IsObjectType<mirror::Object*>() == true
-struct ShortyFieldTypeTraits {
- // A type guaranteed to be large enough to hold any of the shorty field types.
- using MaxType = uint64_t;
-
- // Type traits: Returns true if 'T' is a valid type that can be represented by a shorty field type.
- template <typename T>
- static inline constexpr bool IsType() {
- return IsPrimitiveType<T>() || IsObjectType<T>() || IsLambdaType<T>();
- }
-
- // Returns true if 'T' is a primitive type (i.e. a built-in without nested references).
- template <typename T>
- static inline constexpr bool IsPrimitiveType() {
- return IsPrimitiveNarrowType<T>() || IsPrimitiveWideType<T>();
- }
-
- // Returns true if 'T' is a primitive type that is narrow (i.e. can be stored into 1 vreg).
- template <typename T>
- static inline constexpr bool IsPrimitiveNarrowType() {
- return IsPrimitiveNarrowTypeImpl(static_cast<T* const>(nullptr));
- }
-
- // Returns true if 'T' is a primitive type that is wide (i.e. needs 2 vregs for storage).
- template <typename T>
- static inline constexpr bool IsPrimitiveWideType() {
- return IsPrimitiveWideTypeImpl(static_cast<T* const>(nullptr));
- }
-
- // Returns true if 'T' is an object (i.e. it is a managed GC reference).
- // Note: This is intended to be equivalent to
- // std::is_base_of<mirror::Object, std::remove_pointer<T>::type>::value.
- template <typename T>
- static inline constexpr bool IsObjectType() {
- return IsObjectTypeImpl(static_cast<T* const>(nullptr));
- }
-
- // Returns true if 'T' is a lambda (i.e. it is a closure with unknown static data).
- template <typename T>
- static inline constexpr bool IsLambdaType() {
- return IsLambdaTypeImpl(static_cast<T* const>(nullptr));
- }
-
- private:
-#define IS_VALID_TYPE_SPECIALIZATION(type, name) \
- static inline constexpr bool Is ## name ## TypeImpl(type* const = 0) { /*NOLINT*/ \
- return true; \
- } \
- \
- static_assert(sizeof(MaxType) >= sizeof(type), "MaxType too small")
-
- IS_VALID_TYPE_SPECIALIZATION(bool, PrimitiveNarrow);
- IS_VALID_TYPE_SPECIALIZATION(int8_t, PrimitiveNarrow);
- IS_VALID_TYPE_SPECIALIZATION(uint8_t, PrimitiveNarrow); // Not strictly true, but close enough.
- IS_VALID_TYPE_SPECIALIZATION(int16_t, PrimitiveNarrow);
- IS_VALID_TYPE_SPECIALIZATION(uint16_t, PrimitiveNarrow); // Chars are unsigned.
- IS_VALID_TYPE_SPECIALIZATION(int32_t, PrimitiveNarrow);
- IS_VALID_TYPE_SPECIALIZATION(uint32_t, PrimitiveNarrow); // Not strictly true, but close enough.
- IS_VALID_TYPE_SPECIALIZATION(float, PrimitiveNarrow);
- IS_VALID_TYPE_SPECIALIZATION(int64_t, PrimitiveWide);
- IS_VALID_TYPE_SPECIALIZATION(uint64_t, PrimitiveWide); // Not strictly true, but close enough.
- IS_VALID_TYPE_SPECIALIZATION(double, PrimitiveWide);
- IS_VALID_TYPE_SPECIALIZATION(mirror::Object*, Object);
- IS_VALID_TYPE_SPECIALIZATION(Closure*, Lambda);
-#undef IS_VALID_TYPE_SPECIALIZATION
-
-#define IS_VALID_TYPE_SPECIALIZATION_IMPL(name) \
- template <typename T> \
- static inline constexpr bool Is ## name ## TypeImpl(T* const = 0) { \
- return false; \
- }
-
- IS_VALID_TYPE_SPECIALIZATION_IMPL(PrimitiveNarrow);
- IS_VALID_TYPE_SPECIALIZATION_IMPL(PrimitiveWide);
- IS_VALID_TYPE_SPECIALIZATION_IMPL(Object);
- IS_VALID_TYPE_SPECIALIZATION_IMPL(Lambda);
-
-#undef IS_VALID_TYPE_SPECIALIZATION_IMPL
-};
-
-// Maps the ShortyFieldType enum to its C++ type equivalent, exposed as the "type" typedef.
-// For example:
-// ShortyFieldTypeSelectType<ShortyFieldType::kBoolean>::type => bool
-// ShortyFieldTypeSelectType<ShortyFieldType::kLong>::type => int64_t
-//
-// Invalid enums will not have the type defined.
-template <decltype(ShortyFieldType::kByte) Shorty>
-struct ShortyFieldTypeSelectType {
-};
-
-// Maps the C++ type to its ShortyFieldType enum equivalent, exposed as the "value" constexpr.
-// For example:
-// ShortyFieldTypeSelectEnum<bool>::value => ShortyFieldType::kBoolean
-// ShortyFieldTypeSelectEnum<int64_t>::value => ShortyFieldType::kLong
-//
-// Signed-ness must match for a valid select, e.g. uint64_t will not map to kLong, but int64_t will.
-// Invalid types will not have the value defined (see e.g. ShortyFieldTypeTraits::IsType<T>())
-template <typename T>
-struct ShortyFieldTypeSelectEnum {
-};
-
-#define SHORTY_FIELD_TYPE_SELECT_IMPL(cpp_type, enum_element) \
-template <> \
-struct ShortyFieldTypeSelectType<ShortyFieldType::enum_element> { \
- using type = cpp_type; \
-}; \
-\
-template <> \
-struct ShortyFieldTypeSelectEnum<cpp_type> { \
- static constexpr const auto value = ShortyFieldType::enum_element; \
-}; \
-
-SHORTY_FIELD_TYPE_SELECT_IMPL(bool, kBoolean);
-SHORTY_FIELD_TYPE_SELECT_IMPL(int8_t, kByte);
-SHORTY_FIELD_TYPE_SELECT_IMPL(int16_t, kShort);
-SHORTY_FIELD_TYPE_SELECT_IMPL(uint16_t, kChar);
-SHORTY_FIELD_TYPE_SELECT_IMPL(int32_t, kInt);
-SHORTY_FIELD_TYPE_SELECT_IMPL(float, kFloat);
-SHORTY_FIELD_TYPE_SELECT_IMPL(int64_t, kLong);
-SHORTY_FIELD_TYPE_SELECT_IMPL(double, kDouble);
-SHORTY_FIELD_TYPE_SELECT_IMPL(mirror::Object*, kObject);
-SHORTY_FIELD_TYPE_SELECT_IMPL(Closure*, kLambda);
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_SHORTY_FIELD_TYPE_H_
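
To make the parsing contract of the deleted ParseFromFieldTypeDescriptor
concrete: array dimensions are stripped and the whole array collapses to 'L',
while object ('L') and lambda ('\') references consume everything up to and
including the ';'. A standalone sketch of that walk (function name invented;
assumes a well-formed descriptor, matching the DCHECK-based contract above):

    #include <iostream>
    #include <string>
    #include <vector>

    std::vector<char> ToShorties(const std::string& descriptor) {
      std::vector<char> out;
      size_t i = 0;
      while (i < descriptor.size()) {
        bool is_array = false;
        while (descriptor[i] == '[') {  // Strip all array dimensions.
          is_array = true;
          ++i;
        }
        char c = descriptor[i];
        if (c == 'L' || c == '\\') {
          i = descriptor.find(';', i) + 1;  // References run up to the ';'.
        } else {
          ++i;  // Primitives are exactly one character.
        }
        out.push_back(is_array ? 'L' : c);  // Arrays always become objects.
      }
      return out;
    }

    int main() {
      for (char c : ToShorties("[[LObject;I\\Closure;D")) {
        std::cout << c;  // Prints: LI\D
      }
      std::cout << std::endl;
      return 0;
    }
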
diff --git a/runtime/lambda/shorty_field_type_test.cc b/runtime/lambda/shorty_field_type_test.cc
deleted file mode 100644
index 32bade9..0000000
--- a/runtime/lambda/shorty_field_type_test.cc
+++ /dev/null
@@ -1,354 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "lambda/shorty_field_type.h"
-#include "mirror/object_reference.h"
-
-#include "utils.h"
-#include <numeric>
-#include <stdint.h>
-#include "gtest/gtest.h"
-
-#define EXPECT_NULL(expected) EXPECT_EQ(reinterpret_cast<const void*>(expected), \
- reinterpret_cast<void*>(nullptr));
-
-namespace art {
-namespace lambda {
-
-class ShortyFieldTypeTest : public ::testing::Test {
- public:
- ShortyFieldTypeTest() = default;
- ~ShortyFieldTypeTest() = default;
-
- protected:
- static void SetUpTestCase() {
- }
-
- virtual void SetUp() {
- }
-
- static ::testing::AssertionResult IsResultSuccessful(bool result) {
- if (result) {
- return ::testing::AssertionSuccess();
- } else {
- return ::testing::AssertionFailure();
- }
- }
-
- template <typename T>
- static std::string ListToString(const T& list) {
- std::stringstream stream;
-
- stream << "[";
- for (auto&& val : list) {
- stream << val << ", ";
- }
- stream << "]";
-
- return stream.str();
- }
-
- // Compare two vector-like types for equality.
- template <typename T>
- static ::testing::AssertionResult AreListsEqual(const T& expected, const T& actual) {
- bool success = true;
- std::stringstream stream;
-
- if (expected.size() != actual.size()) {
- success = false;
- stream << "Expected list size: " << expected.size()
- << ", but got list size: " << actual.size();
- stream << std::endl;
- }
-
- for (size_t j = 0; j < std::min(expected.size(), actual.size()); ++j) {
- if (expected[j] != actual[j]) {
- success = false;
- stream << "Expected element '" << j << "' to be '" << expected[j] << "', but got actual: '"
- << actual[j] << "'.";
- stream << std::endl;
- }
- }
-
- if (success) {
- return ::testing::AssertionSuccess();
- }
-
- stream << "Expected list was: " << ListToString(expected)
- << ", actual list was: " << ListToString(actual);
-
- return ::testing::AssertionFailure() << stream.str();
- }
-
- static std::vector<ShortyFieldType> ParseLongTypeDescriptorsToList(const char* type_descriptor) {
- std::vector<ShortyFieldType> lst;
-
- ShortyFieldType shorty;
-
- const char* parsed = type_descriptor;
- while ((parsed = ShortyFieldType::ParseFromFieldTypeDescriptor(parsed, &shorty)) != nullptr) {
- lst.push_back(shorty);
- }
-
- return lst;
- }
-
- protected:
- // Shorthands for the ShortyFieldType constants.
- // The letters are the same as JNI letters, with kS_ being a lambda since \ is not available.
- static constexpr ShortyFieldType kSZ = ShortyFieldType::kBoolean;
- static constexpr ShortyFieldType kSB = ShortyFieldType::kByte;
- static constexpr ShortyFieldType kSC = ShortyFieldType::kChar;
- static constexpr ShortyFieldType kSS = ShortyFieldType::kShort;
- static constexpr ShortyFieldType kSI = ShortyFieldType::kInt;
- static constexpr ShortyFieldType kSF = ShortyFieldType::kFloat;
- static constexpr ShortyFieldType kSJ = ShortyFieldType::kLong;
- static constexpr ShortyFieldType kSD = ShortyFieldType::kDouble;
- static constexpr ShortyFieldType kSL = ShortyFieldType::kObject;
- static constexpr ShortyFieldType kS_ = ShortyFieldType::kLambda;
-};
-
-TEST_F(ShortyFieldTypeTest, TestMaybeCreate) {
- ShortyFieldType shorty;
-
- std::vector<char> shorties = {'Z', 'B', 'C', 'S', 'I', 'F', 'J', 'D', 'L', '\\'};
-
- // All valid 'shorty' characters are created successfully.
- for (const char c : shorties) {
- EXPECT_TRUE(ShortyFieldType::MaybeCreate(c, &shorty)) << c;
- EXPECT_EQ(c, static_cast<char>(shorty));
- }
-
- // All other characters can never be created.
- // Use an int counter so the loop can include max() itself without wrapping.
- for (int c = 0; c <= std::numeric_limits<unsigned char>::max(); ++c) {
- // Skip the valid characters.
- if (std::find(shorties.begin(), shorties.end(), c) != shorties.end()) { continue; }
- // All invalid characters should fail.
- EXPECT_FALSE(ShortyFieldType::MaybeCreate(static_cast<char>(c), &shorty)) << c;
- }
-} // TEST_F
-
-TEST_F(ShortyFieldTypeTest, TestCreateFromFieldTypeDescriptor) {
- // Sample input.
- std::vector<const char*> lengthies = {
- "Z", "B", "C", "S", "I", "F", "J", "D", "LObject;", "\\Closure;",
- "[Z", "[[B", "[[LObject;"
- };
-
- // Expected output.
- std::vector<ShortyFieldType> expected = {
- ShortyFieldType::kBoolean,
- ShortyFieldType::kByte,
- ShortyFieldType::kChar,
- ShortyFieldType::kShort,
- ShortyFieldType::kInt,
- ShortyFieldType::kFloat,
- ShortyFieldType::kLong,
- ShortyFieldType::kDouble,
- ShortyFieldType::kObject,
- ShortyFieldType::kLambda,
- // Arrays are always treated as objects.
- ShortyFieldType::kObject,
- ShortyFieldType::kObject,
- ShortyFieldType::kObject,
- };
-
- // All valid lengthy types are correctly turned into the expected shorty type.
- for (size_t i = 0; i < lengthies.size(); ++i) {
- EXPECT_EQ(expected[i], ShortyFieldType::CreateFromFieldTypeDescriptor(lengthies[i]));
- }
-} // TEST_F
-
-TEST_F(ShortyFieldTypeTest, TestParseFromFieldTypeDescriptor) {
- // Sample input.
- std::vector<const char*> lengthies = {
- // Empty list
- "",
- // Primitives
- "Z", "B", "C", "S", "I", "F", "J", "D",
- // Non-primitives
- "LObject;", "\\Closure;",
- // Arrays. The biggest PITA.
- "[Z", "[[B", "[[LObject;", "[[[[\\Closure;",
- // Multiple things at once:
- "ZBCSIFJD",
- "LObject;LObject;SSI",
- "[[ZDDZ",
- "[[LObject;[[Z[F\\Closure;LObject;",
- };
-
- // Expected output.
- std::vector<std::vector<ShortyFieldType>> expected = {
- // Empty list
- {},
- // Primitives
- {kSZ}, {kSB}, {kSC}, {kSS}, {kSI}, {kSF}, {kSJ}, {kSD},
- // Non-primitives.
- { ShortyFieldType::kObject }, { ShortyFieldType::kLambda },
- // Arrays are always treated as objects.
- { kSL }, { kSL }, { kSL }, { kSL },
- // Multiple things at once:
- { kSZ, kSB, kSC, kSS, kSI, kSF, kSJ, kSD },
- { kSL, kSL, kSS, kSS, kSI },
- { kSL, kSD, kSD, kSZ },
- { kSL, kSL, kSL, kS_, kSL },
- };
-
- // Sanity check that the expected/actual lists stay the same size when adding new entries.
- ASSERT_EQ(expected.size(), lengthies.size());
-
- // All valid lengthy types are correctly turned into the expected shorty type.
- for (size_t i = 0; i < expected.size(); ++i) {
- const std::vector<ShortyFieldType>& expected_list = expected[i];
- std::vector<ShortyFieldType> actual_list = ParseLongTypeDescriptorsToList(lengthies[i]);
- EXPECT_TRUE(AreListsEqual(expected_list, actual_list));
- }
-} // TEST_F
-
-// Helper class to probe a shorty's characteristics by minimizing copy-and-paste tests.
-template <typename T, decltype(ShortyFieldType::kByte) kShortyEnum>
-struct ShortyTypeCharacteristics {
- bool is_primitive_ = false;
- bool is_primitive_narrow_ = false;
- bool is_primitive_wide_ = false;
- bool is_object_ = false;
- bool is_lambda_ = false;
- size_t size_ = sizeof(T);
- bool is_dynamic_sized_ = false;
-
- void CheckExpects() {
- ShortyFieldType shorty = kShortyEnum;
-
- // Test the main non-parsing-related ShortyFieldType characteristics.
- EXPECT_EQ(is_primitive_, shorty.IsPrimitive());
- EXPECT_EQ(is_primitive_narrow_, shorty.IsPrimitiveNarrow());
- EXPECT_EQ(is_primitive_wide_, shorty.IsPrimitiveWide());
- EXPECT_EQ(is_object_, shorty.IsObject());
- EXPECT_EQ(is_lambda_, shorty.IsLambda());
- EXPECT_EQ(size_, shorty.GetStaticSize());
- EXPECT_EQ(is_dynamic_sized_, !shorty.IsStaticSize());
-
- // Test compile-time ShortyFieldTypeTraits.
- EXPECT_TRUE(ShortyFieldTypeTraits::IsType<T>());
- EXPECT_EQ(is_primitive_, ShortyFieldTypeTraits::IsPrimitiveType<T>());
- EXPECT_EQ(is_primitive_narrow_, ShortyFieldTypeTraits::IsPrimitiveNarrowType<T>());
- EXPECT_EQ(is_primitive_wide_, ShortyFieldTypeTraits::IsPrimitiveWideType<T>());
- EXPECT_EQ(is_object_, ShortyFieldTypeTraits::IsObjectType<T>());
- EXPECT_EQ(is_lambda_, ShortyFieldTypeTraits::IsLambdaType<T>());
-
- // Test compile-time ShortyFieldType selectors
- static_assert(std::is_same<T, typename ShortyFieldTypeSelectType<kShortyEnum>::type>::value,
- "ShortyFieldType Enum->Type incorrect mapping");
- auto kActualEnum = ShortyFieldTypeSelectEnum<T>::value; // Do not ODR-use, avoid linker error.
- EXPECT_EQ(kShortyEnum, kActualEnum);
- }
-};
-
-TEST_F(ShortyFieldTypeTest, TestCharacteristicsAndTraits) {
- // Boolean test
- {
- SCOPED_TRACE("boolean");
- ShortyTypeCharacteristics<bool, ShortyFieldType::kBoolean> chars;
- chars.is_primitive_ = true;
- chars.is_primitive_narrow_ = true;
- chars.CheckExpects();
- }
-
- // Byte test
- {
- SCOPED_TRACE("byte");
- ShortyTypeCharacteristics<int8_t, ShortyFieldType::kByte> chars;
- chars.is_primitive_ = true;
- chars.is_primitive_narrow_ = true;
- chars.CheckExpects();
- }
-
- // Char test
- {
- SCOPED_TRACE("char");
- ShortyTypeCharacteristics<uint16_t, ShortyFieldType::kChar> chars; // Char is unsigned.
- chars.is_primitive_ = true;
- chars.is_primitive_narrow_ = true;
- chars.CheckExpects();
- }
-
- // Short test
- {
- SCOPED_TRACE("short");
- ShortyTypeCharacteristics<int16_t, ShortyFieldType::kShort> chars;
- chars.is_primitive_ = true;
- chars.is_primitive_narrow_ = true;
- chars.CheckExpects();
- }
-
- // Int test
- {
- SCOPED_TRACE("int");
- ShortyTypeCharacteristics<int32_t, ShortyFieldType::kInt> chars;
- chars.is_primitive_ = true;
- chars.is_primitive_narrow_ = true;
- chars.CheckExpects();
- }
-
- // Long test
- {
- SCOPED_TRACE("long");
- ShortyTypeCharacteristics<int64_t, ShortyFieldType::kLong> chars;
- chars.is_primitive_ = true;
- chars.is_primitive_wide_ = true;
- chars.CheckExpects();
- }
-
- // Float test
- {
- SCOPED_TRACE("float");
- ShortyTypeCharacteristics<float, ShortyFieldType::kFloat> chars;
- chars.is_primitive_ = true;
- chars.is_primitive_narrow_ = true;
- chars.CheckExpects();
- }
-
- // Double test
- {
- SCOPED_TRACE("double");
- ShortyTypeCharacteristics<double, ShortyFieldType::kDouble> chars;
- chars.is_primitive_ = true;
- chars.is_primitive_wide_ = true;
- chars.CheckExpects();
- }
-
- // Object test
- {
- SCOPED_TRACE("object");
- ShortyTypeCharacteristics<mirror::Object*, ShortyFieldType::kObject> chars;
- chars.is_object_ = true;
- chars.size_ = kObjectReferenceSize;
- chars.CheckExpects();
- EXPECT_EQ(kObjectReferenceSize, sizeof(mirror::CompressedReference<mirror::Object>));
- }
-
- // Lambda test
- {
- SCOPED_TRACE("lambda");
- ShortyTypeCharacteristics<Closure*, ShortyFieldType::kLambda> chars;
- chars.is_lambda_ = true;
- chars.is_dynamic_sized_ = true;
- chars.CheckExpects();
- }
-}
-
-} // namespace lambda
-} // namespace art
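
The deleted ShortyFieldTypeTraits picked its compile-time answers through
overload resolution on a null T* argument: an exact, non-template overload
beats the templated fallback. A minimal standalone sketch of the technique
(names invented, not part of the patch):

    #include <cstdint>

    struct Traits {
      // Dispatch to the Impl overloads below with a null T* argument.
      template <typename T>
      static constexpr bool IsWide() {
        return IsWideImpl(static_cast<T*>(nullptr));
      }

     private:
      // Exact-match overloads win over the template fallback.
      static constexpr bool IsWideImpl(int64_t*) { return true; }
      static constexpr bool IsWideImpl(double*) { return true; }
      template <typename T>
      static constexpr bool IsWideImpl(T*) { return false; }
    };

    static_assert(Traits::IsWide<int64_t>(), "64-bit integers are wide");
    static_assert(!Traits::IsWide<float>(), "floats are narrow");
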
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 8f5419c..8ad47eb 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -636,8 +636,9 @@
static_assert(sizeof(Primitive::Type) == sizeof(int32_t),
"art::Primitive::Type and int32_t have different sizes.");
int32_t v32 = GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_));
- Primitive::Type type = static_cast<Primitive::Type>(v32 & 0xFFFF);
- DCHECK_EQ(static_cast<size_t>(v32 >> 16), Primitive::ComponentSizeShift(type));
+ Primitive::Type type = static_cast<Primitive::Type>(v32 & kPrimitiveTypeMask);
+ DCHECK_EQ(static_cast<size_t>(v32 >> kPrimitiveTypeSizeShiftShift),
+ Primitive::ComponentSizeShift(type));
return type;
}
@@ -646,8 +647,9 @@
static_assert(sizeof(Primitive::Type) == sizeof(int32_t),
"art::Primitive::Type and int32_t have different sizes.");
int32_t v32 = GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_));
- size_t size_shift = static_cast<Primitive::Type>(v32 >> 16);
- DCHECK_EQ(size_shift, Primitive::ComponentSizeShift(static_cast<Primitive::Type>(v32 & 0xFFFF)));
+ size_t size_shift = static_cast<Primitive::Type>(v32 >> kPrimitiveTypeSizeShiftShift);
+ DCHECK_EQ(size_shift,
+ Primitive::ComponentSizeShift(static_cast<Primitive::Type>(v32 & kPrimitiveTypeMask)));
return size_shift;
}
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 5c490de..8f6ce44 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -64,6 +64,12 @@
// 2 ref instance fields.]
static constexpr uint32_t kClassWalkSuper = 0xC0000000;
+ // Shift primitive type by kPrimitiveTypeSizeShiftShift to get the component type size shift
+ // Used for computing array size as follows:
+ // array_bytes = header_size + (elements << (primitive_type >> kPrimitiveTypeSizeShiftShift))
+ static constexpr uint32_t kPrimitiveTypeSizeShiftShift = 16;
+ static constexpr uint32_t kPrimitiveTypeMask = (1u << kPrimitiveTypeSizeShiftShift) - 1;
+
// Class Status
//
// kStatusRetired: Class that's temporarily used till class linking time
@@ -371,10 +377,10 @@
void SetPrimitiveType(Primitive::Type new_type) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK_EQ(sizeof(Primitive::Type), sizeof(int32_t));
- int32_t v32 = static_cast<int32_t>(new_type);
- DCHECK_EQ(v32 & 0xFFFF, v32) << "upper 16 bits aren't zero";
+ uint32_t v32 = static_cast<uint32_t>(new_type);
+ DCHECK_EQ(v32 & kPrimitiveTypeMask, v32) << "upper 16 bits aren't zero";
// Store the component size shift in the upper 16 bits.
- v32 |= Primitive::ComponentSizeShift(new_type) << 16;
+ v32 |= Primitive::ComponentSizeShift(new_type) << kPrimitiveTypeSizeShiftShift;
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_), v32);
}
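
A standalone sketch of the 16/16-bit packing these constants name: the
primitive type sits in the low 16 bits of primitive_type_ and its component
size shift in the high 16 bits, so array sizing needs only one field load.
The concrete type id and header size below are made up for illustration.

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kPrimitiveTypeSizeShiftShift = 16;
    constexpr uint32_t kPrimitiveTypeMask = (1u << kPrimitiveTypeSizeShiftShift) - 1;

    constexpr uint32_t Pack(uint32_t type, uint32_t size_shift) {
      return type | (size_shift << kPrimitiveTypeSizeShiftShift);
    }

    int main() {
      // Hypothetical: type id 10 with 4-byte components (size shift 2).
      const uint32_t v32 = Pack(/*type=*/10, /*size_shift=*/2);
      assert((v32 & kPrimitiveTypeMask) == 10);            // Recover the type.
      assert((v32 >> kPrimitiveTypeSizeShiftShift) == 2);  // Recover the shift.

      // array_bytes = header_size + (elements << size_shift):
      const uint32_t header_size = 12;  // Made-up header size.
      assert(header_size + (5u << (v32 >> kPrimitiveTypeSizeShiftShift)) == 32u);
      return 0;
    }
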
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 595a47b..c7e4f8b 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -583,12 +583,6 @@
args.Set(M::HeapGrowthLimit, args.GetOrDefault(M::MemoryMaximumSize));
}
- if (args.GetOrDefault(M::Experimental) & ExperimentalFlags::kLambdas) {
- LOG(WARNING) << "Experimental lambdas have been enabled. All lambda opcodes have "
- << "an unstable specification and are nearly guaranteed to change over time. "
- << "Do not attempt to write shipping code against these opcodes.";
- }
-
*runtime_options = std::move(args);
return true;
}
@@ -709,8 +703,6 @@
UsageMessage(stream, " -X[no]image-dex2oat (Whether to create and use a boot image)\n");
UsageMessage(stream, " -Xno-dex-file-fallback "
"(Don't fall back to dex files without oat files)\n");
- UsageMessage(stream, " -Xexperimental:lambdas "
- "(Enable new and experimental dalvik opcodes and semantics)\n");
UsageMessage(stream, "\n");
UsageMessage(stream, "The following previously supported Dalvik options are ignored:\n");
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index 265587d..3245ba0 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -41,12 +41,12 @@
DCHECK_NE(method, GetImtConflictMethod());
DCHECK_NE(method, GetResolutionMethod());
// Don't use GetCalleeSaveMethod(), some tests don't set all callee save methods.
- if (method == GetCalleeSaveMethodUnchecked(Runtime::kRefsAndArgs)) {
- return GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
- } else if (method == GetCalleeSaveMethodUnchecked(Runtime::kSaveAll)) {
- return GetCalleeSaveMethodFrameInfo(Runtime::kSaveAll);
- } else if (method == GetCalleeSaveMethodUnchecked(Runtime::kRefsOnly)) {
- return GetCalleeSaveMethodFrameInfo(Runtime::kRefsOnly);
+ if (method == GetCalleeSaveMethodUnchecked(Runtime::kSaveRefsAndArgs)) {
+ return GetCalleeSaveMethodFrameInfo(Runtime::kSaveRefsAndArgs);
+ } else if (method == GetCalleeSaveMethodUnchecked(Runtime::kSaveAllCalleeSaves)) {
+ return GetCalleeSaveMethodFrameInfo(Runtime::kSaveAllCalleeSaves);
+ } else if (method == GetCalleeSaveMethodUnchecked(Runtime::kSaveRefsOnly)) {
+ return GetCalleeSaveMethodFrameInfo(Runtime::kSaveRefsOnly);
} else {
DCHECK_EQ(method, GetCalleeSaveMethodUnchecked(Runtime::kSaveEverything));
return GetCalleeSaveMethodFrameInfo(Runtime::kSaveEverything);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 50bea65..68fa0d3 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -78,7 +78,6 @@
#include "jit/jit.h"
#include "jni_internal.h"
#include "linear_alloc.h"
-#include "lambda/box_table.h"
#include "mirror/array.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
@@ -443,7 +442,6 @@
GetMonitorList()->SweepMonitorList(visitor);
GetJavaVM()->SweepJniWeakGlobals(visitor);
GetHeap()->SweepAllocationRecords(visitor);
- GetLambdaBoxTable()->SweepWeakBoxedLambdas(visitor);
}
bool Runtime::ParseOptions(const RuntimeOptions& raw_options,
@@ -1016,9 +1014,6 @@
jit_options_->SetSaveProfilingInfo(false);
}
- // Allocate a global table of boxed lambda objects <-> closures.
- lambda_box_table_ = MakeUnique<lambda::BoxTable>();
-
// Use MemMap arena pool for jit, malloc otherwise. Malloc arenas are faster to allocate but
// can't be trimmed as easily.
const bool use_malloc = IsAotCompiler();
@@ -1639,7 +1634,6 @@
intern_table_->ChangeWeakRootState(gc::kWeakRootStateNoReadsOrWrites);
java_vm_->DisallowNewWeakGlobals();
heap_->DisallowNewAllocationRecords();
- lambda_box_table_->DisallowNewWeakBoxedLambdas();
}
void Runtime::AllowNewSystemWeaks() {
@@ -1648,7 +1642,6 @@
intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal); // TODO: Do this in the sweeping.
java_vm_->AllowNewWeakGlobals();
heap_->AllowNewAllocationRecords();
- lambda_box_table_->AllowNewWeakBoxedLambdas();
}
void Runtime::BroadcastForNewSystemWeaks() {
@@ -1659,7 +1652,6 @@
intern_table_->BroadcastForNewInterns();
java_vm_->BroadcastForNewWeakGlobals();
heap_->BroadcastForNewAllocationRecords();
- lambda_box_table_->BroadcastForNewWeakBoxedLambdas();
}
void Runtime::SetInstructionSet(InstructionSet instruction_set) {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 7e269af..c971646 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -54,10 +54,6 @@
class JitOptions;
} // namespace jit
-namespace lambda {
- class BoxTable;
-} // namespace lambda
-
namespace mirror {
class ClassLoader;
class Array;
@@ -387,11 +383,11 @@
// Returns a special method that describes all callee saves being spilled to the stack.
enum CalleeSaveType {
- kSaveAll, // All callee-save registers.
- kRefsOnly,
- kRefsAndArgs,
- kSaveEverything, // Even caller-save registers.
- kLastCalleeSaveType // Value used for iteration
+ kSaveAllCalleeSaves, // All callee-save registers.
+ kSaveRefsOnly, // Only those callee-save registers that can hold references.
+ kSaveRefsAndArgs, // References (see above) and arguments (usually caller-save registers).
+ kSaveEverything, // All registers, including both callee-save and caller-save.
+ kLastCalleeSaveType // Value used for iteration
};
bool HasCalleeSaveMethod(CalleeSaveType type) const {
@@ -553,10 +549,6 @@
return (experimental_flags_ & flags) != ExperimentalFlags::kNone;
}
- lambda::BoxTable* GetLambdaBoxTable() const {
- return lambda_box_table_.get();
- }
-
// Create the JIT and instrumentation and code cache.
void CreateJit();
@@ -740,8 +732,6 @@
std::unique_ptr<jit::Jit> jit_;
std::unique_ptr<jit::JitOptions> jit_options_;
- std::unique_ptr<lambda::BoxTable> lambda_box_table_;
-
// Fault message, printed when we get a SIGSEGV.
Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::string fault_message_ GUARDED_BY(fault_message_lock_);
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 31206b5..b95dfad 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -117,7 +117,7 @@
RUNTIME_OPTIONS_KEY (Unit, NoDexFileFallback)
RUNTIME_OPTIONS_KEY (std::string, CpuAbiList)
RUNTIME_OPTIONS_KEY (std::string, Fingerprint)
-RUNTIME_OPTIONS_KEY (ExperimentalFlags, Experimental, ExperimentalFlags::kNone) // -Xexperimental:{none, lambdas}
+RUNTIME_OPTIONS_KEY (ExperimentalFlags, Experimental, ExperimentalFlags::kNone) // -Xexperimental:{none}
// Not parse-able from command line, but can be provided explicitly.
// (Do not add anything here that is defined in ParsedOptions::MakeParser)
diff --git a/runtime/simulator/Android.mk b/runtime/simulator/Android.mk
index 953a377..a34a841 100644
--- a/runtime/simulator/Android.mk
+++ b/runtime/simulator/Android.mk
@@ -88,9 +88,9 @@
LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE)
# For simulator_arm64.
ifeq ($$(art_ndebug_or_debug),debug)
- LOCAL_SHARED_LIBRARIES += libvixl-arm64
+ LOCAL_SHARED_LIBRARIES += libvixld-arm64
else
- LOCAL_SHARED_LIBRARIES += libvixl-arm64
+ LOCAL_SHARED_LIBRARIES += libvixl-arm64
endif
ifeq ($$(art_target_or_host),target)
include $(BUILD_SHARED_LIBRARY)
diff --git a/runtime/stack.cc b/runtime/stack.cc
index dc5cada..ababf78 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -729,7 +729,7 @@
Runtime* runtime = Runtime::Current();
if (method->IsAbstract()) {
- return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+ return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kSaveRefsAndArgs);
}
// This goes before IsProxyMethod since runtime methods have a null declaring class.
@@ -743,7 +743,7 @@
// compiled method without any stubs. Therefore the method must have a OatQuickMethodHeader.
DCHECK(!method->IsDirect() && !method->IsConstructor())
<< "Constructors of proxy classes must have a OatQuickMethodHeader";
- return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+ return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kSaveRefsAndArgs);
}
// The only remaining case is if the method is native and uses the generic JNI stub.
@@ -755,7 +755,8 @@
// Generic JNI frame.
uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(method) + 1;
size_t scope_size = HandleScope::SizeOf(handle_refs);
- QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+ QuickMethodFrameInfo callee_info =
+ runtime->GetCalleeSaveMethodFrameInfo(Runtime::kSaveRefsAndArgs);
// Callee saves + handle scope + method ref + alignment
// Note: -sizeof(void*) since callee-save frame stores a whole method pointer.
@@ -833,10 +834,12 @@
const instrumentation::InstrumentationStackFrame& instrumentation_frame =
GetInstrumentationStackFrame(thread_, instrumentation_stack_depth);
instrumentation_stack_depth++;
- if (GetMethod() == Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveAll)) {
+ if (GetMethod() ==
+ Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveAllCalleeSaves)) {
// Skip runtime save all callee frames which are used to deliver exceptions.
} else if (instrumentation_frame.interpreter_entry_) {
- ArtMethod* callee = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+ ArtMethod* callee =
+ Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs);
CHECK_EQ(GetMethod(), callee) << "Expected: " << PrettyMethod(callee) << " Found: "
<< PrettyMethod(GetMethod());
} else {
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 03de399..40f12e9 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1157,9 +1157,6 @@
case Instruction::kVerifyRegCWide:
result = result && CheckWideRegisterIndex(inst->VRegC());
break;
- case Instruction::kVerifyRegCString:
- result = result && CheckStringIndex(inst->VRegC());
- break;
}
switch (inst->GetVerifyExtraFlags()) {
case Instruction::kVerifyArrayData:
@@ -3331,69 +3328,15 @@
}
break;
}
- case Instruction::INVOKE_LAMBDA: {
- // Don't bother verifying, instead the interpreter will take the slow path with access checks.
- // If the code would've normally hard-failed, then the interpreter will throw the
- // appropriate verification errors at runtime.
- Fail(VERIFY_ERROR_FORCE_INTERPRETER); // TODO(iam): implement invoke-lambda verification
- break;
- }
- case Instruction::CAPTURE_VARIABLE: {
- // Don't bother verifying, instead the interpreter will take the slow path with access checks.
- // If the code would've normally hard-failed, then the interpreter will throw the
- // appropriate verification errors at runtime.
- Fail(VERIFY_ERROR_FORCE_INTERPRETER); // TODO(iam): implement capture-variable verification
- break;
- }
- case Instruction::CREATE_LAMBDA: {
- // Don't bother verifying, instead the interpreter will take the slow path with access checks.
- // If the code would've normally hard-failed, then the interpreter will throw the
- // appropriate verification errors at runtime.
- Fail(VERIFY_ERROR_FORCE_INTERPRETER); // TODO(iam): implement create-lambda verification
- break;
- }
- case Instruction::LIBERATE_VARIABLE: {
- // Don't bother verifying, instead the interpreter will take the slow path with access checks.
- // If the code would've normally hard-failed, then the interpreter will throw the
- // appropriate verification errors at runtime.
- Fail(VERIFY_ERROR_FORCE_INTERPRETER); // TODO(iam): implement liberate-variable verification
- break;
- }
-
- case Instruction::UNUSED_F4: {
- DCHECK(false); // TODO(iam): Implement opcodes for lambdas
- // Conservatively fail verification on release builds.
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Unexpected opcode " << inst->DumpString(dex_file_);
- break;
- }
-
- case Instruction::BOX_LAMBDA: {
- // Don't bother verifying, instead the interpreter will take the slow path with access checks.
- // If the code would've normally hard-failed, then the interpreter will throw the
- // appropriate verification errors at runtime.
- Fail(VERIFY_ERROR_FORCE_INTERPRETER); // TODO(iam): implement box-lambda verification
-
- // Partial verification. Sets the resulting type to always be an object, which
- // is good enough for some other verification to occur without hard-failing.
- const uint32_t vreg_target_object = inst->VRegA_22x(); // box-lambda vA, vB
- const RegType& reg_type = reg_types_.JavaLangObject(need_precise_constants_);
- work_line_->SetRegisterType<LockOp::kClear>(this, vreg_target_object, reg_type);
- break;
- }
-
- case Instruction::UNBOX_LAMBDA: {
- // Don't bother verifying, instead the interpreter will take the slow path with access checks.
- // If the code would've normally hard-failed, then the interpreter will throw the
- // appropriate verification errors at runtime.
- Fail(VERIFY_ERROR_FORCE_INTERPRETER); // TODO(iam): implement unbox-lambda verification
- break;
- }
/* These should never appear during verification. */
case Instruction::UNUSED_3E ... Instruction::UNUSED_43:
- case Instruction::UNUSED_FA ... Instruction::UNUSED_FF:
+ case Instruction::UNUSED_F3 ... Instruction::UNUSED_F9:
+ case Instruction::UNUSED_FC ... Instruction::UNUSED_FF:
case Instruction::UNUSED_79:
case Instruction::UNUSED_7A:
+ case Instruction::INVOKE_POLYMORPHIC:
+ case Instruction::INVOKE_POLYMORPHIC_RANGE:
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Unexpected opcode " << inst->DumpString(dex_file_);
break;
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 2592a21..5fe95c2 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -86,13 +86,10 @@
VERIFY_ERROR_ACCESS_METHOD = 128, // IllegalAccessError.
VERIFY_ERROR_CLASS_CHANGE = 256, // IncompatibleClassChangeError.
VERIFY_ERROR_INSTANTIATION = 512, // InstantiationError.
- // For opcodes that don't have complete verifier support (such as lambda opcodes),
- // we need a way to continue execution at runtime without attempting to re-verify
- // (since we know it will fail no matter what). Instead, run as the interpreter
- // in a special "do access checks" mode which will perform verifier-like checking
- // on the fly.
- //
- // TODO: Once all new opcodes have implemented full verifier support, this can be removed.
+ // For opcodes that don't have complete verifier support, we need a way to continue
+ // execution at runtime without attempting to re-verify (since we know it will fail no
+ // matter what). Instead, run as the interpreter in a special "do access checks" mode
+ // which will perform verifier-like checking on the fly.
VERIFY_ERROR_FORCE_INTERPRETER = 1024, // Skip the verification phase at runtime;
// force the interpreter to do access checks.
// (sets a soft fail at compile time).
diff --git a/test/538-checker-embed-constants/src/Main.java b/test/538-checker-embed-constants/src/Main.java
index f791adf..f6713a2 100644
--- a/test/538-checker-embed-constants/src/Main.java
+++ b/test/538-checker-embed-constants/src/Main.java
@@ -473,7 +473,7 @@
}
/**
- * Test that the `-1` constant is not synthesized in a register and that we
+ * ARM/ARM64: Test that the `-1` constant is not synthesized in a register and that we
* instead simply switch between `add` and `sub` instructions with the
* constant embedded.
* We need two uses (or more) of the constant because the compiler always
@@ -491,10 +491,137 @@
/// CHECK: sub x{{\d+}}, x{{\d+}}, #0x1
/// CHECK: add x{{\d+}}, x{{\d+}}, #0x1
+ /// CHECK-START-ARM: long Main.addM1(long) register (after)
+ /// CHECK: <<Arg:j\d+>> ParameterValue
+ /// CHECK: <<ConstM1:j\d+>> LongConstant -1
+ /// CHECK-NOT: ParallelMove
+ /// CHECK: Add [<<Arg>>,<<ConstM1>>]
+ /// CHECK: Sub [<<Arg>>,<<ConstM1>>]
+
+ /// CHECK-START-ARM: long Main.addM1(long) disassembly (after)
+ /// CHECK: <<Arg:j\d+>> ParameterValue
+ /// CHECK: <<ConstM1:j\d+>> LongConstant -1
+ /// CHECK: Add [<<Arg>>,<<ConstM1>>]
+ /// CHECK-NEXT: subs r{{\d+}}, #1
+ /// CHECK-NEXT: adc r{{\d+}}, r{{\d+}}, #-1
+ /// CHECK: Sub [<<Arg>>,<<ConstM1>>]
+ /// CHECK-NEXT: adds r{{\d+}}, #1
+ /// CHECK-NEXT: adc r{{\d+}}, r{{\d+}}, #0
+
public static long addM1(long arg) {
return (arg + (-1)) | (arg - (-1));
}
+ /**
+ * ARM: Test that some long constants are not synthesized in a register for add-long.
+ * Also test some negative cases where we do synthesize constants in registers.
+ */
+
+ /// CHECK-START-ARM: long Main.addLongConstants(long) disassembly (after)
+ /// CHECK: <<Arg:j\d+>> ParameterValue
+ /// CHECK-DAG: <<ConstA:j\d+>> LongConstant 4486007727657233
+ /// CHECK-DAG: <<ConstB:j\d+>> LongConstant 4486011735248896
+ /// CHECK-DAG: <<ConstC:j\d+>> LongConstant -1071856711330889728
+ /// CHECK-DAG: <<ConstD:j\d+>> LongConstant 17587891077120
+ /// CHECK-DAG: <<ConstE:j\d+>> LongConstant -8808977924096
+ /// CHECK-DAG: <<ConstF:j\d+>> LongConstant 17587891077121
+ /// CHECK-DAG: <<ConstG:j\d+>> LongConstant 4095
+ /// CHECK: Add [<<Arg>>,<<ConstA>>]
+ /// CHECK-NEXT: adds r{{\d+}}, r{{\d+}}, #286331153
+ /// CHECK-NEXT: adc r{{\d+}}, r{{\d+}}, #1044480
+ /// CHECK: Add [<<Arg>>,<<ConstB>>]
+ /// CHECK-NEXT: subs r{{\d+}}, r{{\d+}}, #1044480
+ /// CHECK-NEXT: adc r{{\d+}}, r{{\d+}}, #1044480
+ /// CHECK: Add [<<Arg>>,<<ConstC>>]
+ /// CHECK-NEXT: subs r{{\d+}}, r{{\d+}}, #16711680
+ /// CHECK-NEXT: sbc r{{\d+}}, r{{\d+}}, #249561088
+ /// CHECK: Add [<<Arg>>,<<ConstD>>]
+ // There may or may not be a MOV here.
+ /// CHECK: addw r{{\d+}}, r{{\d+}}, #4095
+ /// CHECK: Add [<<Arg>>,<<ConstE>>]
+ // There may or may not be a MOV here.
+ /// CHECK: subw r{{\d+}}, r{{\d+}}, #2051
+ /// CHECK: Add [<<Arg>>,<<ConstF>>]
+ /// CHECK-NEXT: adds{{(\.w)?}} r{{\d+}}, r{{\d+}}, r{{\d+}}
+ /// CHECK-NEXT: adc{{(\.w)?}} r{{\d+}}, r{{\d+}}, r{{\d+}}
+ /// CHECK: Add [<<Arg>>,<<ConstG>>]
+ /// CHECK-NEXT: adds{{(\.w)?}} r{{\d+}}, r{{\d+}}, r{{\d+}}
+ /// CHECK-NEXT: adc{{(\.w)?}} r{{\d+}}, r{{\d+}}, r{{\d+}}
+
+ public static long addLongConstants(long arg) {
+ return
+ // Modified immediates.
+ (arg + 0x000ff00011111111L) ^ // 4486007727657233
+ // Modified immediates high and -low.
+ (arg + 0x000ff000fff01000L) ^ // 4486011735248896
+ // Modified immediates ~high and -low.
+ (arg + 0xf11fffffff010000L) ^ // -1071856711330889728
+ // Low word 0 (no carry), high is imm12.
+ (arg + 0x00000fff00000000L) ^ // 17587891077120
+ // Low word 0 (no carry), -high is imm12.
+ (arg + 0xfffff7fd00000000L) ^ // -8808977924096
+ // Cannot embed imm12 in ADC/SBC for high word.
+ (arg + 0x00000fff00000001L) ^ // 17587891077121
+ // Cannot embed imm12 in ADDS/SUBS for low word (need to set flags).
+ (arg + 0x0000000000000fffL) ^ // 4095
+ arg;
+ }
+
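A note on the SUBS/ADC pairs checked above: the addM1 pattern (subs #1 / adc #-1) and the addLongConstants cases work because, for a constant whose negated low word is a modified immediate, subtracting the negated low word leaves the carry flag in exactly the state that adding the original low word would (ARM's SUBS sets carry to "no borrow"). A minimal Java sketch, not part of the test (class and method names are invented), that mimics the two-instruction sequence for (arg + 0x000ff000fff01000L) and checks it against plain addition:

public class SubsAdcSketch {
  // Mimics "subs r_lo, r_lo, #0x000ff000; adc r_hi, r_hi, #0x000ff000",
  // the sequence checked above for Add [<<Arg>>,<<ConstB>>] (1044480 == 0x000ff000).
  static long addViaSubsAdc(long arg) {
    int lo = (int) arg;
    int hi = (int) (arg >>> 32);
    int newLo = lo - 0x000ff000;  // same 32-bit result as lo + 0xfff01000
    // SUBS carry == "no borrow" == (unsigned) lo >= 0x000ff000, which is also
    // the carry-out of the 32-bit addition lo + 0xfff01000.
    int carry = Integer.compareUnsigned(lo, 0x000ff000) >= 0 ? 1 : 0;
    int newHi = hi + 0x000ff000 + carry;  // adc with the constant's high word
    return ((long) newHi << 32) | (newLo & 0xffffffffL);
  }

  public static void main(String[] args) {
    long c = 0x000ff000fff01000L;
    long[] samples = {0L, 1L, -1L, 0x000ff000L, 0x000fefffL, 0x0123456789abcdefL};
    for (long arg : samples) {
      if (addViaSubsAdc(arg) != arg + c) {
        throw new AssertionError("mismatch for 0x" + Long.toHexString(arg));
      }
    }
    System.out.println("SUBS/ADC sequence matches plain 64-bit addition");
  }
}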
+ /**
+ * ARM: Test that some long constants are not synthesized in a register for sub-long.
+ * Also test some negative cases where we do synthesize constants in registers.
+ */
+
+ /// CHECK-START-ARM: long Main.subLongConstants(long) disassembly (after)
+ /// CHECK: <<Arg:j\d+>> ParameterValue
+ /// CHECK-DAG: <<ConstA:j\d+>> LongConstant 4486007727657233
+ /// CHECK-DAG: <<ConstB:j\d+>> LongConstant 4486011735248896
+ /// CHECK-DAG: <<ConstC:j\d+>> LongConstant -1071856711330889728
+ /// CHECK-DAG: <<ConstD:j\d+>> LongConstant 17587891077120
+ /// CHECK-DAG: <<ConstE:j\d+>> LongConstant -8808977924096
+ /// CHECK-DAG: <<ConstF:j\d+>> LongConstant 17587891077121
+ /// CHECK-DAG: <<ConstG:j\d+>> LongConstant 4095
+ /// CHECK: Sub [<<Arg>>,<<ConstA>>]
+ /// CHECK-NEXT: subs r{{\d+}}, r{{\d+}}, #286331153
+ /// CHECK-NEXT: sbc r{{\d+}}, r{{\d+}}, #1044480
+ /// CHECK: Sub [<<Arg>>,<<ConstB>>]
+ /// CHECK-NEXT: adds r{{\d+}}, r{{\d+}}, #1044480
+ /// CHECK-NEXT: sbc r{{\d+}}, r{{\d+}}, #1044480
+ /// CHECK: Sub [<<Arg>>,<<ConstC>>]
+ /// CHECK-NEXT: adds r{{\d+}}, r{{\d+}}, #16711680
+ /// CHECK-NEXT: adc r{{\d+}}, r{{\d+}}, #249561088
+ /// CHECK: Sub [<<Arg>>,<<ConstD>>]
+ // There may or may not be a MOV here.
+ /// CHECK: subw r{{\d+}}, r{{\d+}}, #4095
+ /// CHECK: Sub [<<Arg>>,<<ConstE>>]
+ // There may or may not be a MOV here.
+ /// CHECK: addw r{{\d+}}, r{{\d+}}, #2051
+ /// CHECK: Sub [<<Arg>>,<<ConstF>>]
+ /// CHECK-NEXT: subs{{(\.w)?}} r{{\d+}}, r{{\d+}}, r{{\d+}}
+ /// CHECK-NEXT: sbc{{(\.w)?}} r{{\d+}}, r{{\d+}}, r{{\d+}}
+ /// CHECK: Sub [<<Arg>>,<<ConstG>>]
+ /// CHECK-NEXT: subs{{(\.w)?}} r{{\d+}}, r{{\d+}}, r{{\d+}}
+ /// CHECK-NEXT: sbc{{(\.w)?}} r{{\d+}}, r{{\d+}}, r{{\d+}}
+
+ public static long subLongConstants(long arg) {
+ return
+ // Modified immediates.
+ (arg - 0x000ff00011111111L) ^ // 4486007727657233
+ // Modified immediates high and -low.
+ (arg - 0x000ff000fff01000L) ^ // 4486011735248896
+ // Modified immediates ~high and -low.
+ (arg - 0xf11fffffff010000L) ^ // -1071856711330889728
+ // Low word 0 (no carry), high is imm12.
+ (arg - 0x00000fff00000000L) ^ // 17587891077120
+ // Low word 0 (no carry), -high is imm12.
+ (arg - 0xfffff7fd00000000L) ^ // -8808977924096
+ // Cannot embed imm12 in ADC/SBC for high word.
+ (arg - 0x00000fff00000001L) ^ // 17587891077121
+ // Cannot embed imm12 in ADDS/SUBS for low word (need to set flags).
+ (arg - 0x0000000000000fffL) ^ // 4095
+ arg;
+ }
+
public static void main(String[] args) {
int arg = 0x87654321;
assertIntEquals(and255(arg), 0x21);
@@ -522,7 +649,7 @@
assertLongEquals(xor0xfffffff00000000f(longArg), 0xedcba9888765432eL);
assertLongEquals(xor0xf00000000000000f(longArg), 0xe23456788765432eL);
- assertLongEquals(14, addM1(7));
+ assertLongEquals(14L, addM1(7));
assertLongEquals(shl1(longArg), 0x2468acf10eca8642L);
assertLongEquals(shl2(longArg), 0x48d159e21d950c84L);
@@ -562,5 +689,30 @@
assertLongEquals(ushr32(~longArg), 0x00000000edcba987L);
assertLongEquals(ushr33(~longArg), 0x0000000076e5d4c3L);
assertLongEquals(ushr63(~longArg), 0x0000000000000001L);
+
+ // Test -1, 0, +1 and arbitrary constants just before and after an overflow
+ // on the low word in subexpressions of addLongConstants()/subLongConstants(),
+ // to check that the carry into the high word is handled correctly (the
+ // sketch after this class reproduces the sums below). For example,
+ // 0x111eeeeeeee+0x000ff00011111111 = 0x000ff111ffffffff (carry=0),
+ // 0x111eeeeeeef+0x000ff00011111111 = 0x000ff11200000000 (carry=1).
+ assertLongEquals(0xf11ff7fdee1e1111L, addLongConstants(0xffffffffffffffffL));
+ assertLongEquals(0xee0080211e00eefL, addLongConstants(0x0L));
+ assertLongEquals(0xee0080211e01111L, addLongConstants(0x1L));
+ assertLongEquals(0xedff81c12201113L, addLongConstants(0x111eeeeeeeeL));
+ assertLongEquals(0xedff81feddfeef1L, addLongConstants(0x111eeeeeeefL));
+ assertLongEquals(0xedff83e11c1f111L, addLongConstants(0x222000fefffL));
+ assertLongEquals(0xedff83fee3e0eefL, addLongConstants(0x222000ff000L));
+ assertLongEquals(0xedff805edfe1111L, addLongConstants(0x33300feffffL));
+ assertLongEquals(0xedff80412000eefL, addLongConstants(0x33300ff0000L));
+ assertLongEquals(0xee0080211e00eefL, subLongConstants(0xffffffffffffffffL));
+ assertLongEquals(0xf11ff7fdee1e1111L, subLongConstants(0x0L));
+ assertLongEquals(0xf11ff7fc11e1eef3L, subLongConstants(0x1L));
+ assertLongEquals(0xee0080412201113L, subLongConstants(0x44411111111L));
+ assertLongEquals(0xee0080412201111L, subLongConstants(0x44411111112L));
+ assertLongEquals(0xee0080e11c1f111L, subLongConstants(0x555fff01000L));
+ assertLongEquals(0xee0080e11c1eef3L, subLongConstants(0x555fff01001L));
+ assertLongEquals(0xee0080dedfe1111L, subLongConstants(0x666ff010000L));
+ assertLongEquals(0xee0080dedffeef3L, subLongConstants(0x666ff010001L));
}
}
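The low-word carry boundary described in the comment above can be reproduced directly (illustrative snippet, not part of the test):

public class CarryBoundarySketch {
  public static void main(String[] args) {
    long c = 0x000ff00011111111L;  // ConstA in addLongConstants()
    // Prints ff111ffffffff: the low words sum to 0xffffffff, so carry == 0.
    System.out.println(Long.toHexString(0x111eeeeeeeeL + c));
    // Prints ff11200000000: one more on the low word overflows, so carry == 1.
    System.out.println(Long.toHexString(0x111eeeeeeefL + c));
  }
}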
diff --git a/test/617-clinit-oome/expected.txt b/test/617-clinit-oome/expected.txt
new file mode 100644
index 0000000..c1d33ff
--- /dev/null
+++ b/test/617-clinit-oome/expected.txt
@@ -0,0 +1 @@
+Filling heap
diff --git a/test/617-clinit-oome/info.txt b/test/617-clinit-oome/info.txt
new file mode 100644
index 0000000..ece35b2
--- /dev/null
+++ b/test/617-clinit-oome/info.txt
@@ -0,0 +1 @@
+Regression test for encoded static strings causing OOME b/30690988
diff --git a/test/617-clinit-oome/src/Main.java b/test/617-clinit-oome/src/Main.java
new file mode 100644
index 0000000..749a232
--- /dev/null
+++ b/test/617-clinit-oome/src/Main.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ // Load Other without initializing it; a Class literal does not run <clinit>.
+ Class<?> klass = Other.class;
+ Object[] data = new Object[100000];
+ try {
+ System.out.println("Filling heap");
+ int size = 256 * 1024 * 1024;
+ int index = 0;
+ // Allocate progressively smaller byte arrays until even small ones fail.
+ while (true) {
+ try {
+ data[index] = new byte[size];
+ index++;
+ } catch (OutOfMemoryError e) {
+ size /= 2;
+ if (size == 0) {
+ break;
+ }
+ }
+ }
+ // Initialize now that the heap is full.
+ Other.print();
+ } catch (OutOfMemoryError e) {
+ // Expected: initializing Other with a full heap cannot allocate its strings.
+ } catch (Exception e) {
+ System.err.println(e);
+ }
+ }
+}
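The test relies on Java's lazy class initialization: the Class literal at the top of main() loads Other without running its <clinit>, and only the later Other.print() call triggers initialization (and hence the allocation of the encoded static strings) once the heap is already full. A standalone sketch of that ordering, with invented names and not part of the test:

class InitProbe {
  static { System.out.println("InitProbe.<clinit> ran"); }
  static void touch() {}
}

public class InitOrderSketch {
  public static void main(String[] args) {
    Class<?> c = InitProbe.class;  // Loads the class; does not initialize it.
    System.out.println("got literal for " + c.getName());
    InitProbe.touch();             // First active use: <clinit> runs here.
  }
}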
diff --git a/test/617-clinit-oome/src/Other.java b/test/617-clinit-oome/src/Other.java
new file mode 100644
index 0000000..20306ee
--- /dev/null
+++ b/test/617-clinit-oome/src/Other.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public final class Other {
+ public static final String string1 = "ABCDEFG1";
+ public static final String string2 = "ABCDEFG2";
+ public static final String string3 = "ABCDEFG3";
+ public static final String string4 = "ABCDEFG4";
+ public static final String string5 = "ABCDEFG5";
+ public static final int int1 = 12;
+
+ public static void print() {
+ System.out.println(string2);
+ }
+}
diff --git a/test/955-lambda-smali/build b/test/955-lambda-smali/build
deleted file mode 100755
index 14230c2..0000000
--- a/test/955-lambda-smali/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# make us exit on a failure
-set -e
-
-./default-build "$@" --experimental default-methods
diff --git a/test/955-lambda-smali/expected.txt b/test/955-lambda-smali/expected.txt
deleted file mode 100644
index 16381e4..0000000
--- a/test/955-lambda-smali/expected.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-SanityCheck
-Hello world! (0-args, no closure)
-ABCD Hello world! (4-args, no closure)
-Caught NPE
-(BoxUnbox) Hello boxing world! (0-args, no closure)
-(BoxUnbox) Boxing repeatedly yields referentially-equal objects
-(BoxUnbox) Caught NPE for unbox-lambda
-(BoxUnbox) Caught NPE for box-lambda
-(BoxUnbox) Caught ClassCastException for unbox-lambda
-(MoveResult) testZ success
-(MoveResult) testB success
-(MoveResult) testS success
-(MoveResult) testI success
-(MoveResult) testC success
-(MoveResult) testJ success
-(MoveResult) testF success
-(MoveResult) testD success
-(MoveResult) testL success
-(CaptureVariables) (0-args, 1 captured variable 'Z'): value is true
-(CaptureVariables) (0-args, 1 captured variable 'B'): value is R
-(CaptureVariables) (0-args, 1 captured variable 'C'): value is ∂
-(CaptureVariables) (0-args, 1 captured variable 'S'): value is 1000
-(CaptureVariables) (0-args, 1 captured variable 'I'): value is 12345678
-(CaptureVariables) (0-args, 1 captured variable 'J'): value is 3287471278325742
-(CaptureVariables) (0-args, 1 captured variable 'F'): value is Infinity
-(CaptureVariables) (0-args, 1 captured variable 'D'): value is -Infinity
-(CaptureVariables) (0-args, 8 captured variable 'ZBCSIJFD'): value is true,R,∂,1000,12345678,3287471278325742,Infinity,-Infinity
-(CaptureVariables) Caught NPE
diff --git a/test/955-lambda-smali/info.txt b/test/955-lambda-smali/info.txt
deleted file mode 100644
index aed5e84..0000000
--- a/test/955-lambda-smali/info.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-Smali-based tests for experimental lambda intructions.
-
-Obviously needs to run under ART.
diff --git a/test/955-lambda-smali/run b/test/955-lambda-smali/run
deleted file mode 100755
index 2fb2f89..0000000
--- a/test/955-lambda-smali/run
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Ensure that the lambda experimental opcodes are turned on for dalvikvm and dex2oat
-${RUN} "$@" --experimental lambdas
diff --git a/test/955-lambda-smali/smali/BoxUnbox.smali b/test/955-lambda-smali/smali/BoxUnbox.smali
deleted file mode 100644
index 915de2d..0000000
--- a/test/955-lambda-smali/smali/BoxUnbox.smali
+++ /dev/null
@@ -1,168 +0,0 @@
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-.class public LBoxUnbox;
-.super Ljava/lang/Object;
-
-.method public constructor <init>()V
-.registers 1
- invoke-direct {p0}, Ljava/lang/Object;-><init>()V
- return-void
-.end method
-
-.method public static run()V
- .registers 0
-
- invoke-static {}, LBoxUnbox;->testBox()V
- invoke-static {}, LBoxUnbox;->testBoxEquality()V
- invoke-static {}, LBoxUnbox;->testFailures()V
- invoke-static {}, LBoxUnbox;->testFailures2()V
- invoke-static {}, LBoxUnbox;->testFailures3()V
- invoke-static {}, LBoxUnbox;->forceGC()V
-
- return-void
-.end method
-
-#TODO: should use a closure type instead of ArtMethod.
-.method public static doHelloWorld(J)V
- .registers 4 # 1 wide parameters, 2 locals
-
- const-string v0, "(BoxUnbox) Hello boxing world! (0-args, no closure)"
-
- sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v1, v0}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
-
- return-void
-.end method
-
-# Test boxing and unboxing; the same lambda should be invoked as if there was no box.
-.method private static testBox()V
- .registers 3
-
- create-lambda v0, LBoxUnbox;->doHelloWorld(J)V
- box-lambda v2, v0 # v2 = box(v0)
- unbox-lambda v0, v2, J # v0 = unbox(v2)
- invoke-lambda v0, {}
-
- return-void
-.end method
-
-# Test that boxing the same lambda twice yield the same object.
-.method private static testBoxEquality()V
- .registers 6 # 0 parameters, 6 locals
-
- create-lambda v0, LBoxUnbox;->doHelloWorld(J)V
- box-lambda v2, v0 # v2 = box(v0)
- box-lambda v3, v0 # v3 = box(v0)
-
- # The objects should be not-null, and they should have the same reference
- if-eqz v2, :is_zero
- if-ne v2, v3, :is_not_equal
-
- const-string v4, "(BoxUnbox) Boxing repeatedly yields referentially-equal objects"
- goto :end
-
-:is_zero
- const-string v4, "(BoxUnbox) Boxing repeatedly FAILED: boxing returned null"
- goto :end
-
-:is_not_equal
- const-string v4, "(BoxUnbox) Boxing repeatedly FAILED: objects were not same reference"
- goto :end
-
-:end
- sget-object v5, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v5, v4}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
- return-void
-.end method
-
-# Test exceptions are thrown as expected when used opcodes incorrectly
-.method private static testFailures()V
- .registers 4 # 0 parameters, 4 locals
-
- const v0, 0 # v0 = null
- const v1, 0 # v1 = null
-:start
- unbox-lambda v2, v0, J
- # attempting to unbox a null lambda will throw NPE
-:end
- return-void
-
-:handler
- const-string v2, "(BoxUnbox) Caught NPE for unbox-lambda"
- sget-object v3, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v3, v2}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
-
- return-void
-
- .catch Ljava/lang/NullPointerException; {:start .. :end} :handler
-.end method
-
-# Test exceptions are thrown as expected when used opcodes incorrectly
-.method private static testFailures2()V
- .registers 4 # 0 parameters, 4 locals
-
- const v0, 0 # v0 = null
- const v1, 0 # v1 = null
-:start
- box-lambda v2, v0 # attempting to box a null lambda will throw NPE
-:end
- return-void
-
- # TODO: refactor testFailures using a goto
-
-:handler
- const-string v2, "(BoxUnbox) Caught NPE for box-lambda"
- sget-object v3, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v3, v2}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
-
- return-void
-
- .catch Ljava/lang/NullPointerException; {:start .. :end} :handler
-.end method
-
-# Test exceptions are thrown as expected when used opcodes incorrectly
-.method private static testFailures3()V
- .registers 4 # 0 parameters, 4 locals
-
- const-string v0, "This is not a boxed lambda"
-:start
- # TODO: use \FunctionalType; here instead
- unbox-lambda v2, v0, J
- # can't use a string, expects a lambda object here. throws ClassCastException.
-:end
- return-void
-
- # TODO: refactor testFailures using a goto
-
-:handler
- const-string v2, "(BoxUnbox) Caught ClassCastException for unbox-lambda"
- sget-object v3, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v3, v2}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
-
- return-void
-
- .catch Ljava/lang/ClassCastException; {:start .. :end} :handler
-.end method
-
-
-# Force a GC. Used to ensure our weak reference table of boxed lambdas is getting swept.
-.method private static forceGC()V
- .registers 1
- invoke-static {}, Ljava/lang/Runtime;->getRuntime()Ljava/lang/Runtime;
- move-result-object v0
- invoke-virtual {v0}, Ljava/lang/Runtime;->gc()V
-
- return-void
-.end method
diff --git a/test/955-lambda-smali/smali/CaptureVariables.smali b/test/955-lambda-smali/smali/CaptureVariables.smali
deleted file mode 100644
index f18b7ff..0000000
--- a/test/955-lambda-smali/smali/CaptureVariables.smali
+++ /dev/null
@@ -1,311 +0,0 @@
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-.class public LCaptureVariables;
-.super Ljava/lang/Object;
-
-.method public constructor <init>()V
-.registers 1
- invoke-direct {p0}, Ljava/lang/Object;-><init>()V
- return-void
-.end method
-
-.method public static run()V
-.registers 8
- # Test boolean capture
- const v2, 1 # v2 = true
- capture-variable v2, "Z"
- create-lambda v0, LCaptureVariables;->printCapturedVariable_Z(J)V
- # TODO: create-lambda should not write to both v0 and v1
- invoke-lambda v0, {}
-
- # Test byte capture
- const v2, 82 # v2 = 82, 'R'
- capture-variable v2, "B"
- create-lambda v0, LCaptureVariables;->printCapturedVariable_B(J)V
- # TODO: create-lambda should not write to both v0 and v1
- invoke-lambda v0, {}
-
- # Test char capture
- const v2, 0x2202 # v2 = 0x2202, '∂'
- capture-variable v2, "C"
- create-lambda v0, LCaptureVariables;->printCapturedVariable_C(J)V
- # TODO: create-lambda should not write to both v0 and v1
- invoke-lambda v0, {}
-
- # Test short capture
- const v2, 1000 # v2 = 1000
- capture-variable v2, "S"
- create-lambda v0, LCaptureVariables;->printCapturedVariable_S(J)V
- # TODO: create-lambda should not write to both v0 and v1
- invoke-lambda v0, {}
-
- # Test int capture
- const v2, 12345678
- capture-variable v2, "I"
- create-lambda v0, LCaptureVariables;->printCapturedVariable_I(J)V
- # TODO: create-lambda should not write to both v0 and v1
- invoke-lambda v0, {}
-
- # Test long capture
- const-wide v2, 0x0badf00dc0ffeeL # v2 = 3287471278325742
- capture-variable v2, "J"
- create-lambda v0, LCaptureVariables;->printCapturedVariable_J(J)V
- # TODO: create-lambda should not write to both v0 and v1
- invoke-lambda v0, {}
-
- # Test float capture
- const v2, infinityf
- capture-variable v2, "F"
- create-lambda v0, LCaptureVariables;->printCapturedVariable_F(J)V
- # TODO: create-lambda should not write to both v0 and v1
- invoke-lambda v0, {}
-
- # Test double capture
- const-wide v2, -infinity
- capture-variable v2, "D"
- create-lambda v0, LCaptureVariables;->printCapturedVariable_D(J)V
- # TODO: create-lambda should not write to both v0 and v1
- invoke-lambda v0, {}
-
- #TODO: capture objects and lambdas once we have support for it
-
- # Test capturing multiple variables
- invoke-static {}, LCaptureVariables;->testMultipleCaptures()V
-
- # Test failures
- invoke-static {}, LCaptureVariables;->testFailures()V
-
- return-void
-.end method
-
-#TODO: should use a closure type instead of a long
-.method public static printCapturedVariable_Z(J)V
- .registers 5 # 1 wide parameter, 3 locals
-
- const-string v0, "(CaptureVariables) (0-args, 1 captured variable 'Z'): value is "
-
- sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v1, v0}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
-
- liberate-variable v2, p0, "Z"
- invoke-virtual {v1, v2}, Ljava/io/PrintStream;->println(Z)V
-
- return-void
-.end method
-
-#TODO: should use a closure type instead of a long
-.method public static printCapturedVariable_B(J)V
- .registers 5 # 1 wide parameter, 3 locals
-
- const-string v0, "(CaptureVariables) (0-args, 1 captured variable 'B'): value is "
-
- sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v1, v0}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
-
- liberate-variable v2, p0, "B"
- invoke-virtual {v1, v2}, Ljava/io/PrintStream;->println(C)V # no println(B), use char instead.
-
- return-void
-.end method
-
-#TODO: should use a closure type instead of a long
-.method public static printCapturedVariable_C(J)V
- .registers 5 # 1 wide parameter, 3 locals
-
- const-string v0, "(CaptureVariables) (0-args, 1 captured variable 'C'): value is "
-
- sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v1, v0}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
-
- liberate-variable v2, p0, "C"
- invoke-virtual {v1, v2}, Ljava/io/PrintStream;->println(C)V
-
- return-void
-.end method
-
-#TODO: should use a closure type instead of a long
-.method public static printCapturedVariable_S(J)V
- .registers 5 # 1 wide parameter, 3 locals
-
- const-string v0, "(CaptureVariables) (0-args, 1 captured variable 'S'): value is "
-
- sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v1, v0}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
-
- liberate-variable v2, p0, "S"
- invoke-virtual {v1, v2}, Ljava/io/PrintStream;->println(I)V # no println(S), use int instead
-
- return-void
-.end method
-
-#TODO: should use a closure type instead of a long
-.method public static printCapturedVariable_I(J)V
- .registers 5 # 1 wide parameter, 3 locals
-
- const-string v0, "(CaptureVariables) (0-args, 1 captured variable 'I'): value is "
-
- sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v1, v0}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
-
- liberate-variable v2, p0, "I"
- invoke-virtual {v1, v2}, Ljava/io/PrintStream;->println(I)V
-
- return-void
-.end method
-
-#TODO: should use a closure type instead of a long
-.method public static printCapturedVariable_J(J)V
- .registers 6 # 1 wide parameter, 4 locals
-
- const-string v0, "(CaptureVariables) (0-args, 1 captured variable 'J'): value is "
-
- sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v1, v0}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
-
- liberate-variable v2, p0, "J"
- invoke-virtual {v1, v2, v3}, Ljava/io/PrintStream;->println(J)V
-
- return-void
-.end method
-
-#TODO: should use a closure type instead of a long
-.method public static printCapturedVariable_F(J)V
- .registers 5 # 1 parameter, 4 locals
-
- const-string v0, "(CaptureVariables) (0-args, 1 captured variable 'F'): value is "
-
- sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v1, v0}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
-
- liberate-variable v2, p0, "F"
- invoke-virtual {v1, v2}, Ljava/io/PrintStream;->println(F)V
-
- return-void
-.end method
-
-#TODO: should use a closure type instead of a long
-.method public static printCapturedVariable_D(J)V
- .registers 6 # 1 wide parameter, 4 locals
-
- const-string v0, "(CaptureVariables) (0-args, 1 captured variable 'D'): value is "
-
- sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v1, v0}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
-
- liberate-variable v2, p0, "D"
- invoke-virtual {v1, v2, v3}, Ljava/io/PrintStream;->println(D)V
-
- return-void
-.end method
-
-# Test capturing more than one variable.
-.method private static testMultipleCaptures()V
- .registers 4 # 0 parameters, 4 locals
-
- const v2, 1 # v2 = true
- capture-variable v2, "Z"
-
- const v2, 82 # v2 = 82, 'R'
- capture-variable v2, "B"
-
- const v2, 0x2202 # v2 = 0x2202, '∂'
- capture-variable v2, "C"
-
- const v2, 1000 # v2 = 1000
- capture-variable v2, "S"
-
- const v2, 12345678
- capture-variable v2, "I"
-
- const-wide v2, 0x0badf00dc0ffeeL # v2 = 3287471278325742
- capture-variable v2, "J"
-
- const v2, infinityf
- capture-variable v2, "F"
-
- const-wide v2, -infinity
- capture-variable v2, "D"
-
- create-lambda v0, LCaptureVariables;->printCapturedVariable_ZBCSIJFD(J)V
- # TODO: create-lambda should not write to both v0 and v1
- invoke-lambda v0, {}
-
-.end method
-
-#TODO: should use a closure type instead of a long
-.method public static printCapturedVariable_ZBCSIJFD(J)V
- .registers 7 # 1 wide parameter, 5 locals
-
- const-string v0, "(CaptureVariables) (0-args, 8 captured variable 'ZBCSIJFD'): value is "
- const-string v4, ","
-
- sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v1, v0}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
-
- liberate-variable v2, p0, "Z"
- invoke-virtual {v1, v2}, Ljava/io/PrintStream;->print(Z)V
- invoke-virtual {v1, v4}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
-
- liberate-variable v2, p0, "B"
- invoke-virtual {v1, v2}, Ljava/io/PrintStream;->print(C)V
- invoke-virtual {v1, v4}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
-
- liberate-variable v2, p0, "C"
- invoke-virtual {v1, v2}, Ljava/io/PrintStream;->print(C)V
- invoke-virtual {v1, v4}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
-
- liberate-variable v2, p0, "S"
- invoke-virtual {v1, v2}, Ljava/io/PrintStream;->print(I)V
- invoke-virtual {v1, v4}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
-
- liberate-variable v2, p0, "I"
- invoke-virtual {v1, v2}, Ljava/io/PrintStream;->print(I)V
- invoke-virtual {v1, v4}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
-
- liberate-variable v2, p0, "J"
- invoke-virtual {v1, v2, v3}, Ljava/io/PrintStream;->print(J)V
- invoke-virtual {v1, v4}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
-
- liberate-variable v2, p0, "F"
- invoke-virtual {v1, v2}, Ljava/io/PrintStream;->print(F)V
- invoke-virtual {v1, v4}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
-
- liberate-variable v2, p0, "D"
- invoke-virtual {v1, v2, v3}, Ljava/io/PrintStream;->println(D)V
-
- return-void
-.end method
-
-# Test exceptions are thrown as expected when used opcodes incorrectly
-.method private static testFailures()V
- .registers 4 # 0 parameters, 4 locals
-
- const v0, 0 # v0 = null
- const v1, 0 # v1 = null
-:start
- liberate-variable v0, v2, "Z" # invoking a null lambda shall raise an NPE
-:end
- return-void
-
-:handler
- const-string v2, "(CaptureVariables) Caught NPE"
- sget-object v3, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v3, v2}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
-
- return-void
-
- .catch Ljava/lang/NullPointerException; {:start .. :end} :handler
-.end method
diff --git a/test/955-lambda-smali/smali/Main.smali b/test/955-lambda-smali/smali/Main.smali
deleted file mode 100644
index 9892d61..0000000
--- a/test/955-lambda-smali/smali/Main.smali
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-.class public LMain;
-
-.super Ljava/lang/Object;
-
-.method public static main([Ljava/lang/String;)V
- .registers 2
-
- invoke-static {}, LSanityCheck;->run()I
- invoke-static {}, LTrivialHelloWorld;->run()V
- invoke-static {}, LBoxUnbox;->run()V
- invoke-static {}, LMoveResult;->run()V
- invoke-static {}, LCaptureVariables;->run()V
-
-# TODO: add tests when verification fails
-
- return-void
-.end method
diff --git a/test/955-lambda-smali/smali/MoveResult.smali b/test/955-lambda-smali/smali/MoveResult.smali
deleted file mode 100644
index 52f7ba3..0000000
--- a/test/955-lambda-smali/smali/MoveResult.smali
+++ /dev/null
@@ -1,330 +0,0 @@
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-.class public LMoveResult;
-.super Ljava/lang/Object;
-
-.method public constructor <init>()V
-.registers 1
- invoke-direct {p0}, Ljava/lang/Object;-><init>()V
- return-void
-.end method
-
-.method public static run()V
-.registers 8
- invoke-static {}, LMoveResult;->testZ()V
- invoke-static {}, LMoveResult;->testB()V
- invoke-static {}, LMoveResult;->testS()V
- invoke-static {}, LMoveResult;->testI()V
- invoke-static {}, LMoveResult;->testC()V
- invoke-static {}, LMoveResult;->testJ()V
- invoke-static {}, LMoveResult;->testF()V
- invoke-static {}, LMoveResult;->testD()V
- invoke-static {}, LMoveResult;->testL()V
-
- return-void
-.end method
-
-# Test that booleans are returned correctly via move-result.
-.method public static testZ()V
- .registers 6
-
- create-lambda v0, LMoveResult;->lambdaZ(J)Z
- invoke-lambda v0, {}
- move-result v2
- const v3, 1
-
- if-ne v3, v2, :is_not_equal
- const-string v4, "(MoveResult) testZ success"
- goto :end
-
-:is_not_equal
- const-string v4, "(MoveResult) testZ failed"
-
-:end
- sget-object v5, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v5, v4}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
- return-void
-
-.end method
-
-# Lambda target for testZ. Always returns "true".
-.method public static lambdaZ(J)Z
- .registers 3
-
- const v0, 1
- return v0
-
-.end method
-
-# Test that bytes are returned correctly via move-result.
-.method public static testB()V
- .registers 6
-
- create-lambda v0, LMoveResult;->lambdaB(J)B
- invoke-lambda v0, {}
- move-result v2
- const v3, 15
-
- if-ne v3, v2, :is_not_equal
- const-string v4, "(MoveResult) testB success"
- goto :end
-
-:is_not_equal
- const-string v4, "(MoveResult) testB failed"
-
-:end
- sget-object v5, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v5, v4}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
- return-void
-
-.end method
-
-# Lambda target for testB. Always returns "15".
-.method public static lambdaB(J)B
- .registers 3 # 1 parameters, 2 locals
-
- const v0, 15
- return v0
-
-.end method
-
-# Test that shorts are returned correctly via move-result.
-.method public static testS()V
- .registers 6
-
- create-lambda v0, LMoveResult;->lambdaS(J)S
- invoke-lambda v0, {}
- move-result v2
- const/16 v3, 31000
-
- if-ne v3, v2, :is_not_equal
- const-string v4, "(MoveResult) testS success"
- goto :end
-
-:is_not_equal
- const-string v4, "(MoveResult) testS failed"
-
-:end
- sget-object v5, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v5, v4}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
- return-void
-
-.end method
-
-# Lambda target for testS. Always returns "31000".
-.method public static lambdaS(J)S
- .registers 3
-
- const/16 v0, 31000
- return v0
-
-.end method
-
-# Test that ints are returned correctly via move-result.
-.method public static testI()V
- .registers 6
-
- create-lambda v0, LMoveResult;->lambdaI(J)I
- invoke-lambda v0, {}
- move-result v2
- const v3, 128000
-
- if-ne v3, v2, :is_not_equal
- const-string v4, "(MoveResult) testI success"
- goto :end
-
-:is_not_equal
- const-string v4, "(MoveResult) testI failed"
-
-:end
- sget-object v5, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v5, v4}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
- return-void
-
-.end method
-
-# Lambda target for testI. Always returns "128000".
-.method public static lambdaI(J)I
- .registers 3
-
- const v0, 128000
- return v0
-
-.end method
-
-# Test that chars are returned correctly via move-result.
-.method public static testC()V
- .registers 7
-
- create-lambda v0, LMoveResult;->lambdaC(J)C
- invoke-lambda v0, {}
- move-result v2
- const v3, 65535
-
- if-ne v3, v2, :is_not_equal
- const-string v4, "(MoveResult) testC success"
- goto :end
-
-:is_not_equal
- const-string v4, "(MoveResult) testC failed"
-
-:end
- sget-object v5, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v5, v4}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
- return-void
-
-.end method
-
-# Lambda target for testC. Always returns "65535".
-.method public static lambdaC(J)C
- .registers 3
-
- const v0, 65535
- return v0
-
-.end method
-
-# Test that longs are returned correctly via move-result.
-.method public static testJ()V
- .registers 9
-
- create-lambda v0, LMoveResult;->lambdaJ(J)J
- invoke-lambda v0, {}
- move-result v2
- const-wide v4, 0xdeadf00dc0ffeeL
-
- if-ne v4, v2, :is_not_equal
- const-string v6, "(MoveResult) testJ success"
- goto :end
-
-:is_not_equal
- const-string v6, "(MoveResult) testJ failed"
-
-:end
- sget-object v7, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v7, v6}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
- return-void
-
-.end method
-
-# Lambda target for testC. Always returns "0xdeadf00dc0ffeeL".
-.method public static lambdaJ(J)J
- .registers 5
-
- const-wide v0, 0xdeadf00dc0ffeeL
- return-wide v0
-
-.end method
-
-# Test that floats are returned correctly via move-result.
-.method public static testF()V
- .registers 6
-
- create-lambda v0, LMoveResult;->lambdaF(J)F
- invoke-lambda v0, {}
- move-result v2
- const v3, infinityf
-
- if-ne v3, v2, :is_not_equal
- const-string v4, "(MoveResult) testF success"
- goto :end
-
-:is_not_equal
- const-string v4, "(MoveResult) testF failed"
-
-:end
- sget-object v5, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v5, v4}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
- return-void
-
-.end method
-
-# Lambda target for testF. Always returns "infinityf".
-.method public static lambdaF(J)F
- .registers 4
-
- const v0, infinityf
- return v0
-
-.end method
-
-# Test that doubles are returned correctly via move-result.
-.method public static testD()V
- .registers 8
-
- create-lambda v0, LMoveResult;->lambdaD(J)D
- invoke-lambda v0, {}
- move-result-wide v2
- const-wide v4, -infinity
-
- if-ne v4, v2, :is_not_equal
- const-string v6, "(MoveResult) testD success"
- goto :end
-
-:is_not_equal
- const-string v6, "(MoveResult) testD failed"
-
-:end
- sget-object v7, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v7, v6}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
- return-void
-
-.end method
-
-# Lambda target for testD. Always returns "infinity".
-.method public static lambdaD(J)D
- .registers 5
-
- const-wide v0, -infinity
- return-wide v0
-
-.end method
-
-
-# Test that objects are returned correctly via move-result.
-.method public static testL()V
- .registers 8
-
- create-lambda v0, LMoveResult;->lambdaL(J)Ljava/lang/String;
- invoke-lambda v0, {}
- move-result-object v2
- const-string v4, "Interned string"
-
- # relies on string interning returning identical object references
- if-ne v4, v2, :is_not_equal
- const-string v6, "(MoveResult) testL success"
- goto :end
-
-:is_not_equal
- const-string v6, "(MoveResult) testL failed"
-
-:end
- sget-object v7, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v7, v6}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
- return-void
-
-.end method
-
-# Lambda target for testL. Always returns "Interned string" (string).
-.method public static lambdaL(J)Ljava/lang/String;
- .registers 5
-
- const-string v0, "Interned string"
- return-object v0
-
-.end method
-
-
diff --git a/test/955-lambda-smali/smali/SanityCheck.smali b/test/955-lambda-smali/smali/SanityCheck.smali
deleted file mode 100644
index 4c807d7..0000000
--- a/test/955-lambda-smali/smali/SanityCheck.smali
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-.class public LSanityCheck;
-.super Ljava/lang/Object;
-
-
-.method public constructor <init>()V
-.registers 1
- invoke-direct {p0}, Ljava/lang/Object;-><init>()V
- return-void
-.end method
-
-# This test is just here to make sure that we can at least execute basic non-lambda
-# functionality such as printing (when lambdas are enabled in the runtime).
-.method public static run()I
-# Don't use too many registers here to avoid hitting the Stack::SanityCheck frame<2KB assert
-.registers 3
- const-string v0, "SanityCheck"
- sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v1, v0}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
- const v2, 123456
- return v2
-.end method
diff --git a/test/955-lambda-smali/smali/TrivialHelloWorld.smali b/test/955-lambda-smali/smali/TrivialHelloWorld.smali
deleted file mode 100644
index 3444b13..0000000
--- a/test/955-lambda-smali/smali/TrivialHelloWorld.smali
+++ /dev/null
@@ -1,94 +0,0 @@
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-.class public LTrivialHelloWorld;
-.super Ljava/lang/Object;
-
-.method public constructor <init>()V
-.registers 1
- invoke-direct {p0}, Ljava/lang/Object;-><init>()V
- return-void
-.end method
-
-.method public static run()V
-.registers 8
- # Trivial 0-arg hello world
- create-lambda v0, LTrivialHelloWorld;->doHelloWorld(J)V
- # TODO: create-lambda should not write to both v0 and v1
- invoke-lambda v0, {}
-
- # Slightly more interesting 4-arg hello world
- create-lambda v2, doHelloWorldArgs(JLjava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)V
- # TODO: create-lambda should not write to both v2 and v3
- const-string v4, "A"
- const-string v5, "B"
- const-string v6, "C"
- const-string v7, "D"
- invoke-lambda v2, {v4, v5, v6, v7}
-
- invoke-static {}, LTrivialHelloWorld;->testFailures()V
-
- return-void
-.end method
-
-#TODO: should use a closure type instead of jlong.
-.method public static doHelloWorld(J)V
- .registers 5 # 1 wide parameters, 3 locals
-
- const-string v0, "Hello world! (0-args, no closure)"
-
- sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v1, v0}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
-
- return-void
-.end method
-
-#TODO: should use a closure type instead of jlong.
-.method public static doHelloWorldArgs(JLjava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)V
- .registers 9 # 1 wide parameter, 4 narrow parameters, 3 locals
-
- const-string v0, " Hello world! (4-args, no closure)"
- sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
-
- invoke-virtual {v1, p2}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
- invoke-virtual {v1, p3}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
- invoke-virtual {v1, p4}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
- invoke-virtual {v1, p5}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
-
- invoke-virtual {v1, v0}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
-
- return-void
-.end method
-
-# Test exceptions are thrown as expected when used opcodes incorrectly
-.method private static testFailures()V
- .registers 4 # 0 parameters, 4 locals
-
- const v0, 0 # v0 = null
- const v1, 0 # v1 = null
-:start
- invoke-lambda v0, {} # invoking a null lambda shall raise an NPE
-:end
- return-void
-
-:handler
- const-string v2, "Caught NPE"
- sget-object v3, Ljava/lang/System;->out:Ljava/io/PrintStream;
- invoke-virtual {v3, v2}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
-
- return-void
-
- .catch Ljava/lang/NullPointerException; {:start .. :end} :handler
-.end method
diff --git a/tools/cpp-define-generator/constant_globals.def b/tools/cpp-define-generator/constant_globals.def
index 1e24d64..a3ccc72 100644
--- a/tools/cpp-define-generator/constant_globals.def
+++ b/tools/cpp-define-generator/constant_globals.def
@@ -25,6 +25,7 @@
DEFINE_OBJECT_EXPR(ALIGNMENT_MASK, size_t, art::kObjectAlignment - 1)
DEFINE_OBJECT_EXPR(ALIGNMENT_MASK_TOGGLED, uint32_t, ~static_cast<uint32_t>(art::kObjectAlignment - 1))
+DEFINE_OBJECT_EXPR(ALIGNMENT_MASK_TOGGLED64, uint64_t, ~static_cast<uint64_t>(art::kObjectAlignment - 1))
#undef DEFINE_OBJECT_EXPR
diff --git a/tools/cpp-define-generator/constant_heap.def b/tools/cpp-define-generator/constant_heap.def
new file mode 100644
index 0000000..dc76736
--- /dev/null
+++ b/tools/cpp-define-generator/constant_heap.def
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Export heap values.
+
+#if defined(DEFINE_INCLUDE_DEPENDENCIES)
+#include "gc/heap.h"
+#endif
+
+// Minimum object size for an allocation to go to the large object space.
+DEFINE_EXPR(MIN_LARGE_OBJECT_THRESHOLD, size_t, art::gc::Heap::kMinLargeObjectThreshold)
+
diff --git a/tools/cpp-define-generator/offset_runtime.def b/tools/cpp-define-generator/offset_runtime.def
index 123992f..17167a0 100644
--- a/tools/cpp-define-generator/offset_runtime.def
+++ b/tools/cpp-define-generator/offset_runtime.def
@@ -25,15 +25,15 @@
// Note: these callee-save method loads require read barriers.
#define DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(field_name, constant_name) \
- DEFINE_OFFSET_EXPR(Runtime, field_name ## _CALLEE_SAVE_FRAME, size_t, art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: constant_name))
+ DEFINE_OFFSET_EXPR(Runtime, field_name ## _METHOD, size_t, art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: constant_name))
// Macro substring Constant name
-// Offset of field Runtime::callee_save_methods_[kSaveAll]
-DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_ALL, kSaveAll)
-// Offset of field Runtime::callee_save_methods_[kRefsOnly]
-DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(REFS_ONLY, kRefsOnly)
-// Offset of field Runtime::callee_save_methods_[kRefsAndArgs]
-DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(REFS_AND_ARGS, kRefsAndArgs)
+// Offset of field Runtime::callee_save_methods_[kSaveAllCalleeSaves]
+DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_ALL_CALLEE_SAVES, kSaveAllCalleeSaves)
+// Offset of field Runtime::callee_save_methods_[kSaveRefsOnly]
+DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_REFS_ONLY, kSaveRefsOnly)
+// Offset of field Runtime::callee_save_methods_[kSaveRefsAndArgs]
+DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_REFS_AND_ARGS, kSaveRefsAndArgs)
// Offset of field Runtime::callee_save_methods_[kSaveEverything]
DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_EVERYTHING, kSaveEverything)
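After the rename, the macro pastes _METHOD (rather than _CALLEE_SAVE_FRAME) onto the prefix. The expansion of one entry is purely mechanical; shown here for illustration:

// DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_REFS_ONLY, kSaveRefsOnly) expands to:
DEFINE_OFFSET_EXPR(Runtime, SAVE_REFS_ONLY_METHOD, size_t,
                   art::Runtime::GetCalleeSaveMethodOffset(art::Runtime::kSaveRefsOnly))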
diff --git a/tools/cpp-define-generator/offsets_all.def b/tools/cpp-define-generator/offsets_all.def
index 01e4d5b..d2d8777 100644
--- a/tools/cpp-define-generator/offsets_all.def
+++ b/tools/cpp-define-generator/offsets_all.def
@@ -48,6 +48,7 @@
// TODO: MIRROR_*_ARRAY offsets (depends on header size)
// TODO: MIRROR_STRING offsets (depends on header size)
#include "offset_dexcache.def"
+#include "constant_heap.def"
#include "constant_lockword.def"
#include "constant_globals.def"
#include "constant_rosalloc.def"