AArch64: Add suspend check in managed code.
TODO: Remove x19 from the frame in the runtime, generic JNI and compiled JNI.
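
For reference, with this change OpTestSuspend emits roughly the following
sequence (a sketch only, assuming rA64_SUSPEND maps to w19 as noted in the
FIXME below; <suspend_path> is a placeholder, the actual branch target is
wired up by the caller):

    subs w19, w19, #1    // decrement the suspend counter, setting flags
    b.eq <suspend_path>  // counter reached zero: take the suspend check path

When an explicit target is passed in, the branch condition is b.ne instead,
matching the (target == NULL) ? kCondEq : kCondNe selection.
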
Change-Id: Ibdc292c9e7adb3a5d3eff353c22f60ffc101f549
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 9bad736..c072959 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -692,7 +692,7 @@
// S : short
// C : char
// I : int
-// L : long
+// J : long
// F : float
// D : double
// L : reference(object, array)
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 38f110e..8dad90a 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -725,17 +725,10 @@
// Test suspend flag, return target of taken suspend branch
LIR* Arm64Mir2Lir::OpTestSuspend(LIR* target) {
- // TODO(Arm64): re-enable suspend checks, once art_quick_test_suspend is implemented and
- // the suspend register is properly handled in the trampolines.
-#if 0
+ // FIXME: Define rA64_SUSPEND as w19 once we no longer need two copies of the reserved register.
+ // Note: The opcode is not marked as wide, so we are actually using the 32-bit view of the register.
NewLIR3(kA64Subs3rRd, rA64_SUSPEND, rA64_SUSPEND, 1);
return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
-#else
- // TODO(Arm64): Fake suspend check. Will always fail to branch. Remove this.
- LIR* branch = NewLIR2((target == NULL) ? kA64Cbnz2rt : kA64Cbz2rt, rwzr, 0);
- branch->target = target;
- return branch;
-#endif
}
// Decrement register and branch on condition
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 808060d..0222447 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -630,12 +630,6 @@
DCHECK_EQ(info->StorageMask(), 0x1U);
}
- // TODO: re-enable this when we can safely save r4 over the suspension code path.
- bool no_suspend = NO_SUSPEND; // || !Runtime::Current()->ExplicitSuspendChecks();
- if (no_suspend) {
- GetRegInfo(rs_rA64_SUSPEND)->MarkFree();
- }
-
// Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
// TODO: adjust when we roll to hard float calling convention.
reg_pool_->next_core_reg_ = 2;