Add implicit null pointer and stack overflow checks for Mips.
Bug: 21555893
Change-Id: I2a995be128a5603d08753c14956dd8c8240ac63c
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index da12d8e..853980d 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -24,6 +24,7 @@
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "driver/compiler_driver.h"
+#include "driver/compiler_options.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
#include "mips_lir.h"
@@ -285,12 +286,25 @@
RegStorage check_reg = AllocPtrSizeTemp();
RegStorage new_sp = AllocPtrSizeTemp();
const RegStorage rs_sp = TargetPtrReg(kSp);
+ const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(target);
+ const bool large_frame = static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes;
+ bool generate_explicit_stack_overflow_check = large_frame ||
+ !cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks();
+
if (!skip_overflow_check) {
- // Load stack limit.
- if (cu_->target64) {
- LoadWordDisp(TargetPtrReg(kSelf), Thread::StackEndOffset<8>().Int32Value(), check_reg);
+ if (generate_explicit_stack_overflow_check) {
+ // Load stack limit.
+ if (cu_->target64) {
+ LoadWordDisp(TargetPtrReg(kSelf), Thread::StackEndOffset<8>().Int32Value(), check_reg);
+ } else {
+ Load32Disp(TargetPtrReg(kSelf), Thread::StackEndOffset<4>().Int32Value(), check_reg);
+ }
} else {
- Load32Disp(TargetPtrReg(kSelf), Thread::StackEndOffset<4>().Int32Value(), check_reg);
+ // Implicit stack overflow check.
+ // Generate a load from [sp, #-overflowsize]. If this is in the stack
+ // redzone we will get a segmentation fault.
+ Load32Disp(rs_sp, -kStackOverflowReservedUsableBytes, rs_rZERO);
+ MarkPossibleStackOverflowException();
}
}
// Spill core callee saves.
@@ -298,7 +312,7 @@
// NOTE: promotion of FP regs currently unsupported, thus no FP spill.
DCHECK_EQ(num_fp_spills_, 0);
const int frame_sub = frame_size_ - spill_count * ptr_size;
- if (!skip_overflow_check) {
+ if (!skip_overflow_check && generate_explicit_stack_overflow_check) {
class StackOverflowSlowPath : public LIRSlowPath {
public:
StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
@@ -329,6 +343,8 @@
OpRegCopy(rs_sp, new_sp); // Establish stack.
cfi_.AdjustCFAOffset(frame_sub);
} else {
+ // Here if skip_overflow_check or doing implicit stack overflow check.
+ // Just make room on the stack for the frame now.
OpRegImm(kOpSub, rs_sp, frame_sub);
cfi_.AdjustCFAOffset(frame_sub);
}
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 713264e..43fbcbd 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -79,6 +79,7 @@
OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
+ void ForceImplicitNullCheck(RegStorage reg, int opt_flags, bool is_wide);
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
VolatileKind is_volatile) OVERRIDE;
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index f5ad7c7..1099303 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -691,6 +691,9 @@
reg_len = AllocTemp();
// Get len.
Load32Disp(rl_array.reg, len_offset, reg_len);
+ MarkPossibleNullPointerException(opt_flags);
+ } else {
+ ForceImplicitNullCheck(rl_array.reg, opt_flags, false);
}
// reg_ptr -> array data.
OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
@@ -781,6 +784,9 @@
// NOTE: max live temps(4) here.
// Get len.
Load32Disp(rl_array.reg, len_offset, reg_len);
+ MarkPossibleNullPointerException(opt_flags);
+ } else {
+ ForceImplicitNullCheck(rl_array.reg, opt_flags, false);
}
// reg_ptr -> array data.
OpRegImm(kOpAdd, reg_ptr, data_offset);
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 4c0bd83..b098bc2 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -791,6 +791,7 @@
RegStorage reg_ptr = TargetReg(kArg0);
OpRegRegImm(kOpAdd, reg_ptr, r_base, displacement);
RegStorage r_tgt = LoadHelper(kQuickA64Load);
+ ForceImplicitNullCheck(reg_ptr, 0, true); // is_wide = true
LIR *ret = OpReg(kOpBlx, r_tgt);
RegStorage reg_ret;
if (cu_->target64) {
@@ -813,6 +814,7 @@
LockCallTemps(); // Using fixed registers.
RegStorage temp_ptr = AllocTemp();
OpRegRegImm(kOpAdd, temp_ptr, r_base, displacement);
+ ForceImplicitNullCheck(temp_ptr, 0, true); // is_wide = true
RegStorage temp_value = AllocTempWide();
OpRegCopyWide(temp_value, r_src);
if (cu_->target64) {
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 95c61cd..37e5804 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -21,7 +21,9 @@
#include "base/logging.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
+#include "dex/mir_graph.h"
#include "driver/compiler_driver.h"
+#include "driver/compiler_options.h"
#include "mips_lir.h"
namespace art {
@@ -830,6 +832,22 @@
return res;
}
+void MipsMir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags, bool is_wide) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
+ if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
+ return;
+ }
+ // Force an implicit null check by performing a memory operation (load) from the given
+ // register with offset 0. This will cause a signal if the register contains 0 (null).
+ LIR* load = Load32Disp(reg, LOWORD_OFFSET, rs_rZERO);
+ MarkSafepointPC(load);
+ if (is_wide) {
+ load = Load32Disp(reg, HIWORD_OFFSET, rs_rZERO);
+ MarkSafepointPC(load);
+ }
+ }
+}
+
LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
VolatileKind is_volatile) {
if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index b4520e9..677ff0d 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -975,6 +975,8 @@
case kArm64:
case kX86:
case kX86_64:
+ case kMips:
+ case kMips64:
implicit_null_checks = true;
implicit_so_checks = true;
break;
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
index abe495b..8ea78eb 100644
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -17,11 +17,17 @@
#include "fault_handler.h"
#include <sys/ucontext.h>
+#include "art_method-inl.h"
#include "base/macros.h"
#include "globals.h"
#include "base/logging.h"
#include "base/hex_dump.h"
+#include "registers_mips.h"
+#include "thread.h"
+#include "thread-inl.h"
+extern "C" void art_quick_throw_stack_overflow();
+extern "C" void art_quick_throw_null_pointer_exception();
//
// Mips specific fault handler functions.
@@ -33,16 +39,52 @@
void* context ATTRIBUTE_UNUSED) {
}
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED,
- void* context ATTRIBUTE_UNUSED,
- ArtMethod** out_method ATTRIBUTE_UNUSED,
- uintptr_t* out_return_pc ATTRIBUTE_UNUSED,
- uintptr_t* out_sp ATTRIBUTE_UNUSED) {
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
+ ArtMethod** out_method,
+ uintptr_t* out_return_pc, uintptr_t* out_sp) {
+ struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+ *out_sp = static_cast<uintptr_t>(sc->sc_regs[29]); // SP register
+ VLOG(signals) << "sp: " << *out_sp;
+ if (*out_sp == 0) {
+ return;
+ }
+
+ // In the case of a stack overflow, the stack is not valid and we can't
+ // get the method from the top of the stack. However it's in A0 (sc_regs[4]).
+ uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(siginfo->si_addr); // BVA addr
+ uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
+ reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kMips));
+ if (overflow_addr == fault_addr) {
+ *out_method = reinterpret_cast<ArtMethod*>(sc->sc_regs[4]); // A0 register
+ } else {
+ // The method is at the top of the stack.
+ *out_method = *reinterpret_cast<ArtMethod**>(*out_sp);
+ }
+
+ // Work out the return PC. This will be the address of the instruction
+ // following the faulting lw/sw instruction.
+
+ VLOG(signals) << "pc: " << std::hex
+ << static_cast<void*>(reinterpret_cast<uint8_t*>(sc->sc_pc));
+
+ *out_return_pc = sc->sc_pc + 4;
}
bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
- void* context ATTRIBUTE_UNUSED) {
- return false;
+ void* context) {
+ // The code that looks for the catch location needs to know the value of the
+ // PC at the point of call. For Null checks we insert a GC map that is immediately after
+ // the load/store instruction that might cause the fault.
+
+ struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+
+ sc->sc_regs[31] = sc->sc_pc + 4; // RA needs to point to gc map location
+ sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception);
+ sc->sc_regs[25] = sc->sc_pc; // make sure T9 points to the function
+ VLOG(signals) << "Generating null pointer exception";
+ return true;
}
bool SuspensionHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
@@ -50,8 +92,51 @@
return false;
}
-bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
- void* context ATTRIBUTE_UNUSED) {
- return false;
+// Stack overflow fault handler.
+//
+// This checks that the fault address is equal to the current stack pointer
+// minus the overflow region size (16K typically). The instruction that
+// generates this signal is:
+//
+// lw zero, -16384(sp)
+//
+// It will fault if sp is inside the protected region on the stack.
+//
+// If we determine this is a stack overflow we need to move the stack pointer
+// to the overflow region below the protected region.
+
+bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* context) {
+ struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+ VLOG(signals) << "stack overflow handler with sp at " << std::hex << &uc;
+ VLOG(signals) << "sigcontext: " << std::hex << sc;
+
+ uintptr_t sp = sc->sc_regs[29]; // SP register
+ VLOG(signals) << "sp: " << std::hex << sp;
+
+ uintptr_t fault_addr = reinterpret_cast<uintptr_t>(info->si_addr); // BVA addr
+ VLOG(signals) << "fault_addr: " << std::hex << fault_addr;
+ VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
+ ", fault_addr: " << fault_addr;
+
+ uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kMips);
+
+ // Check that the fault address is the value expected for a stack overflow.
+ if (fault_addr != overflow_addr) {
+ VLOG(signals) << "Not a stack overflow";
+ return false;
+ }
+
+ VLOG(signals) << "Stack overflow found";
+
+ // Now arrange for the signal handler to return to art_quick_throw_stack_overflow.
+ // The value of RA must be the same as it was when we entered the code that
+ // caused this fault. This will be inserted into a callee save frame by
+ // the function to which this handler returns (art_quick_throw_stack_overflow).
+ sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow);
+ sc->sc_regs[25] = sc->sc_pc; // make sure T9 points to the function
+
+ // The kernel will now return to the address in sc->sc_pc.
+ return true;
}
} // namespace art
diff --git a/runtime/arch/mips64/fault_handler_mips64.cc b/runtime/arch/mips64/fault_handler_mips64.cc
index 277c2b2..4abfcf1 100644
--- a/runtime/arch/mips64/fault_handler_mips64.cc
+++ b/runtime/arch/mips64/fault_handler_mips64.cc
@@ -17,11 +17,17 @@
#include "fault_handler.h"
#include <sys/ucontext.h>
+#include "art_method-inl.h"
#include "base/macros.h"
#include "globals.h"
#include "base/logging.h"
#include "base/hex_dump.h"
+#include "registers_mips64.h"
+#include "thread.h"
+#include "thread-inl.h"
+extern "C" void art_quick_throw_stack_overflow();
+extern "C" void art_quick_throw_null_pointer_exception();
//
// Mips64 specific fault handler functions.
@@ -33,16 +39,52 @@
void* context ATTRIBUTE_UNUSED) {
}
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED,
- void* context ATTRIBUTE_UNUSED,
- ArtMethod** out_method ATTRIBUTE_UNUSED,
- uintptr_t* out_return_pc ATTRIBUTE_UNUSED,
- uintptr_t* out_sp ATTRIBUTE_UNUSED) {
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
+ ArtMethod** out_method,
+ uintptr_t* out_return_pc, uintptr_t* out_sp) {
+ struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+ *out_sp = static_cast<uintptr_t>(sc->sc_regs[29]); // SP register
+ VLOG(signals) << "sp: " << *out_sp;
+ if (*out_sp == 0) {
+ return;
+ }
+
+ // In the case of a stack overflow, the stack is not valid and we can't
+ // get the method from the top of the stack. However it's in A0 (sc_regs[4]).
+ uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(siginfo->si_addr); // BVA addr
+ uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
+ reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kMips64));
+ if (overflow_addr == fault_addr) {
+ *out_method = reinterpret_cast<ArtMethod*>(sc->sc_regs[4]); // A0 register
+ } else {
+ // The method is at the top of the stack.
+ *out_method = *reinterpret_cast<ArtMethod**>(*out_sp);
+ }
+
+ // Work out the return PC. This will be the address of the instruction
+ // following the faulting lw/sw instruction.
+
+ VLOG(signals) << "pc: " << std::hex
+ << static_cast<void*>(reinterpret_cast<uint8_t*>(sc->sc_pc));
+
+ *out_return_pc = sc->sc_pc + 4;
}
bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
- void* context ATTRIBUTE_UNUSED) {
- return false;
+ void* context) {
+ // The code that looks for the catch location needs to know the value of the
+ // PC at the point of call. For Null checks we insert a GC map that is immediately after
+ // the load/store instruction that might cause the fault.
+
+ struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+
+ sc->sc_regs[31] = sc->sc_pc + 4; // RA needs to point to gc map location
+ sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception);
+ sc->sc_regs[25] = sc->sc_pc; // make sure T9 points to the function
+ VLOG(signals) << "Generating null pointer exception";
+ return true;
}
bool SuspensionHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
@@ -50,8 +92,51 @@
return false;
}
-bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
- void* context ATTRIBUTE_UNUSED) {
- return false;
+// Stack overflow fault handler.
+//
+// This checks that the fault address is equal to the current stack pointer
+// minus the overflow region size (16K typically). The instruction that
+// generates this signal is:
+//
+// lw zero, -16384(sp)
+//
+// It will fault if sp is inside the protected region on the stack.
+//
+// If we determine this is a stack overflow we need to move the stack pointer
+// to the overflow region below the protected region.
+
+bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* context) {
+ struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+ VLOG(signals) << "stack overflow handler with sp at " << std::hex << &uc;
+ VLOG(signals) << "sigcontext: " << std::hex << sc;
+
+ uintptr_t sp = sc->sc_regs[29]; // SP register
+ VLOG(signals) << "sp: " << std::hex << sp;
+
+ uintptr_t fault_addr = reinterpret_cast<uintptr_t>(info->si_addr); // BVA addr
+ VLOG(signals) << "fault_addr: " << std::hex << fault_addr;
+ VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
+ ", fault_addr: " << fault_addr;
+
+ uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kMips64);
+
+ // Check that the fault address is the value expected for a stack overflow.
+ if (fault_addr != overflow_addr) {
+ VLOG(signals) << "Not a stack overflow";
+ return false;
+ }
+
+ VLOG(signals) << "Stack overflow found";
+
+ // Now arrange for the signal handler to return to art_quick_throw_stack_overflow.
+ // The value of RA must be the same as it was when we entered the code that
+ // caused this fault. This will be inserted into a callee save frame by
+ // the function to which this handler returns (art_quick_throw_stack_overflow).
+ sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow);
+ sc->sc_regs[25] = sc->sc_pc; // make sure T9 points to the function
+
+ // The kernel will now return to the address in sc->sc_pc.
+ return true;
}
} // namespace art
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 5fef8ae..20e4149 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -929,6 +929,8 @@
case kX86:
case kArm64:
case kX86_64:
+ case kMips:
+ case kMips64:
implicit_null_checks_ = true;
// Installing stack protection does not play well with valgrind.
implicit_so_checks_ = (RUNNING_ON_VALGRIND == 0);
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 3d97901..c4111f6 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -412,9 +412,6 @@
# Known broken tests for the MIPS64 optimizing compiler backend in 64-bit mode. b/21555893
TEST_ART_BROKEN_OPTIMIZING_MIPS64_64BIT_RUN_TESTS := \
- 004-SignalTest \
- 018-stack-overflow \
- 107-int-math2 \
449-checker-bce
ifeq ($(TARGET_ARCH),mips64)
@@ -427,20 +424,6 @@
TEST_ART_BROKEN_OPTIMIZING_MIPS64_64BIT_RUN_TESTS :=
-# Known broken tests for the MIPS64 optimizing compiler backend in 32-bit mode. b/21555893
-TEST_ART_BROKEN_OPTIMIZING_MIPS64_32BIT_RUN_TESTS := \
- 496-checker-inlining-and-class-loader
-
-ifeq ($(TARGET_ARCH),mips64)
- ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
- optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_MIPS64_32BIT_RUN_TESTS),32)
- endif
-endif
-
-TEST_ART_BROKEN_OPTIMIZING_MIPS64_32BIT_RUN_TESTS :=
-
# Known broken tests for the optimizing compiler.
TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS :=