Use CheckedCall for mprotects

Aims to prevent silent mprotect failures from causing SIGSEGV later in seemingly unrelated places.
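
For reference, CheckedCall generalizes the CHECKED_MPROTECT macro removed
from jit_code_cache.cc below: it invokes the call and, on a non-zero return,
aborts via PLOG(FATAL) with the supplied context string. It is presumably
declared in utils.h, given the new include added to thread_pool.cc. A minimal
sketch under those assumptions (names and details here are illustrative, not
the exact definition):

  // Illustrative sketch only; mirrors the removed CHECKED_MPROTECT macro.
  // UNLIKELY and PLOG come from ART's base/macros.h and base/logging.h.
  template <typename Func, typename... Args>
  static void CheckedCall(const Func& function, const char* what, Args... args) {
    int rc = function(args...);
    if (UNLIKELY(rc != 0)) {
      errno = rc;  // Preserves the removed macro's behavior (PLOG reports errno).
      PLOG(FATAL) << "Checked call failed for " << what;
    }
  }

A call such as CheckedCall(mprotect, "protect bss", begin, size, PROT_NONE)
then either succeeds or aborts immediately with the errno description,
instead of leaving an unexpected protection to trip a fault much later.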

Bug: 66910552
Test: test-art-host

Change-Id: I91742b0b0059b5125ac663c3c3422ac5562df0fa
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index eeb5569..8999e17 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1768,7 +1768,11 @@
     // We cannot do that for app image even after the fixup as some interned
     // String references may actually end up pointing to moveable Strings.
     uint8_t* const_section_begin = space->Begin() + header.GetBootImageConstantTablesOffset();
-    mprotect(const_section_begin, header.GetBootImageConstantTablesSize(), PROT_READ);
+    CheckedCall(mprotect,
+                "protect constant tables",
+                const_section_begin,
+                header.GetBootImageConstantTablesSize(),
+                PROT_READ);
   }
 
   ClassTable* class_table = nullptr;
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 4597a96..45f4f82 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -453,7 +453,7 @@
   madvise(obj, allocation_size, MADV_DONTNEED);
   if (kIsDebugBuild) {
     // Can't disallow reads since we use them to find next chunks during coalescing.
-    mprotect(obj, allocation_size, PROT_READ);
+    CheckedCall(mprotect, __FUNCTION__, obj, allocation_size, PROT_READ);
   }
   return allocation_size;
 }
@@ -519,7 +519,7 @@
   // We always put our object at the start of the free block, there cannot be another free block
   // before it.
   if (kIsDebugBuild) {
-    mprotect(obj, allocation_size, PROT_READ | PROT_WRITE);
+    CheckedCall(mprotect, __FUNCTION__, obj, allocation_size, PROT_READ | PROT_WRITE);
   }
   new_info->SetPrevFreeBytes(0);
   new_info->SetByteSize(allocation_size, false);
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index b2e1fa5..a51df7c 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -254,7 +254,7 @@
 static void ZeroAndProtectRegion(uint8_t* begin, uint8_t* end) {
   ZeroAndReleasePages(begin, end - begin);
   if (kProtectClearedRegions) {
-    mprotect(begin, end - begin, PROT_NONE);
+    CheckedCall(mprotect, __FUNCTION__, begin, end - begin, PROT_NONE);
   }
 }
 
@@ -589,7 +589,7 @@
   region_space->AdjustNonFreeRegionLimit(idx_);
   type_ = RegionType::kRegionTypeToSpace;
   if (kProtectClearedRegions) {
-    mprotect(Begin(), kRegionSize, PROT_READ | PROT_WRITE);
+    CheckedCall(mprotect, __FUNCTION__, Begin(), kRegionSize, PROT_READ | PROT_WRITE);
   }
 }
 
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index ae08fe2..e122c6d 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -51,15 +51,6 @@
 static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
 static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;
 
-#define CHECKED_MPROTECT(memory, size, prot)                \
-  do {                                                      \
-    int rc = mprotect(memory, size, prot);                  \
-    if (UNLIKELY(rc != 0)) {                                \
-      errno = rc;                                           \
-      PLOG(FATAL) << "Failed to mprotect jit code cache";   \
-    }                                                       \
-  } while (false)                                           \
-
 JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
                                    size_t max_capacity,
                                    bool generate_debug_info,
@@ -173,8 +164,16 @@
 
   SetFootprintLimit(current_capacity_);
 
-  CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
-  CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);
+  CheckedCall(mprotect,
+              "mprotect jit code cache",
+              code_map_->Begin(),
+              code_map_->Size(),
+              kProtCode);
+  CheckedCall(mprotect,
+              "mprotect jit data cache",
+              data_map_->Begin(),
+              data_map_->Size(),
+              kProtData);
 
   VLOG(jit) << "Created jit code cache: initial data size="
             << PrettySize(initial_data_capacity)
@@ -203,14 +202,21 @@
         code_map_(code_map),
         only_for_tlb_shootdown_(only_for_tlb_shootdown) {
     ScopedTrace trace("mprotect all");
-    CHECKED_MPROTECT(
-        code_map_->Begin(), only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(), kProtAll);
+    CheckedCall(mprotect,
+                "make code writable",
+                code_map_->Begin(),
+                only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(),
+                kProtAll);
   }
   ~ScopedCodeCacheWrite() {
     ScopedTrace trace("mprotect code");
-    CHECKED_MPROTECT(
-        code_map_->Begin(), only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(), kProtCode);
+    CheckedCall(mprotect,
+                "make code protected",
+                code_map_->Begin(),
+                only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(),
+                kProtCode);
   }
+
  private:
   MemMap* const code_map_;
 
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 9a42c29..3f4cb94 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -638,7 +638,7 @@
     Runtime* runtime = Runtime::Current();
     if (UNLIKELY(runtime == nullptr)) {
       // This must be oatdump without boot image. Make sure the .bss is inaccessible.
-      mprotect(const_cast<uint8_t*>(BssBegin()), BssSize(), PROT_NONE);
+      CheckedCall(mprotect, "protect bss", const_cast<uint8_t*>(BssBegin()), BssSize(), PROT_NONE);
     } else {
       // Map boot image tables into the .bss. The reserved size must match size of the tables.
       size_t reserved_size = static_cast<size_t>(boot_image_tables_end - boot_image_tables);
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index fb77b84..cffaffc 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -31,6 +31,7 @@
 #include "base/time_utils.h"
 #include "runtime.h"
 #include "thread-current-inl.h"
+#include "utils.h"
 
 namespace art {
 
@@ -49,8 +50,11 @@
                                     false, false, &error_msg));
   CHECK(stack_.get() != nullptr) << error_msg;
   CHECK_ALIGNED(stack_->Begin(), kPageSize);
-  int mprotect_result = mprotect(stack_->Begin(), kPageSize, PROT_NONE);
-  CHECK_EQ(mprotect_result, 0) << "Failed to mprotect() bottom page of thread pool worker stack.";
+  CheckedCall(mprotect,
+              "mprotect bottom page of thread pool worker stack",
+              stack_->Begin(),
+              kPageSize,
+              PROT_NONE);
   const char* reason = "new thread pool worker thread";
   pthread_attr_t attr;
   CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), reason);