Add low_4gb support to MapFile and MapFileAtAddress

The motivation is to use this for loading app images in the low 4GB at
a non-fixed address.

Added test.

Bug: 22858531
Change-Id: I0f79a4a7bfbfbdfc112e41b25c8682b1fb932ab7
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 3a93aac..70096f5 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -231,7 +231,14 @@
       return nullptr;
     }
     size_t length = sbuf.st_size;
-    map.reset(MemMap::MapFile(length, PROT_READ, MAP_PRIVATE, fd, 0, location, error_msg));
+    map.reset(MemMap::MapFile(length,
+                              PROT_READ,
+                              MAP_PRIVATE,
+                              fd,
+                              0,
+                              /*low_4gb*/false,
+                              location,
+                              error_msg));
     if (map.get() == nullptr) {
       DCHECK(!error_msg->empty());
       return nullptr;
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 723ee74..2819670 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -189,8 +189,14 @@
   if (program_header_only_) {
     // first just map ELF header to get program header size information
     size_t elf_header_size = sizeof(Elf_Ehdr);
-    if (!SetMap(MemMap::MapFile(elf_header_size, prot, flags, file_->Fd(), 0,
-                                file_->GetPath().c_str(), error_msg),
+    if (!SetMap(MemMap::MapFile(elf_header_size,
+                                prot,
+                                flags,
+                                file_->Fd(),
+                                0,
+                                /*low_4gb*/false,
+                                file_->GetPath().c_str(),
+                                error_msg),
                 error_msg)) {
       return false;
     }
@@ -202,16 +208,28 @@
                                 sizeof(Elf_Ehdr), file_->GetPath().c_str());
       return false;
     }
-    if (!SetMap(MemMap::MapFile(program_header_size, prot, flags, file_->Fd(), 0,
-                                file_->GetPath().c_str(), error_msg),
+    if (!SetMap(MemMap::MapFile(program_header_size,
+                                prot,
+                                flags,
+                                file_->Fd(),
+                                0,
+                                /*low_4gb*/false,
+                                file_->GetPath().c_str(),
+                                error_msg),
                 error_msg)) {
       *error_msg = StringPrintf("Failed to map ELF program headers: %s", error_msg->c_str());
       return false;
     }
   } else {
     // otherwise map entire file
-    if (!SetMap(MemMap::MapFile(file_->GetLength(), prot, flags, file_->Fd(), 0,
-                                file_->GetPath().c_str(), error_msg),
+    if (!SetMap(MemMap::MapFile(file_->GetLength(),
+                                prot,
+                                flags,
+                                file_->Fd(),
+                                0,
+                                /*low_4gb*/false,
+                                file_->GetPath().c_str(),
+                                error_msg),
                 error_msg)) {
       *error_msg = StringPrintf("Failed to map ELF file: %s", error_msg->c_str());
       return false;
@@ -1258,9 +1276,12 @@
       std::unique_ptr<MemMap> segment(
           MemMap::MapFileAtAddress(p_vaddr,
                                    program_header->p_filesz,
-                                   prot, flags, file_->Fd(),
+                                   prot,
+                                   flags,
+                                   file_->Fd(),
                                    program_header->p_offset,
-                                   true,  // implies MAP_FIXED
+                                   /*low_4gb*/false,
+                                   /*reuse*/true,  // implies MAP_FIXED
                                    file_->GetPath().c_str(),
                                    error_msg));
       if (segment.get() == nullptr) {
@@ -1775,8 +1796,14 @@
                               file->GetPath().c_str());
     return nullptr;
   }
-  std::unique_ptr<MemMap> map(MemMap::MapFile(EI_NIDENT, PROT_READ, MAP_PRIVATE, file->Fd(), 0,
-                                              file->GetPath().c_str(), error_msg));
+  std::unique_ptr<MemMap> map(MemMap::MapFile(EI_NIDENT,
+                                              PROT_READ,
+                                              MAP_PRIVATE,
+                                              file->Fd(),
+                                              0,
+                                              /*low_4gb*/false,
+                                              file->GetPath().c_str(),
+                                              error_msg));
-  if (map == nullptr && map->Size() != EI_NIDENT) {
+  if (map == nullptr || map->Size() != EI_NIDENT) {
     return nullptr;
   }
@@ -1809,8 +1836,14 @@
                               file->GetPath().c_str());
     return nullptr;
   }
-  std::unique_ptr<MemMap> map(MemMap::MapFile(EI_NIDENT, PROT_READ, MAP_PRIVATE, file->Fd(), 0,
-                                              file->GetPath().c_str(), error_msg));
+  std::unique_ptr<MemMap> map(MemMap::MapFile(EI_NIDENT,
+                                              PROT_READ,
+                                              MAP_PRIVATE,
+                                              file->Fd(),
+                                              0,
+                                              /*low_4gb*/false,
+                                              file->GetPath().c_str(),
+                                              error_msg));
-  if (map == nullptr && map->Size() != EI_NIDENT) {
+  if (map == nullptr || map->Size() != EI_NIDENT) {
     return nullptr;
   }
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index ce64b10..1fe9a03 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -709,19 +709,32 @@
   }
 
   // Note: The image header is part of the image due to mmap page alignment required of offset.
-  std::unique_ptr<MemMap> map(MemMap::MapFileAtAddress(
-      image_header.GetImageBegin(), image_header.GetImageSize(),
-      PROT_READ | PROT_WRITE, MAP_PRIVATE, file->Fd(), 0, false, image_filename, error_msg));
-  if (map.get() == nullptr) {
+  std::unique_ptr<MemMap> map(MemMap::MapFileAtAddress(image_header.GetImageBegin(),
+                                                       image_header.GetImageSize(),
+                                                       PROT_READ | PROT_WRITE,
+                                                       MAP_PRIVATE,
+                                                       file->Fd(),
+                                                       0,
+                                                       /*low_4gb*/false,
+                                                       /*reuse*/false,
+                                                       image_filename,
+                                                       error_msg));
+  if (map == nullptr) {
     DCHECK(!error_msg->empty());
     return nullptr;
   }
   CHECK_EQ(image_header.GetImageBegin(), map->Begin());
   DCHECK_EQ(0, memcmp(&image_header, map->Begin(), sizeof(ImageHeader)));
 
-  std::unique_ptr<MemMap> image_map(MemMap::MapFileAtAddress(
-      nullptr, bitmap_section.Size(), PROT_READ, MAP_PRIVATE, file->Fd(),
-      bitmap_section.Offset(), false, image_filename, error_msg));
+  std::unique_ptr<MemMap> image_map(MemMap::MapFileAtAddress(nullptr,
+                                                             bitmap_section.Size(),
+                                                             PROT_READ,
+                                                             MAP_PRIVATE,
+                                                             file->Fd(),
+                                                             bitmap_section.Offset(),
+                                                             /*low_4gb*/false,
+                                                             /*reuse*/false,
+                                                             image_filename,
+                                                             error_msg));
   if (image_map.get() == nullptr) {
     *error_msg = StringPrintf("Failed to map image bitmap: %s", error_msg->c_str());
     return nullptr;
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 2d3581d..6a0c6d6 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -252,9 +252,13 @@
 }
 
 #if USE_ART_LOW_4G_ALLOCATOR
-static inline void* TryMemMapLow4GB(void* ptr, size_t page_aligned_byte_count, int prot, int flags,
-                                    int fd) {
-  void* actual = mmap(ptr, page_aligned_byte_count, prot, flags, fd, 0);
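+// Attempt a single mmap at the given hint. If the kernel places the result at or above
+// 4GB, unmap it and return MAP_FAILED so the caller can probe another candidate address.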
+static inline void* TryMemMapLow4GB(void* ptr,
+                                    size_t page_aligned_byte_count,
+                                    int prot,
+                                    int flags,
+                                    int fd,
+                                    off_t offset) {
+  void* actual = mmap(ptr, page_aligned_byte_count, prot, flags, fd, offset);
   if (actual != MAP_FAILED) {
     // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
     // 4GB. If this is the case, unmap and retry.
@@ -267,8 +271,13 @@
 }
 #endif
 
-MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byte_count, int prot,
-                             bool low_4gb, bool reuse, std::string* error_msg) {
+MemMap* MemMap::MapAnonymous(const char* name,
+                             uint8_t* expected_ptr,
+                             size_t byte_count,
+                             int prot,
+                             bool low_4gb,
+                             bool reuse,
+                             std::string* error_msg) {
 #ifndef __LP64__
   UNUSED(low_4gb);
 #endif
@@ -317,122 +326,14 @@
   // We need to store and potentially set an error number for pretty printing of errors
   int saved_errno = 0;
 
-#ifdef __LP64__
-  // When requesting low_4g memory and having an expectation, the requested range should fit into
-  // 4GB.
-  if (low_4gb && (
-      // Start out of bounds.
-      (reinterpret_cast<uintptr_t>(expected_ptr) >> 32) != 0 ||
-      // End out of bounds. For simplicity, this will fail for the last page of memory.
-      (reinterpret_cast<uintptr_t>(expected_ptr + page_aligned_byte_count) >> 32) != 0)) {
-    *error_msg = StringPrintf("The requested address space (%p, %p) cannot fit in low_4gb",
-                              expected_ptr, expected_ptr + page_aligned_byte_count);
-    return nullptr;
-  }
-#endif
-
-  // TODO:
-  // A page allocator would be a useful abstraction here, as
-  // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
-  // 2) The linear scheme, even with simple saving of the last known position, is very crude
-#if USE_ART_LOW_4G_ALLOCATOR
-  // MAP_32BIT only available on x86_64.
-  void* actual = MAP_FAILED;
-  if (low_4gb && expected_ptr == nullptr) {
-    bool first_run = true;
-
-    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
-    for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
-      // Use maps_ as an optimization to skip over large maps.
-      // Find the first map which is address > ptr.
-      auto it = maps_->upper_bound(reinterpret_cast<void*>(ptr));
-      if (it != maps_->begin()) {
-        auto before_it = it;
-        --before_it;
-        // Start at the end of the map before the upper bound.
-        ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
-        CHECK_ALIGNED(ptr, kPageSize);
-      }
-      while (it != maps_->end()) {
-        // How much space do we have until the next map?
-        size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
-        // If the space may be sufficient, break out of the loop.
-        if (delta >= page_aligned_byte_count) {
-          break;
-        }
-        // Otherwise, skip to the end of the map.
-        ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
-        CHECK_ALIGNED(ptr, kPageSize);
-        ++it;
-      }
-
-      // Try to see if we get lucky with this address since none of the ART maps overlap.
-      actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags,
-                               fd.get());
-      if (actual != MAP_FAILED) {
-        next_mem_pos_ = reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count;
-        break;
-      }
-
-      if (4U * GB - ptr < page_aligned_byte_count) {
-        // Not enough memory until 4GB.
-        if (first_run) {
-          // Try another time from the bottom;
-          ptr = LOW_MEM_START - kPageSize;
-          first_run = false;
-          continue;
-        } else {
-          // Second try failed.
-          break;
-        }
-      }
-
-      uintptr_t tail_ptr;
-
-      // Check pages are free.
-      bool safe = true;
-      for (tail_ptr = ptr; tail_ptr < ptr + page_aligned_byte_count; tail_ptr += kPageSize) {
-        if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
-          safe = false;
-          break;
-        } else {
-          DCHECK_EQ(errno, ENOMEM);
-        }
-      }
-
-      next_mem_pos_ = tail_ptr;  // update early, as we break out when we found and mapped a region
-
-      if (safe == true) {
-        actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags,
-                                 fd.get());
-        if (actual != MAP_FAILED) {
-            break;
-        }
-      } else {
-        // Skip over last page.
-        ptr = tail_ptr;
-      }
-    }
-
-    if (actual == MAP_FAILED) {
-      LOG(ERROR) << "Could not find contiguous low-memory space.";
-      saved_errno = ENOMEM;
-    }
-  } else {
-    actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
-    saved_errno = errno;
-  }
-
-#else
-#if defined(__LP64__)
-  if (low_4gb && expected_ptr == nullptr) {
-    flags |= MAP_32BIT;
-  }
-#endif
-
-  void* actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
+  void* actual = MapInternal(expected_ptr,
+                             page_aligned_byte_count,
+                             prot,
+                             flags,
+                             fd.get(),
+                             0,
+                             low_4gb);
   saved_errno = errno;
-#endif
 
   if (actual == MAP_FAILED) {
     PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
@@ -458,8 +359,15 @@
   return new MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
 }
 
-MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int prot, int flags,
-                                 int fd, off_t start, bool reuse, const char* filename,
+MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
+                                 size_t byte_count,
+                                 int prot,
+                                 int flags,
+                                 int fd,
+                                 off_t start,
+                                 bool low_4gb,
+                                 bool reuse,
+                                 const char* filename,
                                  std::string* error_msg) {
   CHECK_NE(0, prot);
   CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
@@ -498,12 +406,13 @@
     page_aligned_byte_count += redzone_size;
   }
 
-  uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(page_aligned_expected,
-                                              page_aligned_byte_count,
-                                              prot,
-                                              flags,
-                                              fd,
-                                              page_aligned_offset));
+  uint8_t* actual = reinterpret_cast<uint8_t*>(MapInternal(page_aligned_expected,
+                                                           page_aligned_byte_count,
+                                                           prot,
+                                                           flags,
+                                                           fd,
+                                                           page_aligned_offset,
+                                                           low_4gb));
   if (actual == MAP_FAILED) {
     auto saved_errno = errno;
 
@@ -827,6 +736,133 @@
   size_ = new_size;
 }
 
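+// Internal version of mmap that emulates low 4GB placement on 64-bit targets.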
+void* MemMap::MapInternal(void* addr,
+                          size_t length,
+                          int prot,
+                          int flags,
+                          int fd,
+                          off_t offset,
+                          bool low_4gb) {
+#ifdef __LP64__
+  DCHECK_EQ(flags & MAP_32BIT, 0);
+  // When requesting low_4g memory and having an expectation, the requested range should fit into
+  // 4GB.
+  if (low_4gb && (
+      // Start out of bounds.
+      (reinterpret_cast<uintptr_t>(addr) >> 32) != 0 ||
+      // End out of bounds. For simplicity, this will fail for the last page of memory.
+      ((reinterpret_cast<uintptr_t>(addr) + length) >> 32) != 0)) {
+    LOG(ERROR) << "The requested address space (" << addr << ", "
+               << reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + length)
+               << ") cannot fit in low_4gb";
+    return MAP_FAILED;
+  }
+#else
+  UNUSED(low_4gb);
+#endif
+  DCHECK_ALIGNED(length, kPageSize);
+  if (low_4gb) {
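+    // MAP_FIXED would pin the mapping at the requested address and bypass the low 4GB
+    // placement logic below.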
+    DCHECK_EQ(flags & MAP_FIXED, 0);
+  }
+  // TODO:
+  // A page allocator would be a useful abstraction here, as
+  // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
+  // 2) The linear scheme, even with simple saving of the last known position, is very crude
+  void* actual = MAP_FAILED;
+#if USE_ART_LOW_4G_ALLOCATOR
+  // MAP_32BIT only available on x86_64.
+  if (low_4gb && addr == nullptr) {
+    bool first_run = true;
+
+    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
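+    // Linearly scan for a free region, starting from the last known position and wrapping
+    // around to LOW_MEM_START once if the first pass runs out of address space.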
+    for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
+      // Use maps_ as an optimization to skip over large maps.
+      // Find the first map which is address > ptr.
+      auto it = maps_->upper_bound(reinterpret_cast<void*>(ptr));
+      if (it != maps_->begin()) {
+        auto before_it = it;
+        --before_it;
+        // Start at the end of the map before the upper bound.
+        ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
+        CHECK_ALIGNED(ptr, kPageSize);
+      }
+      while (it != maps_->end()) {
+        // How much space do we have until the next map?
+        size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
+        // If the space may be sufficient, break out of the loop.
+        if (delta >= length) {
+          break;
+        }
+        // Otherwise, skip to the end of the map.
+        ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
+        CHECK_ALIGNED(ptr, kPageSize);
+        ++it;
+      }
+
+      // Try to see if we get lucky with this address since none of the ART maps overlap.
+      actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
+      if (actual != MAP_FAILED) {
+        next_mem_pos_ = reinterpret_cast<uintptr_t>(actual) + length;
+        return actual;
+      }
+
+      if (4U * GB - ptr < length) {
+        // Not enough memory until 4GB.
+        if (first_run) {
+          // Try another time from the bottom.
+          ptr = LOW_MEM_START - kPageSize;
+          first_run = false;
+          continue;
+        } else {
+          // Second try failed.
+          break;
+        }
+      }
+
+      uintptr_t tail_ptr;
+
+      // Check pages are free.
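+      // (msync succeeds only on pages that are already mapped, so a failure with ENOMEM
+      // indicates the page is still free.)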
+      bool safe = true;
+      for (tail_ptr = ptr; tail_ptr < ptr + length; tail_ptr += kPageSize) {
+        if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
+          safe = false;
+          break;
+        } else {
+          DCHECK_EQ(errno, ENOMEM);
+        }
+      }
+
+      next_mem_pos_ = tail_ptr;  // update early, as we break out when we find and map a region
+
+      if (safe) {
+        actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
+        if (actual != MAP_FAILED) {
+          return actual;
+        }
+      } else {
+        // Skip over last page.
+        ptr = tail_ptr;
+      }
+    }
+
+    if (actual == MAP_FAILED) {
+      LOG(ERROR) << "Could not find contiguous low-memory space.";
+      errno = ENOMEM;
+    }
+  } else {
+    actual = mmap(addr, length, prot, flags, fd, offset);
+  }
+
+#else
+#if defined(__LP64__)
+  if (low_4gb && addr == nullptr) {
+    flags |= MAP_32BIT;
+  }
+#endif
+  actual = mmap(addr, length, prot, flags, fd, offset);
+#endif
+  return actual;
+}
+
 std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
   os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
                      mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 7c11ceb..a67a925 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -61,8 +61,13 @@
   // a name.
   //
-  // On success, returns returns a MemMap instance.  On failure, returns null.
+  // On success, returns a MemMap instance.  On failure, returns null.
-  static MemMap* MapAnonymous(const char* ashmem_name, uint8_t* addr, size_t byte_count, int prot,
-                              bool low_4gb, bool reuse, std::string* error_msg);
+  static MemMap* MapAnonymous(const char* ashmem_name,
+                              uint8_t* addr,
+                              size_t byte_count,
+                              int prot,
+                              bool low_4gb,
+                              bool reuse,
+                              std::string* error_msg);
 
   // Create placeholder for a region allocated by direct call to mmap.
   // This is useful when we do not have control over the code calling mmap,
@@ -74,10 +79,24 @@
   // "start" offset is absolute, not relative.
   //
-  // On success, returns returns a MemMap instance.  On failure, returns null.
+  // On success, returns a MemMap instance.  On failure, returns null.
-  static MemMap* MapFile(size_t byte_count, int prot, int flags, int fd, off_t start,
-                         const char* filename, std::string* error_msg) {
-    return MapFileAtAddress(
-        nullptr, byte_count, prot, flags, fd, start, false, filename, error_msg);
+  static MemMap* MapFile(size_t byte_count,
+                         int prot,
+                         int flags,
+                         int fd,
+                         off_t start,
+                         bool low_4gb,
+                         const char* filename,
+                         std::string* error_msg) {
+    return MapFileAtAddress(nullptr,
+                            byte_count,
+                            prot,
+                            flags,
+                            fd,
+                            start,
+                            low_4gb,
+                            /*reuse*/false,
+                            filename,
+                            error_msg);
   }
 
   // Map part of a file, taking care of non-page aligned offsets.  The
@@ -87,8 +106,15 @@
   // mapping where we do not take ownership of the memory.
   //
-  // On success, returns returns a MemMap instance.  On failure, returns null.
+  // On success, returns a MemMap instance.  On failure, returns null.
-  static MemMap* MapFileAtAddress(uint8_t* addr, size_t byte_count, int prot, int flags, int fd,
-                                  off_t start, bool reuse, const char* filename,
+  static MemMap* MapFileAtAddress(uint8_t* addr,
+                                  size_t byte_count,
+                                  int prot,
+                                  int flags,
+                                  int fd,
+                                  off_t start,
+                                  bool low_4gb,
+                                  bool reuse,
+                                  const char* filename,
                                   std::string* error_msg);
 
   // Releases the memory mapping.
@@ -138,7 +164,9 @@
   }
 
   // Unmap the pages at end and remap them to create another memory map.
-  MemMap* RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
+  MemMap* RemapAtEnd(uint8_t* new_end,
+                     const char* tail_name,
+                     int tail_prot,
                      std::string* error_msg);
 
   static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
@@ -152,8 +180,14 @@
   static void Shutdown() REQUIRES(!Locks::mem_maps_lock_);
 
  private:
-  MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin, size_t base_size,
-         int prot, bool reuse, size_t redzone_size = 0) REQUIRES(!Locks::mem_maps_lock_);
+  MemMap(const std::string& name,
+         uint8_t* begin,
+         size_t size,
+         void* base_begin,
+         size_t base_size,
+         int prot,
+         bool reuse,
+         size_t redzone_size = 0) REQUIRES(!Locks::mem_maps_lock_);
 
   static void DumpMapsLocked(std::ostream& os, bool terse)
       REQUIRES(Locks::mem_maps_lock_);
@@ -164,6 +198,15 @@
   static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg)
       REQUIRES(!Locks::mem_maps_lock_);
 
+  // Internal version of mmap that supports low 4gb emulation.
+  static void* MapInternal(void* addr,
+                           size_t length,
+                           int prot,
+                           int flags,
+                           int fd,
+                           off_t offset,
+                           bool low_4gb);
+
   const std::string name_;
   uint8_t* const begin_;  // Start of data.
   size_t size_;  // Length of data.
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index 13bf5b7..3790e53 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -18,13 +18,13 @@
 
 #include <memory>
 
+#include "common_runtime_test.h"
 #include "base/memory_tool.h"
-
-#include "gtest/gtest.h"
+#include "base/unix_file/fd_file.h"
 
 namespace art {
 
-class MemMapTest : public testing::Test {
+class MemMapTest : public CommonRuntimeTest {
  public:
   static uint8_t* BaseBegin(MemMap* mem_map) {
     return reinterpret_cast<uint8_t*>(mem_map->base_begin_);
@@ -164,6 +164,26 @@
   ASSERT_TRUE(error_msg.empty());
   ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
 }
+
+TEST_F(MemMapTest, MapFile32Bit) {
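+  // Map one page of a scratch file with low_4gb=true and verify that the mapping lands
+  // below 4GB.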
+  CommonInit();
+  std::string error_msg;
+  ScratchFile scratch_file;
+  constexpr size_t kMapSize = kPageSize;
+  std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
+  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
+  std::unique_ptr<MemMap> map(MemMap::MapFile(/*byte_count*/kMapSize,
+                                              PROT_READ,
+                                              MAP_PRIVATE,
+                                              scratch_file.GetFd(),
+                                              /*start*/0,
+                                              /*low_4gb*/true,
+                                              scratch_file.GetFilename().c_str(),
+                                              &error_msg));
+  ASSERT_TRUE(map != nullptr) << error_msg;
+  ASSERT_TRUE(error_msg.empty());
+  ASSERT_EQ(map->Size(), kMapSize);
+  ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
+}
 #endif
 
 TEST_F(MemMapTest, MapAnonymousExactAddr) {