Merge "base: make SafeCopy work on older Linux kernels."
diff --git a/runtime/base/safe_copy.cc b/runtime/base/safe_copy.cc
index b69a56f..06249ac 100644
--- a/runtime/base/safe_copy.cc
+++ b/runtime/base/safe_copy.cc
@@ -18,9 +18,15 @@
 
 #include <unistd.h>
 #include <sys/uio.h>
+#include <errno.h>
+#include <sys/user.h>
+
+#include <algorithm>
 
 #include <android-base/macros.h>
 
+#include "runtime/base/bit_utils.h"
+
 namespace art {
 
 ssize_t SafeCopy(void *dst, const void *src, size_t len) {
@@ -29,12 +34,44 @@
     .iov_base = dst,
     .iov_len = len,
   };
-  struct iovec src_iov = {
-    .iov_base = const_cast<void*>(src),
-    .iov_len = len,
-  };
 
-  ssize_t rc = process_vm_readv(getpid(), &dst_iov, 1, &src_iov, 1, 0);
+  // Split up the remote read across page boundaries.
+  // From the manpage:
+  //   A partial read/write may result if one of the remote_iov elements points to an invalid
+  //   memory region in the remote process.
+  //
+  //   Partial transfers apply at the granularity of iovec elements.  These system calls won't
+  //   perform a partial transfer that splits a single iovec element.
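+  // With 4 KiB pages, 64 iovecs caps a single copy at 256 KiB; longer requests fail with EINVAL.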
+  constexpr size_t kMaxIovecs = 64;
+  struct iovec src_iovs[kMaxIovecs];
+  size_t iovecs_used = 0;
+
+  const char* cur = static_cast<const char*>(src);
+  while (len > 0) {
+    if (iovecs_used == kMaxIovecs) {
+      errno = EINVAL;
+      return -1;
+    }
+
+    src_iovs[iovecs_used].iov_base = const_cast<char*>(cur);
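+    // End each chunk at the next page boundary, so the transfer can stop exactly at a bad page.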
+    if (!IsAlignedParam(cur, PAGE_SIZE)) {
+      src_iovs[iovecs_used].iov_len = AlignUp(cur, PAGE_SIZE) - cur;
+    } else {
+      src_iovs[iovecs_used].iov_len = PAGE_SIZE;
+    }
+
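+    // The last chunk may be shorter than a page; never read past the requested length.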
+    src_iovs[iovecs_used].iov_len = std::min(src_iovs[iovecs_used].iov_len, len);
+
+    len -= src_iovs[iovecs_used].iov_len;
+    cur += src_iovs[iovecs_used].iov_len;
+    ++iovecs_used;
+  }
+
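+  // A single local iovec is enough: the kernel fills dst with however many bytes were read.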
+  ssize_t rc = process_vm_readv(getpid(), &dst_iov, 1, src_iovs, iovecs_used, 0);
   if (rc == -1) {
     return 0;
   }
diff --git a/runtime/base/safe_copy.h b/runtime/base/safe_copy.h
index 2eee212..d0f497c 100644
--- a/runtime/base/safe_copy.h
+++ b/runtime/base/safe_copy.h
@@ -22,7 +22,8 @@
 namespace art {
 
 // Safely dereference a pointer.
-// Returns -1 if safe copy isn't implemented on the platform, 0 if src is unreadable.
+// Returns -1 if safe copy isn't implemented on the platform, or if the transfer is too large.
+// Returns 0 if src is unreadable.
 ssize_t SafeCopy(void *dst, const void *src, size_t len);
 
 }  // namespace art
diff --git a/runtime/base/safe_copy_test.cc b/runtime/base/safe_copy_test.cc
index d5b8cdb..987895e 100644
--- a/runtime/base/safe_copy_test.cc
+++ b/runtime/base/safe_copy_test.cc
@@ -18,6 +18,8 @@
 
 #include "common_runtime_test.h"
 
+#include <errno.h>
+#include <string.h>
 #include <sys/mman.h>
 #include <sys/user.h>
 
@@ -26,31 +28,76 @@
 #if defined(__linux__)
 
 TEST(SafeCopyTest, smoke) {
-  // Map two pages, and mark the second one as PROT_NONE.
-  void* map = mmap(nullptr, PAGE_SIZE * 2, PROT_READ | PROT_WRITE,
+  // Map four pages, mark the second one as PROT_NONE, and unmap the last one.
+  void* map = mmap(nullptr, PAGE_SIZE * 4, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   ASSERT_NE(MAP_FAILED, map);
   char* page1 = static_cast<char*>(map);
+  char* page2 = page1 + PAGE_SIZE;
+  char* page3 = page2 + PAGE_SIZE;
+  char* page4 = page3 + PAGE_SIZE;
   ASSERT_EQ(0, mprotect(page1 + PAGE_SIZE, PAGE_SIZE, PROT_NONE));
+  ASSERT_EQ(0, munmap(page4, PAGE_SIZE));
 
   page1[0] = 'a';
   page1[PAGE_SIZE - 1] = 'z';
 
+  page3[0] = 'b';
+  page3[PAGE_SIZE - 1] = 'y';
+
   char buf[PAGE_SIZE];
 
   // Completely valid read.
   memset(buf, 0xCC, sizeof(buf));
-  EXPECT_EQ(static_cast<ssize_t>(PAGE_SIZE), SafeCopy(buf, page1, PAGE_SIZE));
+  EXPECT_EQ(static_cast<ssize_t>(PAGE_SIZE), SafeCopy(buf, page1, PAGE_SIZE)) << strerror(errno);
   EXPECT_EQ(0, memcmp(buf, page1, PAGE_SIZE));
 
-  // Reading off of the end.
+  // Reading into a guard page.
   memset(buf, 0xCC, sizeof(buf));
   EXPECT_EQ(static_cast<ssize_t>(PAGE_SIZE - 1), SafeCopy(buf, page1 + 1, PAGE_SIZE));
   EXPECT_EQ(0, memcmp(buf, page1 + 1, PAGE_SIZE - 1));
 
+  // Reading from a guard page into a real page.
+  memset(buf, 0xCC, sizeof(buf));
+  EXPECT_EQ(0, SafeCopy(buf, page2 + PAGE_SIZE - 1, PAGE_SIZE));
+
+  // Reading off of the end of a mapping.
+  memset(buf, 0xCC, sizeof(buf));
+  EXPECT_EQ(static_cast<ssize_t>(PAGE_SIZE), SafeCopy(buf, page3, PAGE_SIZE * 2));
+  EXPECT_EQ(0, memcmp(buf, page3, PAGE_SIZE));
+
   // Completely invalid.
   EXPECT_EQ(0, SafeCopy(buf, page1 + PAGE_SIZE, PAGE_SIZE));
-  ASSERT_EQ(0, munmap(map, PAGE_SIZE * 2));
+
+  // Clean up.
+  ASSERT_EQ(0, munmap(map, PAGE_SIZE * 3));
+}
+
+TEST(SafeCopyTest, alignment) {
+  // Copy the middle of a mapping to the end of another one.
+  void* src_map = mmap(nullptr, PAGE_SIZE * 3, PROT_READ | PROT_WRITE,
+                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  ASSERT_NE(MAP_FAILED, src_map);
+
+  // Add a guard page to make sure we don't write past the end of the mapping.
+  void* dst_map = mmap(nullptr, PAGE_SIZE * 4, PROT_READ | PROT_WRITE,
+                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  ASSERT_NE(MAP_FAILED, dst_map);
+
+  char* src = static_cast<char*>(src_map);
+  char* dst = static_cast<char*>(dst_map);
+  ASSERT_EQ(0, mprotect(dst + 3 * PAGE_SIZE, PAGE_SIZE, PROT_NONE));
+
+  src[512] = 'a';
+  src[PAGE_SIZE * 3 - 512 - 1] = 'z';
+
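+  // 'a' and 'z' mark the first and last bytes of the region being copied.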
+  EXPECT_EQ(static_cast<ssize_t>(PAGE_SIZE * 3 - 1024),
+            SafeCopy(dst + 1024, src + 512, PAGE_SIZE * 3 - 1024));
+  EXPECT_EQ(0, memcmp(dst + 1024, src + 512, PAGE_SIZE * 3 - 1024));
+
+  ASSERT_EQ(0, munmap(src_map, PAGE_SIZE * 3));
+  ASSERT_EQ(0, munmap(dst_map, PAGE_SIZE * 4));
 }
 
 #endif  // defined(__linux__)