MemoryBlock: Do not automatically extend a given size to a multiple of page size.
Previously, MemoryBlock automatically extended a requested buffer size to a
multiple of the page size because (I believe) doing so was thought to be
harmless and it let you get more memory (on average 2 KiB on 4 KiB-page
systems) "for free".
That programming interface turned out to be error-prone. If you request N
bytes, you usually expect the resulting object to return N from `size()`.
That's not the case for MemoryBlock.
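
For example, assuming a 4 KiB page size (a minimal sketch; the 100-byte
request and the flags are only illustrative):

  std::error_code EC;
  sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
      100, /*NearBlock=*/nullptr,
      sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
  // Before this patch: MB.size() == 4096 (rounded up to the page size).
  // After this patch:  MB.size() == 100 (exactly what was requested).
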
It looks like there is only one place where we take advantage of
allocating more memory than the requested size. So, with this patch, I
simply removed the automatic size expansion from MemoryBlock and moved
it to the caller side where needed. MemoryBlock now always returns a
buffer whose size is equal to the requested size.
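
For reference, the caller-side round-up is the usual power-of-two alignment
trick, matching the SectionMemoryManager change below. A minimal sketch (the
roundUpToPageSize helper name is hypothetical, and it assumes the page size
is a power of two):

  #include "llvm/Support/Process.h"

  // Round NumBytes up to the next multiple of the system page size.
  static size_t roundUpToPageSize(size_t NumBytes) {
    static const size_t PageSize = llvm::sys::Process::getPageSize();
    return (NumBytes + PageSize - 1) & ~(PageSize - 1);
  }
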
Differential Revision: https://reviews.llvm.org/D56941
llvm-svn: 351916
diff --git a/lld/test/ELF/stdout.s b/lld/test/ELF/stdout.s
index 331167e..6c41436 100644
--- a/lld/test/ELF/stdout.s
+++ b/lld/test/ELF/stdout.s
@@ -1,12 +1,15 @@
# REQUIRES: x86
# RUN: llvm-mc -filetype=obj -triple=x86_64-unknown-linux %s -o %t.o
-# RUN: ld.lld %t.o -o - > %t
-# RUN: llvm-objdump -d %t | FileCheck %s
+# RUN: ld.lld %t.o -o - > %t1
+# RUN: llvm-objdump -d %t1 | FileCheck %s
# CHECK: 0000000000201000 _start:
# CHECK: 201000: 90 nop
+# RUN: ld.lld %t.o -o %t2
+# RUN: diff %t1 %t2
+
.globl _start
_start:
nop
diff --git a/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp b/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp
index 5f542f4..ba27779 100644
--- a/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp
+++ b/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp
@@ -242,7 +242,11 @@
allocateMappedMemory(SectionMemoryManager::AllocationPurpose Purpose,
size_t NumBytes, const sys::MemoryBlock *const NearBlock,
unsigned Flags, std::error_code &EC) override {
- return sys::Memory::allocateMappedMemory(NumBytes, NearBlock, Flags, EC);
+ // allocateMappedMemory calls mmap(2). We round up the requested size
+ // to a multiple of the page size to get the extra space for free.
+ static const size_t PageSize = sys::Process::getPageSize();
+ size_t ReqBytes = (NumBytes + PageSize - 1) & ~(PageSize - 1);
+ return sys::Memory::allocateMappedMemory(ReqBytes, NearBlock, Flags, EC);
}
std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
diff --git a/llvm/lib/Support/Unix/Memory.inc b/llvm/lib/Support/Unix/Memory.inc
index c20629c..3c4d324 100644
--- a/llvm/lib/Support/Unix/Memory.inc
+++ b/llvm/lib/Support/Unix/Memory.inc
@@ -91,9 +91,6 @@
if (NumBytes == 0)
return MemoryBlock();
- static const size_t PageSize = Process::getPageSize();
- const size_t NumPages = (NumBytes+PageSize-1)/PageSize;
-
int fd = -1;
int MMFlags = MAP_PRIVATE |
@@ -113,11 +110,12 @@
// Use any near hint and the page size to set a page-aligned starting address
uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
NearBlock->size() : 0;
+ static const size_t PageSize = Process::getPageSize();
if (Start && Start % PageSize)
Start += PageSize - Start % PageSize;
- void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages,
- Protect, MMFlags, fd, 0);
+ void *Addr = ::mmap(reinterpret_cast<void *>(Start), NumBytes, Protect,
+ MMFlags, fd, 0);
if (Addr == MAP_FAILED) {
if (NearBlock) //Try again without a near hint
return allocateMappedMemory(NumBytes, nullptr, PFlags, EC);
@@ -128,7 +126,7 @@
MemoryBlock Result;
Result.Address = Addr;
- Result.Size = NumPages*PageSize;
+ Result.Size = NumBytes;
// Rely on protectMappedMemory to invalidate instruction cache.
if (PFlags & MF_EXEC) {