[asan] Get rid of some of the uses of kPageSize by calling GetPageSizeCached() instead. The intent is to eliminate kPageSize completely, so that platforms with multiple possible page sizes can be supported.
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@168517 91177308-0d34-0410-b5e6-96231b3b80d8
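For reference, the idea behind GetPageSizeCached() is to query the OS page size once at runtime and cache it, instead of baking a single kPageSize into the build. Below is a minimal, self-contained sketch of that pattern; it uses sysconf(_SC_PAGESIZE) as the OS query and simplified names, so it is illustrative only and not the actual sanitizer_common implementation.

// Sketch (illustrative, not the real sanitizer_common code): cache the OS
// page size on first use instead of relying on a compile-time constant.
#include <unistd.h>   // sysconf, _SC_PAGESIZE
#include <cstdio>

typedef unsigned long uptr;

// Ask the OS for the page size; on POSIX systems sysconf is one way to do it.
static uptr GetPageSize() {
  return (uptr)sysconf(_SC_PAGESIZE);
}

// Cache the value so hot paths (allocator, shadow poisoning) only pay the
// sysconf cost once.
uptr GetPageSizeCached() {
  static uptr cached_page_size = 0;
  if (cached_page_size == 0)
    cached_page_size = GetPageSize();
  return cached_page_size;
}

int main() {
  // Example use: round a request up to a whole number of pages, similar to
  // what asan_pvalloc does in the patch below.
  uptr page_size = GetPageSizeCached();
  uptr size = 100;
  size = (size + page_size - 1) & ~(page_size - 1);
  printf("page size: %zu, rounded request: %zu\n",
         (size_t)page_size, (size_t)size);
  return 0;
}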
diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc
index b570f0d..9719aac 100644
--- a/lib/asan/asan_allocator.cc
+++ b/lib/asan/asan_allocator.cc
@@ -132,7 +132,7 @@
}
static u8 *MmapNewPagesAndPoisonShadow(uptr size) {
- CHECK(IsAligned(size, kPageSize));
+ CHECK(IsAligned(size, GetPageSizeCached()));
u8 *res = (u8*)MmapOrDie(size, __FUNCTION__);
PoisonShadow((uptr)res, size, kAsanHeapLeftRedzoneMagic);
if (flags()->debug) {
@@ -534,12 +534,13 @@
uptr mmap_size = Max(size, kMinMmapSize);
uptr n_chunks = mmap_size / size;
CHECK(n_chunks * size == mmap_size);
- if (size < kPageSize) {
+ uptr PageSize = GetPageSizeCached();
+ if (size < PageSize) {
// Size is small, just poison the last chunk.
n_chunks--;
} else {
// Size is large, allocate an extra page at right and poison it.
- mmap_size += kPageSize;
+ mmap_size += PageSize;
}
CHECK(n_chunks > 0);
u8 *mem = MmapNewPagesAndPoisonShadow(mmap_size);
@@ -813,18 +814,19 @@
}
void *asan_valloc(uptr size, StackTrace *stack) {
- void *ptr = (void*)Allocate(kPageSize, size, stack);
+ void *ptr = (void*)Allocate(GetPageSizeCached(), size, stack);
__asan_malloc_hook(ptr, size);
return ptr;
}
void *asan_pvalloc(uptr size, StackTrace *stack) {
- size = RoundUpTo(size, kPageSize);
+ uptr PageSize = GetPageSizeCached();
+ size = RoundUpTo(size, PageSize);
if (size == 0) {
// pvalloc(0) should allocate one page.
- size = kPageSize;
+ size = PageSize;
}
- void *ptr = (void*)Allocate(kPageSize, size, stack);
+ void *ptr = (void*)Allocate(PageSize, size, stack);
__asan_malloc_hook(ptr, size);
return ptr;
}
@@ -943,7 +945,7 @@
}
void FakeStack::AllocateOneSizeClass(uptr size_class) {
- CHECK(ClassMmapSize(size_class) >= kPageSize);
+ CHECK(ClassMmapSize(size_class) >= GetPageSizeCached());
uptr new_mem = (uptr)MmapOrDie(
ClassMmapSize(size_class), __FUNCTION__);
// Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
diff --git a/lib/asan/asan_linux.cc b/lib/asan/asan_linux.cc
index a70adee..2f89a1e 100644
--- a/lib/asan/asan_linux.cc
+++ b/lib/asan/asan_linux.cc
@@ -176,9 +176,10 @@
uptr sp = (uptr)ucp->uc_stack.ss_sp;
uptr size = ucp->uc_stack.ss_size;
// Align to page size.
- uptr bottom = sp & ~(kPageSize - 1);
+ uptr PageSize = GetPageSizeCached();
+ uptr bottom = sp & ~(PageSize - 1);
size += sp - bottom;
- size = RoundUpTo(size, kPageSize);
+ size = RoundUpTo(size, PageSize);
PoisonShadow(bottom, size, 0);
}
#else
diff --git a/lib/asan/asan_malloc_mac.cc b/lib/asan/asan_malloc_mac.cc
index 5b47e12..b32c18e 100644
--- a/lib/asan/asan_malloc_mac.cc
+++ b/lib/asan/asan_malloc_mac.cc
@@ -165,7 +165,7 @@
return malloc_zone_valloc(system_malloc_zone, size);
}
GET_STACK_TRACE_HERE_FOR_MALLOC;
- return asan_memalign(kPageSize, size, &stack);
+ return asan_memalign(GetPageSizeCached(), size, &stack);
}
#define GET_ZONE_FOR_PTR(ptr) \
diff --git a/lib/asan/asan_rtl.cc b/lib/asan/asan_rtl.cc
index 5d0c966..28a8c03 100644
--- a/lib/asan/asan_rtl.cc
+++ b/lib/asan/asan_rtl.cc
@@ -165,8 +165,8 @@
// ---------------------- mmap -------------------- {{{1
// Reserve memory range [beg, end].
static void ReserveShadowMemoryRange(uptr beg, uptr end) {
- CHECK((beg % kPageSize) == 0);
- CHECK(((end + 1) % kPageSize) == 0);
+ CHECK((beg % GetPageSizeCached()) == 0);
+ CHECK(((end + 1) % GetPageSizeCached()) == 0);
uptr size = end - beg + 1;
void *res = MmapFixedNoReserve(beg, size);
if (res != (void*)beg) {
@@ -271,8 +271,9 @@
int local_stack;
AsanThread *curr_thread = asanThreadRegistry().GetCurrent();
CHECK(curr_thread);
+ uptr PageSize = GetPageSizeCached();
uptr top = curr_thread->stack_top();
- uptr bottom = ((uptr)&local_stack - kPageSize) & ~(kPageSize-1);
+ uptr bottom = ((uptr)&local_stack - PageSize) & ~(PageSize-1);
PoisonShadow(bottom, top - bottom, 0);
}
diff --git a/lib/asan/asan_stats.cc b/lib/asan/asan_stats.cc
index 4f39ba6..cf8cadf 100644
--- a/lib/asan/asan_stats.cc
+++ b/lib/asan/asan_stats.cc
@@ -43,7 +43,7 @@
Printf("Stats: %zuM really freed by %zu calls\n",
really_freed>>20, real_frees);
Printf("Stats: %zuM (%zu full pages) mmaped in %zu calls\n",
- mmaped>>20, mmaped / kPageSize, mmaps);
+ mmaped>>20, mmaped / GetPageSizeCached(), mmaps);
PrintMallocStatsArray(" mmaps by size class: ", mmaped_by_size);
PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size);
diff --git a/lib/asan/asan_thread.cc b/lib/asan/asan_thread.cc
index 7f60ca9..a77e435 100644
--- a/lib/asan/asan_thread.cc
+++ b/lib/asan/asan_thread.cc
@@ -28,15 +28,16 @@
AsanThread *AsanThread::Create(u32 parent_tid, thread_callback_t start_routine,
void *arg, StackTrace *stack) {
- uptr size = RoundUpTo(sizeof(AsanThread), kPageSize);
+ uptr PageSize = GetPageSizeCached();
+ uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
AsanThread *thread = (AsanThread*)MmapOrDie(size, __FUNCTION__);
thread->start_routine_ = start_routine;
thread->arg_ = arg;
- const uptr kSummaryAllocSize = kPageSize;
+ const uptr kSummaryAllocSize = PageSize;
CHECK_LE(sizeof(AsanThreadSummary), kSummaryAllocSize);
AsanThreadSummary *summary =
- (AsanThreadSummary*)MmapOrDie(kPageSize, "AsanThreadSummary");
+ (AsanThreadSummary*)MmapOrDie(PageSize, "AsanThreadSummary");
summary->Init(parent_tid, stack);
summary->set_thread(thread);
thread->set_summary(summary);
@@ -66,7 +67,7 @@
// and we don't want it to have any poisoned stack.
ClearShadowForThreadStack();
fake_stack().Cleanup();
- uptr size = RoundUpTo(sizeof(AsanThread), kPageSize);
+ uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
UnmapOrDie(this, size);
}