ART: Use utils.h::RoundUp instead of explicit bit-fiddling
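
Replace the open-coded "(bytes + 3) & ~3" alignment with RoundUp(bytes, 4)
so the intent (round the allocation size up to a 4-byte multiple) is stated
by name instead of by bit-fiddling.

A minimal sketch of the equivalence, assuming a RoundUp with the same shape
as the art::RoundUp helper in utils.h (the stand-alone RoundUp and main()
below are illustrative only, not part of this change):

    #include <cassert>
    #include <cstddef>

    // For a power-of-two n, adding n - 1 and masking off the low bits
    // rounds x up to the next multiple of n.
    constexpr size_t RoundUp(size_t x, size_t n) {
      return (x + n - 1) & ~(n - 1);
    }

    int main() {
      for (size_t bytes = 0; bytes <= 64; ++bytes) {
        // RoundUp(bytes, 4) matches the old expression (bytes + 3) & ~3.
        assert(RoundUp(bytes, 4) == ((bytes + 3) & ~static_cast<size_t>(3)));
      }
      return 0;
    }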
Change-Id: I249a2cfeb044d3699d02e13d42b8e72518571640
diff --git a/compiler/utils/arena_allocator.h b/compiler/utils/arena_allocator.h
index 18a5bce..032eabc 100644
--- a/compiler/utils/arena_allocator.h
+++ b/compiler/utils/arena_allocator.h
@@ -23,6 +23,7 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "mem_map.h"
+#include "utils.h"
namespace art {
@@ -155,7 +156,7 @@
     if (UNLIKELY(running_on_valgrind_)) {
       return AllocValgrind(bytes, kind);
     }
-    bytes = (bytes + 3) & ~3;
+    bytes = RoundUp(bytes, 4);
     if (UNLIKELY(ptr_ + bytes > end_)) {
       // Obtain a new block.
       ObtainNewArenaForAllocation(bytes);
diff --git a/compiler/utils/scoped_arena_allocator.cc b/compiler/utils/scoped_arena_allocator.cc
index bd78eae..b8b0e6e 100644
--- a/compiler/utils/scoped_arena_allocator.cc
+++ b/compiler/utils/scoped_arena_allocator.cc
@@ -92,7 +92,7 @@
 }
 
 void* ArenaStack::AllocValgrind(size_t bytes, ArenaAllocKind kind) {
-  size_t rounded_bytes = (bytes + kValgrindRedZoneBytes + 3) & ~3;
+  size_t rounded_bytes = RoundUp(bytes + kValgrindRedZoneBytes, 4);
   uint8_t* ptr = top_ptr_;
   if (UNLIKELY(static_cast<size_t>(top_end_ - ptr) < rounded_bytes)) {
     ptr = AllocateFromNextArena(rounded_bytes);
diff --git a/compiler/utils/scoped_arena_allocator.h b/compiler/utils/scoped_arena_allocator.h
index 28e86ec..d5b003c 100644
--- a/compiler/utils/scoped_arena_allocator.h
+++ b/compiler/utils/scoped_arena_allocator.h
@@ -67,7 +67,7 @@
     if (UNLIKELY(running_on_valgrind_)) {
       return AllocValgrind(bytes, kind);
     }
-    size_t rounded_bytes = (bytes + 3) & ~3;
+    size_t rounded_bytes = RoundUp(bytes, 4);
     uint8_t* ptr = top_ptr_;
     if (UNLIKELY(static_cast<size_t>(top_end_ - ptr) < rounded_bytes)) {
       ptr = AllocateFromNextArena(rounded_bytes);