Allow configuration of the CodeBuffer allocator.

Ideally, we'd like to template this somehow to avoid the use of the
preprocessor, but this will work for now.
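
One possible shape for the templated approach, sketched with hypothetical
names (nothing below exists in the tree; the allocator would become a
policy type instead of a -DVIXL_CODE_BUFFER_* macro):

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    // Hypothetical sketch only; none of these names exist in VIXL.
    struct MallocCodeAllocator {
      static uint8_t* Allocate(size_t size) {
        return static_cast<uint8_t*>(std::malloc(size));
      }
      static void Free(uint8_t* buffer, size_t) { std::free(buffer); }
    };

    template <typename Allocator = MallocCodeAllocator>
    class BasicCodeBuffer {
     public:
      explicit BasicCodeBuffer(size_t capacity)
          : buffer_(Allocator::Allocate(capacity)), capacity_(capacity) {}
      ~BasicCodeBuffer() { Allocator::Free(buffer_, capacity_); }

     private:
      uint8_t* buffer_;
      size_t capacity_;
    };
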
Change-Id: Iba88b04a97f516ddb13a83035b078d8cf599da6c
diff --git a/SConstruct b/SConstruct
index 69d9523..4d20845 100644
--- a/SConstruct
+++ b/SConstruct
@@ -114,6 +114,12 @@
},
'negative_testing:on' : {
'CCFLAGS' : ['-DVIXL_NEGATIVE_TESTING']
+ },
+ 'code_buffer_allocator:mmap' : {
+ 'CCFLAGS' : ['-DVIXL_CODE_BUFFER_MMAP']
+ },
+ 'code_buffer_allocator:malloc' : {
+ 'CCFLAGS' : ['-DVIXL_CODE_BUFFER_MALLOC']
}
}
@@ -166,6 +172,15 @@
env['simulator'] = 'none'
+# 'mmap' is required for 'mprotect', which the tests need when running
+# natively, so we use it by default where we can.
+def code_buffer_allocator_handler(env):
+ directives = util.GetCompilerDirectives(env)
+ if '__linux__' in directives:
+ env['code_buffer_allocator'] = 'mmap'
+ else:
+ env['code_buffer_allocator'] = 'malloc'
+
# A validator checks the consistency of provided options against the environment.
def default_validator(env):
pass
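
Note: the handler above only picks a default. Assuming a standard checkout,
the choice can still be forced on the command line, e.g.
'scons code_buffer_allocator=malloc'; the value is validated against the
allowed_values list added further down.
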
@@ -193,9 +208,11 @@
'target_arch' : [ 'AArch32 only if the host compiler targets a 32-bit '
'architecture - otherwise both', target_arch_handler,
target_arch_validator],
- 'simulator' : ['on if the target architectures include AArch64 but '
+ 'simulator' : [ 'on if the target architectures include AArch64 but '
'the host is not AArch64, else off',
- simulator_handler, simulator_validator ]
+ simulator_handler, simulator_validator ],
+ 'code_buffer_allocator' : [ 'mmap with __linux__, malloc otherwise',
+ code_buffer_allocator_handler, default_validator ]
})
@@ -215,13 +232,17 @@
vars.AddVariables(
EnumVariable('mode', 'Build mode',
'release', allowed_values=config.build_options_modes),
- EnumVariable('negative_testing', 'Enable negative testing (needs exceptions)',
+ EnumVariable('negative_testing',
+ 'Enable negative testing (needs exceptions)',
'off', allowed_values=['on', 'off']),
DefaultVariable('symbols', 'Include debugging symbols in the binaries',
['on', 'off']),
DefaultVariable('target_arch', 'Target architecture',
['aarch32', 'aarch64', 'both']),
DefaultVariable('simulator', 'Simulators to include', ['aarch64', 'none']),
+ DefaultVariable('code_buffer_allocator',
+ 'Configure the allocation mechanism in the CodeBuffer',
+ ['malloc', 'mmap']),
('std', 'C++ standard. The standards tested are: %s.' % \
', '.join(config.tested_cpp_standards))
)
@@ -231,7 +252,8 @@
# set. These are the options that should be reflected in the build directory
# path.
options_influencing_build_path = [
- 'target_arch', 'mode', 'symbols', 'CXX', 'std', 'simulator', 'negative_testing'
+ 'target_arch', 'mode', 'symbols', 'CXX', 'std', 'simulator',
+ 'negative_testing', 'code_buffer_allocator'
]
diff --git a/src/code-buffer-vixl.cc b/src/code-buffer-vixl.cc
index 65abaea..51137e8 100644
--- a/src/code-buffer-vixl.cc
+++ b/src/code-buffer-vixl.cc
@@ -43,15 +43,17 @@
if (capacity_ == 0) {
return;
}
-#ifdef __APPLE__
+#ifdef VIXL_CODE_BUFFER_MALLOC
buffer_ = reinterpret_cast<byte*>(malloc(capacity_));
-#else
+#elif defined(VIXL_CODE_BUFFER_MMAP)
buffer_ = reinterpret_cast<byte*>(mmap(NULL,
capacity,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
-1,
0));
+#else
+#error Unknown code buffer allocator.
#endif
VIXL_CHECK(buffer_ != NULL);
// Aarch64 instructions must be word aligned, we assert the default allocator
@@ -75,24 +77,38 @@
CodeBuffer::~CodeBuffer() {
VIXL_ASSERT(!IsDirty());
if (managed_) {
-#ifdef __APPLE__
+#ifdef VIXL_CODE_BUFFER_MALLOC
free(buffer_);
-#else
+#elif defined(VIXL_CODE_BUFFER_MMAP)
munmap(buffer_, capacity_);
+#else
+#error Unknown code buffer allocator.
#endif
}
}


void CodeBuffer::SetExecutable() {
+#ifdef VIXL_CODE_BUFFER_MMAP
int ret = mprotect(buffer_, capacity_, PROT_READ | PROT_EXEC);
VIXL_CHECK(ret == 0);
+#else
+ // mprotect requires page-aligned memory blocks, which we can only guarantee
+ // with mmap.
+ VIXL_UNREACHABLE();
+#endif
}


void CodeBuffer::SetWritable() {
+#ifdef VIXL_CODE_BUFFER_MMAP
int ret = mprotect(buffer_, capacity_, PROT_READ | PROT_WRITE);
VIXL_CHECK(ret == 0);
+#else
+ // mprotect requires page-aligned memory blocks, which we can only guarantee
+ // with mmap.
+ VIXL_UNREACHABLE();
+#endif
}
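
For context on the VIXL_UNREACHABLE() branches above: mprotect operates on
whole pages and requires a page-aligned address, which mmap guarantees and
malloc does not. A minimal standalone sketch of the write-then-execute flow
the tests rely on (POSIX only; an illustration, not VIXL's test harness):

    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    int main() {
      const size_t size = 4096;  // One page; mmap memory is page-aligned.
      void* buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(buf != MAP_FAILED);

      // AArch64 'ret' is 0xd65f03c0; write it while the page is writable.
      const uint32_t ret_insn = 0xd65f03c0;
      memcpy(buf, &ret_insn, sizeof(ret_insn));

      // Flip to read+execute before running the code (W^X). This call is
      // only valid because buf is page-aligned.
      int ret = mprotect(buf, size, PROT_READ | PROT_EXEC);
      assert(ret == 0);

      // On an AArch64 host (after any required icache maintenance), buf
      // could now be cast to a function pointer and called.
      munmap(buf, size);
      return 0;
    }
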
@@ -148,14 +164,16 @@
VIXL_ASSERT(managed_);
VIXL_ASSERT(new_capacity > capacity_);
ptrdiff_t cursor_offset = GetCursorOffset();
-#ifdef __APPLE__
+#ifdef VIXL_CODE_BUFFER_MALLOC
buffer_ = static_cast<byte*>(realloc(buffer_, new_capacity));
VIXL_CHECK(buffer_ != NULL);
-#else
+#elif defined(VIXL_CODE_BUFFER_MMAP)
buffer_ = static_cast<byte*>(
mremap(buffer_, capacity_, new_capacity, MREMAP_MAYMOVE));
-#endif
VIXL_CHECK(buffer_ != MAP_FAILED);
+#else
+#error Unknown code buffer allocator.
+#endif
cursor_ = buffer_ + cursor_offset;
capacity_ = new_capacity;
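
A detail worth noting in Grow() above: both realloc and
mremap(..., MREMAP_MAYMOVE) may move the block, so the cursor has to be
rebuilt from a saved offset rather than kept as a raw pointer. A tiny
standalone illustration of the pattern (hypothetical type, not VIXL code):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    struct GrowableBuffer {
      uint8_t* data;
      uint8_t* cursor;
      size_t capacity;

      void Grow(size_t new_capacity) {
        assert(new_capacity > capacity);
        // Save the position, not the pointer: realloc may move the block.
        ptrdiff_t cursor_offset = cursor - data;
        data = static_cast<uint8_t*>(std::realloc(data, new_capacity));
        assert(data != NULL);
        // Re-anchor the cursor in the (possibly relocated) buffer.
        cursor = data + cursor_offset;
        capacity = new_capacity;
      }
    };
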
diff --git a/tools/util.py b/tools/util.py
index 06ce3bd..53baf72 100644
--- a/tools/util.py
+++ b/tools/util.py
@@ -102,8 +102,8 @@
# Query the target architecture of the compiler. The 'target' architecture of
# the compiler used to build VIXL is considered to be the 'host' architecture of
# VIXL itself.
-def GetHostArch(cxx):
- directives = GetCompilerDirectives(cxx)
+def GetHostArch(env):
+ directives = GetCompilerDirectives(env)
if "__x86_64__" in directives:
return "x86_64"
elif "__i386__" in directives:
@@ -117,8 +117,8 @@
# Class representing the compiler toolchain and version.
class CompilerInformation(object):
- def __init__(self, cxx):
- directives = GetCompilerDirectives(cxx)
+ def __init__(self, env):
+ directives = GetCompilerDirectives(env)
if '__llvm__' in directives:
major = int(directives['__clang_major__'])
minor = int(directives['__clang_minor__'])