Merge "Give the JIT its own arena pool to avoid lock contentions."
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 23601c3..79a6d38 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -230,10 +230,10 @@
}
// Trim maps to reduce memory usage.
- // TODO: measure how much this increases compile time.
+ // TODO: move this to an idle phase.
{
TimingLogger::ScopedTiming t2("TrimMaps", &logger);
- runtime->GetArenaPool()->TrimMaps();
+ runtime->GetJitArenaPool()->TrimMaps();
}
total_time_ += NanoTime() - start_time;
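
A note on what that trim buys, as a minimal sketch rather than ART's ArenaPool::TrimMaps(): the IdleArena/TrimIdleArenas names below are made up, but the mechanism is the usual one for mem-map backed arenas, where arenas parked on the free list keep their virtual reservation and hand their physical pages back to the kernel.

    // Illustrative sketch only, not ART code. Arenas sitting on the pool's
    // free list keep their mapping but drop their backing pages, so an idle
    // JIT contributes little to resident memory.
    #include <cstddef>
    #include <sys/mman.h>

    struct IdleArena {
      void* begin;
      size_t size;
      IdleArena* next;
    };

    void TrimIdleArenas(IdleArena* free_list) {
      for (IdleArena* arena = free_list; arena != nullptr; arena = arena->next) {
        // After MADV_DONTNEED the pages are reclaimed; later reads fault in zeroes.
        madvise(arena->begin, arena->size, MADV_DONTNEED);
      }
    }
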
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 5a9f258..13d6d62 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -861,7 +861,7 @@
const uint32_t access_flags = method->GetAccessFlags();
const InvokeType invoke_type = method->GetInvokeType();
- ArenaAllocator arena(Runtime::Current()->GetArenaPool());
+ ArenaAllocator arena(Runtime::Current()->GetJitArenaPool());
CodeVectorAllocator code_allocator(&arena);
std::unique_ptr<CodeGenerator> codegen;
{
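
For orientation, the allocator handed a pool here follows the classic arena pattern: the pool deals in large blocks, the per-compilation allocator deals in pointer bumps. A minimal sketch under made-up names (BumpAllocator is not ART's ArenaAllocator):

    // Sketch of the pattern, not ART's implementation: each compilation owns
    // one allocator, individual allocations are a pointer bump, and everything
    // is released in one shot when the allocator goes away.
    #include <cstddef>
    #include <cstdint>
    #include <memory>

    class BumpAllocator {
     public:
      explicit BumpAllocator(size_t capacity)
          : block_(new uint8_t[capacity]),
            end_(block_.get() + capacity),
            pos_(block_.get()) {}

      void* Alloc(size_t bytes) {
        bytes = (bytes + 7u) & ~static_cast<size_t>(7u);  // keep 8-byte alignment
        if (bytes > static_cast<size_t>(end_ - pos_)) {
          return nullptr;  // a real arena allocator would chain in a fresh block here
        }
        void* result = pos_;
        pos_ += bytes;
        return result;
      }

     private:
      std::unique_ptr<uint8_t[]> block_;  // freed as a whole in the destructor
      uint8_t* end_;
      uint8_t* pos_;
    };

Because every small allocation stays inside the compilation's own blocks, the only cross-thread synchronization left is whichever pool hands out and takes back those blocks.
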
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index a4b38ea..44af3f7 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -183,10 +183,10 @@
free(reinterpret_cast<void*>(memory_));
}
-MemMapArena::MemMapArena(size_t size, bool low_4gb) {
+MemMapArena::MemMapArena(size_t size, bool low_4gb, const char* name) {
std::string error_msg;
map_.reset(MemMap::MapAnonymous(
- "LinearAlloc", nullptr, size, PROT_READ | PROT_WRITE, low_4gb, false, &error_msg));
+ name, nullptr, size, PROT_READ | PROT_WRITE, low_4gb, false, &error_msg));
CHECK(map_.get() != nullptr) << error_msg;
memory_ = map_->Begin();
size_ = map_->Size();
@@ -210,9 +210,12 @@
}
}
-ArenaPool::ArenaPool(bool use_malloc, bool low_4gb)
- : use_malloc_(use_malloc), lock_("Arena pool lock", kArenaPoolLock), free_arenas_(nullptr),
- low_4gb_(low_4gb) {
+ArenaPool::ArenaPool(bool use_malloc, bool low_4gb, const char* name)
+ : use_malloc_(use_malloc),
+ lock_("Arena pool lock", kArenaPoolLock),
+ free_arenas_(nullptr),
+ low_4gb_(low_4gb),
+ name_(name) {
if (low_4gb) {
CHECK(!use_malloc) << "low4gb must use map implementation";
}
@@ -250,7 +253,7 @@
}
if (ret == nullptr) {
ret = use_malloc_ ? static_cast<Arena*>(new MallocArena(size)) :
- new MemMapArena(size, low_4gb_);
+ new MemMapArena(size, low_4gb_, name_);
}
ret->Reset();
return ret;
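
The name threaded through to MemMap::MapAnonymous above is what lets the new pool's arenas be told apart from the existing "LinearAlloc" regions when inspecting a process. As a rough illustration only, here is a hypothetical helper (not how ART plumbs it) that labels an anonymous mapping, assuming a kernel that accepts PR_SET_VMA_ANON_NAME:

    // Hypothetical helper, not ART code: label an anonymous mapping so that
    // "CompilerMetadata" and "LinearAlloc" regions show up distinctly in
    // /proc/<pid>/maps and in memory dumps.
    #include <cstddef>
    #include <sys/mman.h>
    #include <sys/prctl.h>

    void* MapNamedAnonymousRegion(size_t size, const char* name) {
      void* mem = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (mem == MAP_FAILED) {
        return nullptr;
      }
    #ifdef PR_SET_VMA
      // Best effort: an older kernel rejects this and the mapping simply stays unnamed.
      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, mem, size, name);
    #endif
      return mem;
    }
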
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 8a96571..728f897 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -261,7 +261,7 @@
class MemMapArena FINAL : public Arena {
public:
- MemMapArena(size_t size, bool low_4gb);
+ MemMapArena(size_t size, bool low_4gb, const char* name);
virtual ~MemMapArena();
void Release() OVERRIDE;
@@ -271,7 +271,9 @@
class ArenaPool {
public:
- explicit ArenaPool(bool use_malloc = true, bool low_4gb = false);
+ ArenaPool(bool use_malloc = true,
+ bool low_4gb = false,
+ const char* name = "LinearAlloc");
~ArenaPool();
Arena* AllocArena(size_t size) REQUIRES(!lock_);
void FreeArenaChain(Arena* first) REQUIRES(!lock_);
@@ -287,6 +289,7 @@
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
Arena* free_arenas_ GUARDED_BY(lock_);
const bool low_4gb_;
+ const char* name_;
DISALLOW_COPY_AND_ASSIGN(ArenaPool);
};
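
The members declared above (lock_, free_arenas_, and now name_) are the whole story of a pool: a mutex-protected free list of reusable arenas. A minimal sketch under illustrative Sketch* names, not ART's implementation, showing where every AllocArena/FreeArenaChain call serializes and therefore why a busy JIT is better served by a pool of its own:

    // Not ART's code: a pool that recycles arenas through a single
    // mutex-protected free list. Every allocation and every bulk free takes
    // the same lock, which is the contention a dedicated JIT pool avoids.
    #include <cstddef>
    #include <mutex>

    struct SketchArena {
      size_t size = 0;
      SketchArena* next = nullptr;
    };

    class SketchArenaPool {
     public:
      explicit SketchArenaPool(const char* name) : name_(name) {}
      const char* name() const { return name_; }

      SketchArena* AllocArena(size_t size) {
        std::lock_guard<std::mutex> guard(lock_);  // every caller funnels through here
        if (free_arenas_ != nullptr && free_arenas_->size >= size) {
          SketchArena* reused = free_arenas_;
          free_arenas_ = reused->next;
          reused->next = nullptr;
          return reused;
        }
        SketchArena* fresh = new SketchArena;  // ART would mmap (or malloc) and tag with name_
        fresh->size = size;
        return fresh;
      }

      void FreeArenaChain(SketchArena* first) {
        std::lock_guard<std::mutex> guard(lock_);
        while (first != nullptr) {
          SketchArena* next = first->next;
          first->next = free_arenas_;
          free_arenas_ = first;
          first = next;
        }
      }

     private:
      const char* const name_;
      std::mutex lock_;
      SketchArena* free_arenas_ = nullptr;
    };
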
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index eb5455a..47ef214 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -316,6 +316,7 @@
linear_alloc_.reset();
low_4gb_arena_pool_.reset();
arena_pool_.reset();
+ jit_arena_pool_.reset();
MemMap::Shutdown();
ATRACE_END();
@@ -1019,10 +1020,13 @@
// Use MemMap arena pool for jit, malloc otherwise. Malloc arenas are faster to allocate but
// can't be trimmed as easily.
const bool use_malloc = IsAotCompiler();
- arena_pool_.reset(new ArenaPool(use_malloc, false));
+ arena_pool_.reset(new ArenaPool(use_malloc, /* low_4gb */ false));
+ jit_arena_pool_.reset(
+ new ArenaPool(/* use_malloc */ false, /* low_4gb */ false, "CompilerMetadata"));
+
if (IsAotCompiler() && Is64BitInstructionSet(kRuntimeISA)) {
// 4gb, no malloc. Explanation in header.
- low_4gb_arena_pool_.reset(new ArenaPool(false, true));
+ low_4gb_arena_pool_.reset(new ArenaPool(/* use_malloc */ false, /* low_4gb */ true));
}
linear_alloc_.reset(CreateLinearAlloc());
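
With the wiring above, JIT compilations only ever contend with other JIT compilations for arenas. The effect can be seen in isolation with a toy sketch that reduces each pool to the lock its AllocArena would take; this is an illustration, not an ART benchmark:

    // Toy illustration only: four "JIT" workers and four "runtime" workers
    // either share one pool lock or get one lock each.
    #include <chrono>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    struct ToyPool {
      std::mutex lock;
      long allocations = 0;
      void AllocArena() {
        std::lock_guard<std::mutex> guard(lock);
        ++allocations;  // stands in for free-list bookkeeping
      }
    };

    long long MillisForWorkers(ToyPool& jit_pool, ToyPool& other_pool) {
      const auto start = std::chrono::steady_clock::now();
      std::vector<std::thread> workers;
      for (int i = 0; i < 4; ++i) {
        workers.emplace_back([&jit_pool] {
          for (int n = 0; n < 500000; ++n) jit_pool.AllocArena();
        });
        workers.emplace_back([&other_pool] {
          for (int n = 0; n < 500000; ++n) other_pool.AllocArena();
        });
      }
      for (std::thread& t : workers) t.join();
      return std::chrono::duration_cast<std::chrono::milliseconds>(
          std::chrono::steady_clock::now() - start).count();
    }

    int main() {
      ToyPool shared;
      std::printf("one shared pool : %lld ms\n", MillisForWorkers(shared, shared));
      ToyPool jit_pool, runtime_pool;
      std::printf("dedicated pools : %lld ms\n", MillisForWorkers(jit_pool, runtime_pool));
      return 0;
    }

Built with -pthread: the shared-pool run funnels all eight workers through one mutex, while the dedicated-pool run splits them four and four.
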
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 8aac4ce..83e77d2 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -561,6 +561,9 @@
ArenaPool* GetArenaPool() {
return arena_pool_.get();
}
+ ArenaPool* GetJitArenaPool() {
+ return jit_arena_pool_.get();
+ }
const ArenaPool* GetArenaPool() const {
return arena_pool_.get();
}
@@ -669,6 +672,7 @@
gc::Heap* heap_;
+ std::unique_ptr<ArenaPool> jit_arena_pool_;
std::unique_ptr<ArenaPool> arena_pool_;
// Special low 4gb pool for compiler linear alloc. We need ArtFields to be in low 4gb if we are
// compiling using a 32 bit image on a 64 bit compiler in case we resolve things in the image