Revert "JIT: Separate code allocation and initialization."
This reverts commit e1b36f09cb478a39ba443f6acb11cd1901c01c1d.
Reason for revert: Crashes in art::jit::JitMemoryRegion::FreeCode, observed in the libcore test run linked below.
https://logs.chromium.org/logs/art/buildbucket/cr-buildbucket.appspot.com/8900060997388741808/+/steps/test_libcore/0/stdout
Change-Id: I40259baec9acbb7889732548f8b31e0a48651cd9
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index a45f502..d30d681 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1288,24 +1288,17 @@
ScopedArenaAllocator stack_map_allocator(&arena_stack); // Will hold the stack map.
ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
jni_compiled_method);
-
- ArrayRef<const uint8_t> reserved_code;
- ArrayRef<const uint8_t> reserved_data;
- if (!code_cache->Reserve(self,
- region,
- jni_compiled_method.GetCode().size(),
- stack_map.size(),
- /* number_of_roots= */ 0,
- method,
- /*out*/ &reserved_code,
- /*out*/ &reserved_data)) {
+ const uint8_t* roots_data = code_cache->ReserveData(
+ self, region, stack_map.size(), /* number_of_roots= */ 0, method);
+ if (roots_data == nullptr) {
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
return false;
}
- const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();
// Add debug info after we know the code location but before we update entry-point.
- if (compiler_options.GenerateAnyDebugInfo()) {
+ const std::function<void(const void*)> generate_debug_info = [&](const void* code) {
+ const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code);
+ const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
debug::MethodDebugInfo info = {};
info.custom_name = "art_jni_trampoline";
info.dex_file = dex_file;
@@ -1318,26 +1311,30 @@
info.is_native_debuggable = compiler_options.GetNativeDebuggable();
info.is_optimized = true;
info.is_code_address_text_relative = false;
- info.code_address = reinterpret_cast<uintptr_t>(code);
+ info.code_address = code_address;
info.code_size = jni_compiled_method.GetCode().size();
- info.frame_size_in_bytes = jni_compiled_method.GetFrameSize();
+ info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
info.code_info = nullptr;
info.cfi = jni_compiled_method.GetCfi();
GenerateJitDebugInfo(info);
- }
+ };
- if (!code_cache->Commit(self,
- region,
- method,
- reserved_code,
- jni_compiled_method.GetCode(),
- reserved_data,
- roots,
- ArrayRef<const uint8_t>(stack_map),
- osr,
- /* has_should_deoptimize_flag= */ false,
- cha_single_implementation_list)) {
- code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
+ const void* code = code_cache->CommitCode(
+ self,
+ region,
+ method,
+ jni_compiled_method.GetCode().data(),
+ jni_compiled_method.GetCode().size(),
+ stack_map.data(),
+ stack_map.size(),
+ roots_data,
+ roots,
+ osr,
+ /* has_should_deoptimize_flag= */ false,
+ cha_single_implementation_list,
+ generate_debug_info);
+ if (code == nullptr) {
+ code_cache->ClearData(self, region, roots_data);
return false;
}
@@ -1385,23 +1382,13 @@
}
ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item);
-
- ArrayRef<const uint8_t> reserved_code;
- ArrayRef<const uint8_t> reserved_data;
- if (!code_cache->Reserve(self,
- region,
- code_allocator.GetMemory().size(),
- stack_map.size(),
- /*number_of_roots=*/codegen->GetNumberOfJitRoots(),
- method,
- /*out*/ &reserved_code,
- /*out*/ &reserved_data)) {
+ size_t number_of_roots = codegen->GetNumberOfJitRoots();
+ const uint8_t* roots_data = code_cache->ReserveData(
+ self, region, stack_map.size(), number_of_roots, method);
+ if (roots_data == nullptr) {
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
return false;
}
- const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();
- const uint8_t* roots_data = reserved_data.data();
-
std::vector<Handle<mirror::Object>> roots;
codegen->EmitJitRoots(code_allocator.GetData(), roots_data, &roots);
// The root Handle<>s filled by the codegen reference entries in the VariableSizedHandleScope.
@@ -1412,8 +1399,10 @@
}));
// Add debug info after we know the code location but before we update entry-point.
- const CompilerOptions& compiler_options = GetCompilerOptions();
- if (compiler_options.GenerateAnyDebugInfo()) {
+ const std::function<void(const void*)> generate_debug_info = [&](const void* code) {
+ const CompilerOptions& compiler_options = GetCompilerOptions();
+ const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code);
+ const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
debug::MethodDebugInfo info = {};
DCHECK(info.custom_name.empty());
info.dex_file = dex_file;
@@ -1426,26 +1415,32 @@
info.is_native_debuggable = compiler_options.GetNativeDebuggable();
info.is_optimized = true;
info.is_code_address_text_relative = false;
- info.code_address = reinterpret_cast<uintptr_t>(code);
+ info.code_address = code_address;
info.code_size = code_allocator.GetMemory().size();
- info.frame_size_in_bytes = codegen->GetFrameSize();
- info.code_info = stack_map.size() == 0 ? nullptr : stack_map.data();
+ info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
+ info.code_info = stack_map.size() == 0 ? nullptr : method_header->GetOptimizedCodeInfoPtr();
info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
GenerateJitDebugInfo(info);
- }
+ };
- if (!code_cache->Commit(self,
- region,
- method,
- reserved_code,
- code_allocator.GetMemory(),
- reserved_data,
- roots,
- ArrayRef<const uint8_t>(stack_map),
- osr,
- codegen->GetGraph()->HasShouldDeoptimizeFlag(),
- codegen->GetGraph()->GetCHASingleImplementationList())) {
- code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
+ const void* code = code_cache->CommitCode(
+ self,
+ region,
+ method,
+ code_allocator.GetMemory().data(),
+ code_allocator.GetMemory().size(),
+ stack_map.data(),
+ stack_map.size(),
+ roots_data,
+ roots,
+ osr,
+ codegen->GetGraph()->HasShouldDeoptimizeFlag(),
+ codegen->GetGraph()->GetCHASingleImplementationList(),
+ generate_debug_info);
+
+ if (code == nullptr) {
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
+ code_cache->ClearData(self, region, roots_data);
return false;
}
@@ -1482,7 +1477,7 @@
std::vector<uint8_t> elf = debug::MakeElfFileForJIT(isa, features, mini_debug_info, info);
// NB: Don't allow packing of full info since it would remove non-backtrace data.
- MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ Locks::jit_lock_->AssertHeld(Thread::Current());
const void* code_ptr = reinterpret_cast<const void*>(info.code_address);
AddNativeDebugInfoForJit(code_ptr, elf, /*allow_packing=*/ mini_debug_info);
}
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 7d8f667..6ab811b 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -323,6 +323,53 @@
return nullptr;
}
+uint8_t* JitCodeCache::CommitCode(Thread* self,
+ JitMemoryRegion* region,
+ ArtMethod* method,
+ const uint8_t* code,
+ size_t code_size,
+ const uint8_t* stack_map,
+ size_t stack_map_size,
+ const uint8_t* roots_data,
+ const std::vector<Handle<mirror::Object>>& roots,
+ bool osr,
+ bool has_should_deoptimize_flag,
+ const ArenaSet<ArtMethod*>& cha_single_implementation_list,
+ const std::function<void(const uint8_t* code)>&
+ generate_debug_info) {
+ uint8_t* result = CommitCodeInternal(self,
+ region,
+ method,
+ code,
+ code_size,
+ stack_map,
+ stack_map_size,
+ roots_data,
+ roots,
+ osr,
+ has_should_deoptimize_flag,
+ cha_single_implementation_list,
+ generate_debug_info);
+ if (result == nullptr) {
+ // Retry.
+ GarbageCollectCache(self);
+ result = CommitCodeInternal(self,
+ region,
+ method,
+ code,
+ code_size,
+ stack_map,
+ stack_map_size,
+ roots_data,
+ roots,
+ osr,
+ has_should_deoptimize_flag,
+ cha_single_implementation_list,
+ generate_debug_info);
+ }
+ return result;
+}
+
bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
bool in_collection = false;
while (collection_in_progress_) {
@@ -625,17 +672,21 @@
}
}
-bool JitCodeCache::Commit(Thread* self,
- JitMemoryRegion* region,
- ArtMethod* method,
- ArrayRef<const uint8_t> reserved_code,
- ArrayRef<const uint8_t> code,
- ArrayRef<const uint8_t> reserved_data,
- const std::vector<Handle<mirror::Object>>& roots,
- ArrayRef<const uint8_t> stack_map,
- bool osr,
- bool has_should_deoptimize_flag,
- const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
+uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
+ JitMemoryRegion* region,
+ ArtMethod* method,
+ const uint8_t* code,
+ size_t code_size,
+ const uint8_t* stack_map,
+ size_t stack_map_size,
+ const uint8_t* roots_data,
+ const std::vector<Handle<mirror::Object>>& roots,
+ bool osr,
+ bool has_should_deoptimize_flag,
+ const ArenaSet<ArtMethod*>&
+ cha_single_implementation_list,
+ const std::function<void(const uint8_t* code)>&
+ generate_debug_info) {
DCHECK(!method->IsNative() || !osr);
if (!method->IsNative()) {
@@ -644,7 +695,6 @@
DCheckRootsAreValid(roots, IsSharedRegion(*region));
}
- const uint8_t* roots_data = reserved_data.data();
size_t root_table_size = ComputeRootTableSize(roots.size());
const uint8_t* stack_map_data = roots_data + root_table_size;
@@ -652,20 +702,26 @@
// We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
// finish.
WaitForPotentialCollectionToCompleteRunnable(self);
- const uint8_t* code_ptr = region->CommitCode(
- reserved_code, code, stack_map_data, has_should_deoptimize_flag);
+ const uint8_t* code_ptr = region->AllocateCode(
+ code, code_size, stack_map_data, has_should_deoptimize_flag);
if (code_ptr == nullptr) {
- return false;
+ return nullptr;
}
OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
// Commit roots and stack maps before updating the entry point.
- if (!region->CommitData(reserved_data, roots, stack_map)) {
- return false;
+ if (!region->CommitData(roots_data, roots, stack_map, stack_map_size)) {
+ ScopedCodeCacheWrite ccw(*region);
+ uintptr_t allocation = FromCodeToAllocation(code_ptr);
+ region->FreeCode(reinterpret_cast<uint8_t*>(allocation));
+ return nullptr;
}
number_of_compilations_++;
+ // Add debug info after we know the code location but before we update entry-point.
+ generate_debug_info(code_ptr);
+
// We need to update the entry point in the runnable state for the instrumentation.
{
// The following needs to be guarded by cha_lock_ also. Otherwise it's possible that the
@@ -687,7 +743,10 @@
// Discard the code if any single-implementation assumptions are now invalid.
if (UNLIKELY(!single_impl_still_valid)) {
VLOG(jit) << "JIT discarded jitted code due to invalid single-implementation assumptions.";
- return false;
+ ScopedCodeCacheWrite ccw(*region);
+ uintptr_t allocation = FromCodeToAllocation(code_ptr);
+ region->FreeCode(reinterpret_cast<uint8_t*>(allocation));
+ return nullptr;
}
DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsJavaDebuggable())
<< "Should not be using cha on debuggable apps/runs!";
@@ -746,9 +805,16 @@
<< reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
<< reinterpret_cast<const void*>(method_header->GetEntryPoint() +
method_header->GetCodeSize());
+ histogram_code_memory_use_.AddValue(code_size);
+ if (code_size > kCodeSizeLogThreshold) {
+ LOG(INFO) << "JIT allocated "
+ << PrettySize(code_size)
+ << " for compiled code of "
+ << ArtMethod::PrettyMethod(method);
+ }
}
- return true;
+ return reinterpret_cast<uint8_t*>(method_header);
}
size_t JitCodeCache::CodeCacheSize() {
@@ -900,72 +966,38 @@
return GetCurrentRegion()->GetUsedMemoryForData();
}
-bool JitCodeCache::Reserve(Thread* self,
- JitMemoryRegion* region,
- size_t code_size,
- size_t stack_map_size,
- size_t number_of_roots,
- ArtMethod* method,
- /*out*/ArrayRef<const uint8_t>* reserved_code,
- /*out*/ArrayRef<const uint8_t>* reserved_data) {
- code_size = OatQuickMethodHeader::InstructionAlignedSize() + code_size;
- size_t data_size = RoundUp(ComputeRootTableSize(number_of_roots) + stack_map_size, sizeof(void*));
+void JitCodeCache::ClearData(Thread* self,
+ JitMemoryRegion* region,
+ const uint8_t* roots_data) {
+ MutexLock mu(self, *Locks::jit_lock_);
+ region->FreeData(roots_data);
+}
- const uint8_t* code;
- const uint8_t* data;
- // We might need to try the allocation twice (with GC in between to free up memory).
- for (int i = 0; i < 2; i++) {
- {
- ScopedThreadSuspension sts(self, kSuspended);
- MutexLock mu(self, *Locks::jit_lock_);
- WaitForPotentialCollectionToComplete(self);
- ScopedCodeCacheWrite ccw(*region);
- code = region->AllocateCode(code_size);
- data = region->AllocateData(data_size);
- }
- if (code == nullptr || data == nullptr) {
- Free(self, region, code, data);
- if (i == 0) {
- GarbageCollectCache(self);
- continue; // Retry after GC.
- } else {
- return false; // Fail.
- }
- }
- break; // Success.
+const uint8_t* JitCodeCache::ReserveData(Thread* self,
+ JitMemoryRegion* region,
+ size_t stack_map_size,
+ size_t number_of_roots,
+ ArtMethod* method) {
+ size_t table_size = ComputeRootTableSize(number_of_roots);
+ size_t size = RoundUp(stack_map_size + table_size, sizeof(void*));
+ const uint8_t* result = nullptr;
+
+ {
+ ScopedThreadSuspension sts(self, kSuspended);
+ MutexLock mu(self, *Locks::jit_lock_);
+ WaitForPotentialCollectionToComplete(self);
+ result = region->AllocateData(size);
}
- *reserved_code = ArrayRef<const uint8_t>(code, code_size);
- *reserved_data = ArrayRef<const uint8_t>(data, data_size);
MutexLock mu(self, *Locks::jit_lock_);
- histogram_code_memory_use_.AddValue(code_size);
- if (code_size > kCodeSizeLogThreshold) {
+ histogram_stack_map_memory_use_.AddValue(size);
+ if (size > kStackMapSizeLogThreshold) {
LOG(INFO) << "JIT allocated "
- << PrettySize(code_size)
- << " for compiled code of "
- << ArtMethod::PrettyMethod(method);
- }
- histogram_stack_map_memory_use_.AddValue(data_size);
- if (data_size > kStackMapSizeLogThreshold) {
- LOG(INFO) << "JIT allocated "
- << PrettySize(data_size)
+ << PrettySize(size)
<< " for stack maps of "
<< ArtMethod::PrettyMethod(method);
}
- return true;
-}
-
-void JitCodeCache::Free(Thread* self,
- JitMemoryRegion* region,
- const uint8_t* code,
- const uint8_t* data) {
- MutexLock mu(self, *Locks::jit_lock_);
- if (code != nullptr) {
- region->FreeCode(code);
- }
- if (data != nullptr) {
- region->FreeData(data);
- }
+ return result;
}
class MarkCodeClosure final : public Closure {
@@ -1653,7 +1685,7 @@
if (UNLIKELY(!data->IsCompiled())) {
// Failed to compile; the JNI compiler never fails, but the cache may be full.
jni_stubs_map_.erase(it); // Remove the entry added in NotifyCompilationOf().
- } // else Commit() updated entrypoints of all methods in the JniStubData.
+ } // else CommitCodeInternal() updated entrypoints of all methods in the JniStubData.
} else {
ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
if (info != nullptr) {
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 12425cf..ace851f 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -180,6 +180,28 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::jit_lock_);
+ // Allocate and write code and its metadata to the code cache.
+ // `cha_single_implementation_list` needs to be registered via CHA (if it's
+ // still valid), since the compiled code still needs to be invalidated if the
+ // single-implementation assumptions are violated later. This needs to be done
+ // even if `has_should_deoptimize_flag` is false, which can happen due to CHA
+ // guard elimination.
+ uint8_t* CommitCode(Thread* self,
+ JitMemoryRegion* region,
+ ArtMethod* method,
+ const uint8_t* code,
+ size_t code_size,
+ const uint8_t* stack_map,
+ size_t stack_map_size,
+ const uint8_t* roots_data,
+ const std::vector<Handle<mirror::Object>>& roots,
+ bool osr,
+ bool has_should_deoptimize_flag,
+ const ArenaSet<ArtMethod*>& cha_single_implementation_list,
+ const std::function<void(const uint8_t* code)>& generate_debug_info)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jit_lock_);
+
// Return true if the code cache contains this pc.
bool ContainsPc(const void* pc) const;
@@ -193,42 +215,20 @@
// Return the code pointer for a JNI-compiled stub if the method is in the cache, null otherwise.
const void* GetJniStubCode(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
- // Allocate a region for both code and data in the JIT code cache.
- // The reserved memory is left completely uninitialized.
- bool Reserve(Thread* self,
- JitMemoryRegion* region,
- size_t code_size,
- size_t stack_map_size,
- size_t number_of_roots,
- ArtMethod* method,
- /*out*/ArrayRef<const uint8_t>* reserved_code,
- /*out*/ArrayRef<const uint8_t>* reserved_data)
+ // Allocate a region of data that will contain a stack map of size `stack_map_size` and
+ // `number_of_roots` roots accessed by the JIT code.
+ // Return a pointer to where roots will be stored.
+ const uint8_t* ReserveData(Thread* self,
+ JitMemoryRegion* region,
+ size_t stack_map_size,
+ size_t number_of_roots,
+ ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::jit_lock_);
- // Initialize code and data of previously allocated memory.
- //
- // `cha_single_implementation_list` needs to be registered via CHA (if it's
- // still valid), since the compiled code still needs to be invalidated if the
- // single-implementation assumptions are violated later. This needs to be done
- // even if `has_should_deoptimize_flag` is false, which can happen due to CHA
- // guard elimination.
- bool Commit(Thread* self,
- JitMemoryRegion* region,
- ArtMethod* method,
- ArrayRef<const uint8_t> reserved_code, // Uninitialized destination.
- ArrayRef<const uint8_t> code, // Compiler output (source).
- ArrayRef<const uint8_t> reserved_data, // Uninitialized destination.
- const std::vector<Handle<mirror::Object>>& roots,
- ArrayRef<const uint8_t> stack_map, // Compiler output (source).
- bool osr,
- bool has_should_deoptimize_flag,
- const ArenaSet<ArtMethod*>& cha_single_implementation_list)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!Locks::jit_lock_);
-
- // Free the previously allocated memory regions.
- void Free(Thread* self, JitMemoryRegion* region, const uint8_t* code, const uint8_t* data)
+ // Clear data from the data portion of the code cache.
+ void ClearData(
+ Thread* self, JitMemoryRegion* region, const uint8_t* roots_data)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::jit_lock_);
@@ -357,6 +357,24 @@
private:
JitCodeCache();
+ // Internal version of 'CommitCode' that will not retry if the
+ // allocation fails. Return null if the allocation fails.
+ uint8_t* CommitCodeInternal(Thread* self,
+ JitMemoryRegion* region,
+ ArtMethod* method,
+ const uint8_t* code,
+ size_t code_size,
+ const uint8_t* stack_map,
+ size_t stack_map_size,
+ const uint8_t* roots_data,
+ const std::vector<Handle<mirror::Object>>& roots,
+ bool osr,
+ bool has_should_deoptimize_flag,
+ const ArenaSet<ArtMethod*>& cha_single_implementation_list,
+ const std::function<void(const uint8_t* code)>& generate_debug_info)
+ REQUIRES(!Locks::jit_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
ProfilingInfo* AddProfilingInfoInternal(Thread* self,
ArtMethod* method,
const std::vector<uint32_t>& entries)
diff --git a/runtime/jit/jit_memory_region.cc b/runtime/jit/jit_memory_region.cc
index 09980c8..43ef08e 100644
--- a/runtime/jit/jit_memory_region.cc
+++ b/runtime/jit/jit_memory_region.cc
@@ -350,37 +350,41 @@
}
}
-const uint8_t* JitMemoryRegion::CommitCode(ArrayRef<const uint8_t> reserved_code,
- ArrayRef<const uint8_t> code,
- const uint8_t* stack_map,
- bool has_should_deoptimize_flag) {
- DCHECK(IsInExecSpace(reserved_code.data()));
+const uint8_t* JitMemoryRegion::AllocateCode(const uint8_t* code,
+ size_t code_size,
+ const uint8_t* stack_map,
+ bool has_should_deoptimize_flag) {
ScopedCodeCacheWrite scc(*this);
size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
- size_t header_size = OatQuickMethodHeader::InstructionAlignedSize();
- size_t total_size = header_size + code.size();
+ // Ensure the header ends up at expected instruction alignment.
+ size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
+ size_t total_size = header_size + code_size;
// Each allocation should be on its own set of cache lines.
// `total_size` covers the OatQuickMethodHeader, the JIT generated machine code,
// and any alignment padding.
DCHECK_GT(total_size, header_size);
- DCHECK_LE(total_size, reserved_code.size());
- uint8_t* x_memory = const_cast<uint8_t*>(reserved_code.data());
- uint8_t* w_memory = const_cast<uint8_t*>(GetNonExecutableAddress(x_memory));
+ uint8_t* w_memory = reinterpret_cast<uint8_t*>(
+ mspace_memalign(exec_mspace_, alignment, total_size));
+ if (UNLIKELY(w_memory == nullptr)) {
+ return nullptr;
+ }
+ uint8_t* x_memory = GetExecutableAddress(w_memory);
// Ensure the header ends up at expected instruction alignment.
DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(w_memory + header_size), alignment);
+ used_memory_for_code_ += mspace_usable_size(w_memory);
const uint8_t* result = x_memory + header_size;
// Write the code.
- std::copy(code.begin(), code.end(), w_memory + header_size);
+ std::copy(code, code + code_size, w_memory + header_size);
// Write the header.
OatQuickMethodHeader* method_header =
OatQuickMethodHeader::FromCodePointer(w_memory + header_size);
new (method_header) OatQuickMethodHeader(
(stack_map != nullptr) ? result - stack_map : 0u,
- code.size());
+ code_size);
if (has_should_deoptimize_flag) {
method_header->SetHasShouldDeoptimizeFlag();
}
@@ -415,6 +419,7 @@
// correctness of the instructions present in the processor caches.
if (!cache_flush_success) {
PLOG(ERROR) << "Cache flush failed triggering code allocation failure";
+ FreeCode(x_memory);
return nullptr;
}
@@ -447,35 +452,24 @@
reinterpret_cast<uint32_t*>(roots_data)[length] = length;
}
-bool JitMemoryRegion::CommitData(ArrayRef<const uint8_t> reserved_data,
+bool JitMemoryRegion::CommitData(const uint8_t* readonly_roots_data,
const std::vector<Handle<mirror::Object>>& roots,
- ArrayRef<const uint8_t> stack_map) {
- DCHECK(IsInDataSpace(reserved_data.data()));
- uint8_t* roots_data = GetWritableDataAddress(reserved_data.data());
+ const uint8_t* stack_map,
+ size_t stack_map_size) {
+ uint8_t* roots_data = GetWritableDataAddress(readonly_roots_data);
size_t root_table_size = ComputeRootTableSize(roots.size());
uint8_t* stack_map_data = roots_data + root_table_size;
- DCHECK_LE(root_table_size + stack_map.size(), reserved_data.size());
FillRootTable(roots_data, roots);
- memcpy(stack_map_data, stack_map.data(), stack_map.size());
+ memcpy(stack_map_data, stack_map, stack_map_size);
// Flush data cache, as compiled code references literals in it.
// TODO(oth): establish whether this is necessary.
- if (UNLIKELY(!FlushCpuCaches(roots_data, roots_data + root_table_size + stack_map.size()))) {
+ if (UNLIKELY(!FlushCpuCaches(roots_data, roots_data + root_table_size + stack_map_size))) {
VLOG(jit) << "Failed to flush data in CommitData";
return false;
}
return true;
}
-const uint8_t* JitMemoryRegion::AllocateCode(size_t size) {
- size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
- void* result = mspace_memalign(exec_mspace_, alignment, size);
- if (UNLIKELY(result == nullptr)) {
- return nullptr;
- }
- used_memory_for_code_ += mspace_usable_size(result);
- return reinterpret_cast<uint8_t*>(GetExecutableAddress(result));
-}
-
void JitMemoryRegion::FreeCode(const uint8_t* code) {
code = GetNonExecutableAddress(code);
used_memory_for_code_ -= mspace_usable_size(code);
diff --git a/runtime/jit/jit_memory_region.h b/runtime/jit/jit_memory_region.h
index 6db931d..2bb69a7 100644
--- a/runtime/jit/jit_memory_region.h
+++ b/runtime/jit/jit_memory_region.h
@@ -79,25 +79,24 @@
// Set the footprint limit of the code cache.
void SetFootprintLimit(size_t new_footprint) REQUIRES(Locks::jit_lock_);
- const uint8_t* AllocateCode(size_t code_size) REQUIRES(Locks::jit_lock_);
+ // Copy the code into the region, and allocate an OatQuickMethodHeader.
+ // Callers should not write into the returned memory, as it may be read-only.
+ const uint8_t* AllocateCode(const uint8_t* code,
+ size_t code_size,
+ const uint8_t* stack_map,
+ bool has_should_deoptimize_flag)
+ REQUIRES(Locks::jit_lock_);
void FreeCode(const uint8_t* code) REQUIRES(Locks::jit_lock_);
const uint8_t* AllocateData(size_t data_size) REQUIRES(Locks::jit_lock_);
void FreeData(const uint8_t* data) REQUIRES(Locks::jit_lock_);
void FreeData(uint8_t* writable_data) REQUIRES(Locks::jit_lock_) = delete;
void FreeWritableData(uint8_t* writable_data) REQUIRES(Locks::jit_lock_);
- // Emit header and code into the memory pointed by `reserved_code` (despite it being const).
- // Returns pointer to copied code (within reserved_code region; after OatQuickMethodHeader).
- const uint8_t* CommitCode(ArrayRef<const uint8_t> reserved_code,
- ArrayRef<const uint8_t> code,
- const uint8_t* stack_map,
- bool has_should_deoptimize_flag)
- REQUIRES(Locks::jit_lock_);
-
- // Emit roots and stack map into the memory pointed by `roots_data` (despite it being const).
- bool CommitData(ArrayRef<const uint8_t> reserved_data,
+ // Emit roots and stack map into the memory pointed by `roots_data`.
+ bool CommitData(const uint8_t* roots_data,
const std::vector<Handle<mirror::Object>>& roots,
- ArrayRef<const uint8_t> stack_map)
+ const uint8_t* stack_map,
+ size_t stack_map_size)
REQUIRES(Locks::jit_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index 9d0883b..e41c7ee 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -50,10 +50,6 @@
return FromCodePointer(EntryPointToCodePointer(entry_point));
}
- static size_t InstructionAlignedSize() {
- return RoundUp(sizeof(OatQuickMethodHeader), GetInstructionSetAlignment(kRuntimeISA));
- }
-
OatQuickMethodHeader(const OatQuickMethodHeader&) = default;
OatQuickMethodHeader& operator=(const OatQuickMethodHeader&) = default;