Merge "Add more info to monitor contention logging" into nyc-dev
diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc
index 4dd8024..b7e000a 100644
--- a/compiler/debug/elf_debug_writer.cc
+++ b/compiler/debug/elf_debug_writer.cc
@@ -110,7 +110,7 @@
}
template <typename ElfTypes>
-static ArrayRef<const uint8_t> WriteDebugElfFileForMethodsInternal(
+static std::vector<uint8_t> WriteDebugElfFileForMethodsInternal(
InstructionSet isa,
const InstructionSetFeatures* features,
const ArrayRef<const MethodDebugInfo>& method_infos) {
@@ -126,14 +126,10 @@
false /* write_oat_patches */);
builder->End();
CHECK(builder->Good());
- // Make a copy of the buffer. We want to shrink it anyway.
- uint8_t* result = new uint8_t[buffer.size()];
- CHECK(result != nullptr);
- memcpy(result, buffer.data(), buffer.size());
- return ArrayRef<const uint8_t>(result, buffer.size());
+ return buffer;
}
-ArrayRef<const uint8_t> WriteDebugElfFileForMethods(
+std::vector<uint8_t> WriteDebugElfFileForMethods(
InstructionSet isa,
const InstructionSetFeatures* features,
const ArrayRef<const MethodDebugInfo>& method_infos) {
@@ -145,7 +141,7 @@
}
template <typename ElfTypes>
-static ArrayRef<const uint8_t> WriteDebugElfFileForClassesInternal(
+static std::vector<uint8_t> WriteDebugElfFileForClassesInternal(
InstructionSet isa,
const InstructionSetFeatures* features,
const ArrayRef<mirror::Class*>& types)
@@ -164,16 +160,12 @@
builder->End();
CHECK(builder->Good());
- // Make a copy of the buffer. We want to shrink it anyway.
- uint8_t* result = new uint8_t[buffer.size()];
- CHECK(result != nullptr);
- memcpy(result, buffer.data(), buffer.size());
- return ArrayRef<const uint8_t>(result, buffer.size());
+ return buffer;
}
-ArrayRef<const uint8_t> WriteDebugElfFileForClasses(InstructionSet isa,
- const InstructionSetFeatures* features,
- const ArrayRef<mirror::Class*>& types) {
+std::vector<uint8_t> WriteDebugElfFileForClasses(InstructionSet isa,
+ const InstructionSetFeatures* features,
+ const ArrayRef<mirror::Class*>& types) {
if (Is64BitInstructionSet(isa)) {
return WriteDebugElfFileForClassesInternal<ElfTypes64>(isa, features, types);
} else {
diff --git a/compiler/debug/elf_debug_writer.h b/compiler/debug/elf_debug_writer.h
index 736370e..6f52249 100644
--- a/compiler/debug/elf_debug_writer.h
+++ b/compiler/debug/elf_debug_writer.h
@@ -47,12 +47,12 @@
size_t text_section_size,
const ArrayRef<const MethodDebugInfo>& method_infos);
-ArrayRef<const uint8_t> WriteDebugElfFileForMethods(
+std::vector<uint8_t> WriteDebugElfFileForMethods(
InstructionSet isa,
const InstructionSetFeatures* features,
const ArrayRef<const MethodDebugInfo>& method_infos);
-ArrayRef<const uint8_t> WriteDebugElfFileForClasses(
+std::vector<uint8_t> WriteDebugElfFileForClasses(
InstructionSet isa,
const InstructionSetFeatures* features,
const ArrayRef<mirror::Class*>& types)
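Note on the two hunks above: the manual new[] + memcpy copy wrapped in an ArrayRef is replaced by returning the std::vector<uint8_t> itself. A minimal standalone sketch of why that is safe, not ART code (BuildBlob is an illustrative name): the returned vector is moved or elided, so no element copy and no manual delete[] is needed.

    #include <cstdint>
    #include <vector>

    // Build a byte buffer and return it by value: the caller receives the
    // storage via move construction (or copy elision), and the vector frees it.
    static std::vector<uint8_t> BuildBlob() {
      std::vector<uint8_t> buffer;
      for (int i = 0; i < 64; ++i) {
        buffer.push_back(static_cast<uint8_t>(i));
      }
      return buffer;  // ownership transfers to the caller, no memcpy into a raw array
    }

    int main() {
      std::vector<uint8_t> blob = BuildBlob();
      return blob.size() == 64 ? 0 : 1;
    }
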
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 5294068..8bdff21 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -416,23 +416,27 @@
type ## _ENTRYPOINT_OFFSET(4, offset)); \
}
-const std::vector<uint8_t>* CompilerDriver::CreateJniDlsymLookup() const {
+std::unique_ptr<const std::vector<uint8_t>> CompilerDriver::CreateJniDlsymLookup() const {
CREATE_TRAMPOLINE(JNI, kJniAbi, pDlsymLookup)
}
-const std::vector<uint8_t>* CompilerDriver::CreateQuickGenericJniTrampoline() const {
+std::unique_ptr<const std::vector<uint8_t>> CompilerDriver::CreateQuickGenericJniTrampoline()
+ const {
CREATE_TRAMPOLINE(QUICK, kQuickAbi, pQuickGenericJniTrampoline)
}
-const std::vector<uint8_t>* CompilerDriver::CreateQuickImtConflictTrampoline() const {
+std::unique_ptr<const std::vector<uint8_t>> CompilerDriver::CreateQuickImtConflictTrampoline()
+ const {
CREATE_TRAMPOLINE(QUICK, kQuickAbi, pQuickImtConflictTrampoline)
}
-const std::vector<uint8_t>* CompilerDriver::CreateQuickResolutionTrampoline() const {
+std::unique_ptr<const std::vector<uint8_t>> CompilerDriver::CreateQuickResolutionTrampoline()
+ const {
CREATE_TRAMPOLINE(QUICK, kQuickAbi, pQuickResolutionTrampoline)
}
-const std::vector<uint8_t>* CompilerDriver::CreateQuickToInterpreterBridge() const {
+std::unique_ptr<const std::vector<uint8_t>> CompilerDriver::CreateQuickToInterpreterBridge()
+ const {
CREATE_TRAMPOLINE(QUICK, kQuickAbi, pQuickToInterpreterBridge)
}
#undef CREATE_TRAMPOLINE
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 905f84d..4308eac 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -161,11 +161,11 @@
}
// Generate the trampolines that are invoked by unresolved direct methods.
- const std::vector<uint8_t>* CreateJniDlsymLookup() const;
- const std::vector<uint8_t>* CreateQuickGenericJniTrampoline() const;
- const std::vector<uint8_t>* CreateQuickImtConflictTrampoline() const;
- const std::vector<uint8_t>* CreateQuickResolutionTrampoline() const;
- const std::vector<uint8_t>* CreateQuickToInterpreterBridge() const;
+ std::unique_ptr<const std::vector<uint8_t>> CreateJniDlsymLookup() const;
+ std::unique_ptr<const std::vector<uint8_t>> CreateQuickGenericJniTrampoline() const;
+ std::unique_ptr<const std::vector<uint8_t>> CreateQuickImtConflictTrampoline() const;
+ std::unique_ptr<const std::vector<uint8_t>> CreateQuickResolutionTrampoline() const;
+ std::unique_ptr<const std::vector<uint8_t>> CreateQuickToInterpreterBridge() const;
CompiledClass* GetCompiledClass(ClassReference ref) const
REQUIRES(!compiled_classes_lock_);
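Note on the trampoline factories above: they now return std::unique_ptr<const std::vector<uint8_t>> instead of an owning raw pointer, so the caller can neither leak nor double-free the bytes. A standalone sketch of that ownership shape; MakeTrampoline is an illustrative stand-in for the Create*() methods, not an ART API.

    #include <cstdint>
    #include <memory>
    #include <vector>

    // Factory returning read-only bytes with explicit, transferable ownership.
    static std::unique_ptr<const std::vector<uint8_t>> MakeTrampoline() {
      std::unique_ptr<std::vector<uint8_t>> bytes(new std::vector<uint8_t>());
      bytes->push_back(0xCC);  // placeholder encoding
      // unique_ptr<vector<uint8_t>> converts to unique_ptr<const vector<uint8_t>>;
      // the explicit move mirrors the `return std::move(entry_stub);` pattern
      // used later in trampoline_compiler.cc.
      return std::move(bytes);
    }

    int main() {
      std::unique_ptr<const std::vector<uint8_t>> tramp = MakeTrampoline();
      return static_cast<int>(tramp->size());  // freed automatically at scope exit
    }
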
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index cda2e27..c8dfc93 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -69,9 +69,9 @@
DCHECK(jit_compiler != nullptr);
if (jit_compiler->GetCompilerOptions()->GetGenerateDebugInfo()) {
const ArrayRef<mirror::Class*> types_array(types, count);
- ArrayRef<const uint8_t> elf_file = debug::WriteDebugElfFileForClasses(
+ std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForClasses(
kRuntimeISA, jit_compiler->GetCompilerDriver()->GetInstructionSetFeatures(), types_array);
- CreateJITCodeEntry(std::unique_ptr<const uint8_t[]>(elf_file.data()), elf_file.size());
+ CreateJITCodeEntry(std::move(elf_file));
}
}
diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc
index 8832c84..05c85e0 100644
--- a/compiler/jni/jni_cfi_test.cc
+++ b/compiler/jni/jni_cfi_test.cc
@@ -18,6 +18,7 @@
#include <vector>
#include "arch/instruction_set.h"
+#include "base/arena_allocator.h"
#include "cfi_test.h"
#include "gtest/gtest.h"
#include "jni/quick/calling_convention.h"
@@ -42,15 +43,19 @@
const bool is_static = true;
const bool is_synchronized = false;
const char* shorty = "IIFII";
+
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+
std::unique_ptr<JniCallingConvention> jni_conv(
- JniCallingConvention::Create(is_static, is_synchronized, shorty, isa));
+ JniCallingConvention::Create(&arena, is_static, is_synchronized, shorty, isa));
std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
- ManagedRuntimeCallingConvention::Create(is_static, is_synchronized, shorty, isa));
+ ManagedRuntimeCallingConvention::Create(&arena, is_static, is_synchronized, shorty, isa));
const int frame_size(jni_conv->FrameSize());
const std::vector<ManagedRegister>& callee_save_regs = jni_conv->CalleeSaveRegisters();
// Assemble the method.
- std::unique_ptr<Assembler> jni_asm(Assembler::Create(isa));
+ std::unique_ptr<Assembler> jni_asm(Assembler::Create(&arena, isa));
jni_asm->cfi().SetEnabled(true);
jni_asm->BuildFrame(frame_size, mr_conv->MethodRegister(),
callee_save_regs, mr_conv->EntrySpills());
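Note on the hunk above: the test (and, further down, the JNI compiler) now creates a stack-scoped ArenaPool/ArenaAllocator pair and passes the allocator into everything built during compilation, so all of that memory is released together when the scope ends. A toy standalone sketch of the scoping pattern; ToyArena is a stand-in, not ART's ArenaAllocator (alignment and capacity checks omitted).

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Minimal bump allocator: no individual frees, everything dies with the arena.
    class ToyArena {
     public:
      explicit ToyArena(size_t capacity) : storage_(capacity), used_(0) {}
      void* Alloc(size_t bytes) {
        void* result = storage_.data() + used_;
        used_ += bytes;  // bump only; released when the arena is destroyed
        return result;
      }
     private:
      std::vector<uint8_t> storage_;
      size_t used_;
    };

    int main() {
      ToyArena arena(1024);  // plays the role of ArenaPool + ArenaAllocator on the stack
      int* scratch = static_cast<int*>(arena.Alloc(sizeof(int)));
      *scratch = 42;
      return *scratch == 42 ? 0 : 1;
    }  // arena goes out of scope; all arena-backed allocations vanish at once
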
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index cef8c5d..e21f554 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -46,37 +46,51 @@
// Managed runtime calling convention
-ManagedRuntimeCallingConvention* ManagedRuntimeCallingConvention::Create(
- bool is_static, bool is_synchronized, const char* shorty, InstructionSet instruction_set) {
+std::unique_ptr<ManagedRuntimeCallingConvention> ManagedRuntimeCallingConvention::Create(
+ ArenaAllocator* arena,
+ bool is_static,
+ bool is_synchronized,
+ const char* shorty,
+ InstructionSet instruction_set) {
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
case kThumb2:
- return new arm::ArmManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+ return std::unique_ptr<ManagedRuntimeCallingConvention>(
+ new (arena) arm::ArmManagedRuntimeCallingConvention(is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
- return new arm64::Arm64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+ return std::unique_ptr<ManagedRuntimeCallingConvention>(
+ new (arena) arm64::Arm64ManagedRuntimeCallingConvention(
+ is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
- return new mips::MipsManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+ return std::unique_ptr<ManagedRuntimeCallingConvention>(
+ new (arena) mips::MipsManagedRuntimeCallingConvention(
+ is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
- return new mips64::Mips64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+ return std::unique_ptr<ManagedRuntimeCallingConvention>(
+ new (arena) mips64::Mips64ManagedRuntimeCallingConvention(
+ is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86:
- return new x86::X86ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+ return std::unique_ptr<ManagedRuntimeCallingConvention>(
+ new (arena) x86::X86ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
- return new x86_64::X86_64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+ return std::unique_ptr<ManagedRuntimeCallingConvention>(
+ new (arena) x86_64::X86_64ManagedRuntimeCallingConvention(
+ is_static, is_synchronized, shorty));
#endif
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return nullptr;
+ UNREACHABLE();
}
}
@@ -132,38 +146,46 @@
// JNI calling convention
-JniCallingConvention* JniCallingConvention::Create(bool is_static, bool is_synchronized,
- const char* shorty,
- InstructionSet instruction_set) {
+std::unique_ptr<JniCallingConvention> JniCallingConvention::Create(ArenaAllocator* arena,
+ bool is_static,
+ bool is_synchronized,
+ const char* shorty,
+ InstructionSet instruction_set) {
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
case kThumb2:
- return new arm::ArmJniCallingConvention(is_static, is_synchronized, shorty);
+ return std::unique_ptr<JniCallingConvention>(
+ new (arena) arm::ArmJniCallingConvention(is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
- return new arm64::Arm64JniCallingConvention(is_static, is_synchronized, shorty);
+ return std::unique_ptr<JniCallingConvention>(
+ new (arena) arm64::Arm64JniCallingConvention(is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
- return new mips::MipsJniCallingConvention(is_static, is_synchronized, shorty);
+ return std::unique_ptr<JniCallingConvention>(
+ new (arena) mips::MipsJniCallingConvention(is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
- return new mips64::Mips64JniCallingConvention(is_static, is_synchronized, shorty);
+ return std::unique_ptr<JniCallingConvention>(
+ new (arena) mips64::Mips64JniCallingConvention(is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86:
- return new x86::X86JniCallingConvention(is_static, is_synchronized, shorty);
+ return std::unique_ptr<JniCallingConvention>(
+ new (arena) x86::X86JniCallingConvention(is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
- return new x86_64::X86_64JniCallingConvention(is_static, is_synchronized, shorty);
+ return std::unique_ptr<JniCallingConvention>(
+ new (arena) x86_64::X86_64JniCallingConvention(is_static, is_synchronized, shorty));
#endif
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return nullptr;
+ UNREACHABLE();
}
}
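Note on the Create() overloads above: the concrete calling convention is constructed with a class-scoped placement operator new that draws from the arena, wrapped in std::unique_ptr, and the switch ends with UNREACHABLE() after LOG(FATAL) so no dummy nullptr return is needed. A standalone sketch of the allocation shape; ToyArena, ArenaManaged and Convention are stand-ins for ArenaAllocator, DeletableArenaObject and the calling-convention classes (alignment handling omitted).

    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <vector>

    class ToyArena {
     public:
      explicit ToyArena(size_t capacity) : storage_(capacity), used_(0) {}
      void* Alloc(size_t bytes) {
        void* result = storage_.data() + used_;
        used_ += bytes;  // bump pointer; freed only when the arena dies
        return result;
      }
     private:
      std::vector<uint8_t> storage_;
      size_t used_;
    };

    // Base class in the spirit of DeletableArenaObject: memory comes from the
    // arena, and `delete` only runs the destructor (it must not free anything).
    class ArenaManaged {
     public:
      virtual ~ArenaManaged() {}
      static void* operator new(size_t size, ToyArena* arena) { return arena->Alloc(size); }
      static void operator delete(void*) {}  // storage is owned by the arena
    };

    class Convention : public ArenaManaged {
     public:
      explicit Convention(bool is_static) : is_static_(is_static) {}
      bool is_static() const { return is_static_; }
     private:
      bool is_static_;
    };

    static std::unique_ptr<Convention> Create(ToyArena* arena, bool is_static) {
      return std::unique_ptr<Convention>(new (arena) Convention(is_static));
    }

    int main() {
      ToyArena arena(256);
      std::unique_ptr<Convention> conv = Create(&arena, /*is_static=*/true);
      return conv->is_static() ? 0 : 1;  // unique_ptr runs the destructor; the arena frees the bytes
    }
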
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index 243d124..2c4b15c 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -18,6 +18,8 @@
#define ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_
#include <vector>
+
+#include "base/arena_object.h"
#include "handle_scope.h"
#include "primitive.h"
#include "thread.h"
@@ -26,7 +28,7 @@
namespace art {
// Top-level abstraction for different calling conventions.
-class CallingConvention {
+class CallingConvention : public DeletableArenaObject<kArenaAllocCallingConvention> {
public:
bool IsReturnAReference() const { return shorty_[0] == 'L'; }
@@ -221,9 +223,11 @@
// | { Method* } | <-- SP
class ManagedRuntimeCallingConvention : public CallingConvention {
public:
- static ManagedRuntimeCallingConvention* Create(bool is_static, bool is_synchronized,
- const char* shorty,
- InstructionSet instruction_set);
+ static std::unique_ptr<ManagedRuntimeCallingConvention> Create(ArenaAllocator* arena,
+ bool is_static,
+ bool is_synchronized,
+ const char* shorty,
+ InstructionSet instruction_set);
// Register that holds the incoming method argument
virtual ManagedRegister MethodRegister() = 0;
@@ -249,7 +253,9 @@
virtual const ManagedRegisterEntrySpills& EntrySpills() = 0;
protected:
- ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty,
+ ManagedRuntimeCallingConvention(bool is_static,
+ bool is_synchronized,
+ const char* shorty,
size_t frame_pointer_size)
: CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size) {}
};
@@ -270,8 +276,11 @@
// callee saves for frames above this one.
class JniCallingConvention : public CallingConvention {
public:
- static JniCallingConvention* Create(bool is_static, bool is_synchronized, const char* shorty,
- InstructionSet instruction_set);
+ static std::unique_ptr<JniCallingConvention> Create(ArenaAllocator* arena,
+ bool is_static,
+ bool is_synchronized,
+ const char* shorty,
+ InstructionSet instruction_set);
// Size of frame excluding space for outgoing args (its assumed Method* is
// always at the bottom of a frame, but this doesn't work for outgoing
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index b8cda24..27714b8 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -22,6 +22,7 @@
#include <fstream>
#include "art_method.h"
+#include "base/arena_allocator.h"
#include "base/logging.h"
#include "base/macros.h"
#include "calling_convention.h"
@@ -69,13 +70,18 @@
InstructionSet instruction_set = driver->GetInstructionSet();
const InstructionSetFeatures* instruction_set_features = driver->GetInstructionSetFeatures();
const bool is_64_bit_target = Is64BitInstructionSet(instruction_set);
+
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+
// Calling conventions used to iterate over parameters to method
std::unique_ptr<JniCallingConvention> main_jni_conv(
- JniCallingConvention::Create(is_static, is_synchronized, shorty, instruction_set));
+ JniCallingConvention::Create(&arena, is_static, is_synchronized, shorty, instruction_set));
bool reference_return = main_jni_conv->IsReturnAReference();
std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
- ManagedRuntimeCallingConvention::Create(is_static, is_synchronized, shorty, instruction_set));
+ ManagedRuntimeCallingConvention::Create(
+ &arena, is_static, is_synchronized, shorty, instruction_set));
// Calling conventions to call into JNI method "end" possibly passing a returned reference, the
// method and the current thread.
@@ -90,11 +96,12 @@
jni_end_shorty = "V";
}
- std::unique_ptr<JniCallingConvention> end_jni_conv(
- JniCallingConvention::Create(is_static, is_synchronized, jni_end_shorty, instruction_set));
+ std::unique_ptr<JniCallingConvention> end_jni_conv(JniCallingConvention::Create(
+ &arena, is_static, is_synchronized, jni_end_shorty, instruction_set));
// Assembler that holds generated instructions
- std::unique_ptr<Assembler> jni_asm(Assembler::Create(instruction_set, instruction_set_features));
+ std::unique_ptr<Assembler> jni_asm(
+ Assembler::Create(&arena, instruction_set, instruction_set_features));
jni_asm->cfi().SetEnabled(driver->GetCompilerOptions().GenerateAnyDebugInfo());
// Offsets into data structures
diff --git a/compiler/linker/arm/relative_patcher_thumb2.cc b/compiler/linker/arm/relative_patcher_thumb2.cc
index 582ecb3..fa49fc4 100644
--- a/compiler/linker/arm/relative_patcher_thumb2.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2.cc
@@ -79,7 +79,9 @@
std::vector<uint8_t> Thumb2RelativePatcher::CompileThunkCode() {
// The thunk just uses the entry point in the ArtMethod. This works even for calls
// to the generic JNI and interpreter trampolines.
- arm::Thumb2Assembler assembler;
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ arm::Thumb2Assembler assembler(&arena);
assembler.LoadFromOffset(
arm::kLoadWord, arm::PC, arm::R0,
ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index e3e3121..b4ecbd8 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -247,7 +247,9 @@
std::vector<uint8_t> Arm64RelativePatcher::CompileThunkCode() {
// The thunk just uses the entry point in the ArtMethod. This works even for calls
// to the generic JNI and interpreter trampolines.
- arm64::Arm64Assembler assembler;
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ arm64::Arm64Assembler assembler(&arena);
Offset offset(ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArm64PointerSize).Int32Value());
assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0));
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 25c671e..e804bee 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -1407,7 +1407,7 @@
offset = CompiledCode::AlignCode(offset, instruction_set); \
adjusted_offset = offset + CompiledCode::CodeDelta(instruction_set); \
oat_header_->Set ## fn_name ## Offset(adjusted_offset); \
- field.reset(compiler_driver_->Create ## fn_name()); \
+ field = compiler_driver_->Create ## fn_name(); \
offset += field->size();
DO_TRAMPOLINE(jni_dlsym_lookup_, JniDlsymLookup);
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index a771cc1..e7fa4e4 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -187,7 +187,8 @@
void CodeGenerator::GenerateSlowPaths() {
size_t code_start = 0;
- for (SlowPathCode* slow_path : slow_paths_) {
+ for (const std::unique_ptr<SlowPathCode>& slow_path_unique_ptr : slow_paths_) {
+ SlowPathCode* slow_path = slow_path_unique_ptr.get();
current_slow_path_ = slow_path;
if (disasm_info_ != nullptr) {
code_start = GetAssembler()->CodeSize();
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 87832a2..d69c410 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -67,7 +67,7 @@
DISALLOW_COPY_AND_ASSIGN(CodeAllocator);
};
-class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
+class SlowPathCode : public DeletableArenaObject<kArenaAllocSlowPaths> {
public:
explicit SlowPathCode(HInstruction* instruction) : instruction_(instruction) {
for (size_t i = 0; i < kMaximumNumberOfExpectedRegisters; ++i) {
@@ -205,7 +205,7 @@
virtual const Assembler& GetAssembler() const = 0;
virtual size_t GetWordSize() const = 0;
virtual size_t GetFloatingPointSpillSlotSize() const = 0;
- virtual uintptr_t GetAddressOf(HBasicBlock* block) const = 0;
+ virtual uintptr_t GetAddressOf(HBasicBlock* block) = 0;
void InitializeCodeGeneration(size_t number_of_spill_slots,
size_t maximum_number_of_live_core_registers,
size_t maximum_number_of_live_fpu_registers,
@@ -298,8 +298,9 @@
// save live registers, which may be needed by the runtime to set catch phis.
bool IsImplicitNullCheckAllowed(HNullCheck* null_check) const;
+ // TODO: Avoid creating the `std::unique_ptr` here.
void AddSlowPath(SlowPathCode* slow_path) {
- slow_paths_.push_back(slow_path);
+ slow_paths_.push_back(std::unique_ptr<SlowPathCode>(slow_path));
}
void BuildStackMaps(MemoryRegion region, const DexFile::CodeItem& code_item);
@@ -617,7 +618,7 @@
HGraph* const graph_;
const CompilerOptions& compiler_options_;
- ArenaVector<SlowPathCode*> slow_paths_;
+ ArenaVector<std::unique_ptr<SlowPathCode>> slow_paths_;
// The current slow-path that we're generating code for.
SlowPathCode* current_slow_path_;
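Note on the hunks above: SlowPathCode now derives from DeletableArenaObject and slow_paths_ stores std::unique_ptr<SlowPathCode>, so the container owns the slow paths, and GenerateSlowPaths() iterates by const reference to the unique_ptr because unique_ptr is move-only and cannot be copied. A small standalone sketch of that container/ownership shape, with a plain SlowPath stand-in.

    #include <memory>
    #include <vector>

    struct SlowPath {
      virtual ~SlowPath() {}
      virtual int Emit() const { return 0; }
    };

    int main() {
      std::vector<std::unique_ptr<SlowPath>> slow_paths;
      slow_paths.push_back(std::unique_ptr<SlowPath>(new SlowPath()));  // container takes ownership

      int emitted = 0;
      // unique_ptr is move-only, so iterate by (const) reference, never by value.
      for (const std::unique_ptr<SlowPath>& path : slow_paths) {
        emitted += path->Emit();
      }
      return emitted;
    }
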
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 3049128..45d23fe 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -777,7 +777,7 @@
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this),
- assembler_(),
+ assembler_(graph->GetArena()),
isa_features_(isa_features),
uint32_literals_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 144d58d..0020f7b 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -339,7 +339,7 @@
return assembler_;
}
- uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
return GetLabelOf(block)->Position();
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index c978aaa..e8e6b68 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -899,11 +899,12 @@
callee_saved_fp_registers.list(),
compiler_options,
stats),
- block_labels_(nullptr),
+ block_labels_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this),
+ assembler_(graph->GetArena()),
isa_features_(isa_features),
uint32_literals_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -927,7 +928,7 @@
#define __ GetVIXLAssembler()->
void CodeGeneratorARM64::EmitJumpTables() {
- for (auto jump_table : jump_tables_) {
+ for (auto&& jump_table : jump_tables_) {
jump_table->EmitTable(this);
}
}
@@ -4783,8 +4784,7 @@
__ B(codegen_->GetLabelOf(default_block));
}
} else {
- JumpTableARM64* jump_table = new (GetGraph()->GetArena()) JumpTableARM64(switch_instr);
- codegen_->AddJumpTable(jump_table);
+ JumpTableARM64* jump_table = codegen_->CreateJumpTable(switch_instr);
UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index ec46a34..422963e 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -83,7 +83,7 @@
DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM64);
};
-class JumpTableARM64 : public ArenaObject<kArenaAllocSwitchTable> {
+class JumpTableARM64 : public DeletableArenaObject<kArenaAllocSwitchTable> {
public:
explicit JumpTableARM64(HPackedSwitch* switch_instr)
: switch_instr_(switch_instr), table_start_() {}
@@ -352,8 +352,9 @@
void Bind(HBasicBlock* block) OVERRIDE;
- vixl::Label* GetLabelOf(HBasicBlock* block) const {
- return CommonGetLabelOf<vixl::Label>(block_labels_, block);
+ vixl::Label* GetLabelOf(HBasicBlock* block) {
+ block = FirstNonEmptyBlock(block);
+ return &(block_labels_[block->GetBlockId()]);
}
size_t GetWordSize() const OVERRIDE {
@@ -365,7 +366,7 @@
return kArm64WordSize;
}
- uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
vixl::Label* block_entry_label = GetLabelOf(block);
DCHECK(block_entry_label->IsBound());
return block_entry_label->location();
@@ -413,11 +414,12 @@
}
void Initialize() OVERRIDE {
- block_labels_ = CommonInitializeLabels<vixl::Label>();
+ block_labels_.resize(GetGraph()->GetBlocks().size());
}
- void AddJumpTable(JumpTableARM64* jump_table) {
- jump_tables_.push_back(jump_table);
+ JumpTableARM64* CreateJumpTable(HPackedSwitch* switch_instr) {
+ jump_tables_.emplace_back(new (GetGraph()->GetArena()) JumpTableARM64(switch_instr));
+ return jump_tables_.back().get();
}
void Finalize(CodeAllocator* allocator) OVERRIDE;
@@ -616,9 +618,10 @@
void EmitJumpTables();
// Labels for each block that will be compiled.
- vixl::Label* block_labels_; // Indexed by block id.
+ // We use a deque so that the `vixl::Label` objects do not move in memory.
+ ArenaDeque<vixl::Label> block_labels_; // Indexed by block id.
vixl::Label frame_entry_label_;
- ArenaVector<JumpTableARM64*> jump_tables_;
+ ArenaVector<std::unique_ptr<JumpTableARM64>> jump_tables_;
LocationsBuilderARM64 location_builder_;
InstructionCodeGeneratorARM64 instruction_visitor_;
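Note on the block_labels_ change above: the raw Label array becomes a deque indexed by block id, and the diff's own comment gives the reason, namely that elements of a deque keep a stable address as the container grows, unlike std::vector. A minimal standalone illustration; std::deque stands in for ArenaDeque and int for vixl::Label.

    #include <deque>

    int main() {
      std::deque<int> labels;            // stand-in for ArenaDeque<vixl::Label>
      labels.resize(1);
      int* first = &labels[0];           // a pointer the assembler has already handed out
      for (int i = 0; i < 1000; ++i) {
        labels.push_back(i);             // appending to a deque never moves existing elements
      }
      // Still 0: deque growth did not move the element; with std::vector it could dangle.
      return (first == &labels[0]) ? 0 : 1;
    }
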
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 185397c..12d1164 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -471,7 +471,7 @@
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this),
- assembler_(&isa_features),
+ assembler_(graph->GetArena(), &isa_features),
isa_features_(isa_features) {
// Save RA (containing the return address) to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(RA));
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 5e6fec8..435a869 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -275,7 +275,7 @@
size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return kMipsDoublewordSize; }
- uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
return assembler_.GetLabelLocation(GetLabelOf(block));
}
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 246f5b7..56ac38e 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -417,6 +417,7 @@
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this),
+ assembler_(graph->GetArena()),
isa_features_(isa_features) {
// Save RA (containing the return address) to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(RA));
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 4e15cdd..9785a2e 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -271,7 +271,7 @@
size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return kMips64DoublewordSize; }
- uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
return assembler_.GetLabelLocation(GetLabelOf(block));
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 304cf08..1a4e62e 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -795,13 +795,16 @@
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this),
+ assembler_(graph->GetArena()),
isa_features_(isa_features),
method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
simple_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ constant_area_start_(-1),
+ fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ method_address_offset_(-1) {
// Use a fake return address register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 69a6253..1739eec 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -361,7 +361,7 @@
return assembler_;
}
- uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
return GetLabelOf(block)->Position();
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 056b69b..59cc444 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1007,6 +1007,7 @@
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this),
+ assembler_(graph->GetArena()),
isa_features_(isa_features),
constant_area_start_(0),
method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index d7ce7c6..3a211c5 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -346,7 +346,7 @@
return &move_resolver_;
}
- uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
return GetLabelOf(block)->Position();
}
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index cad94c7..3670ce2 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -948,13 +948,11 @@
info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
info.code_info = stack_map_size == 0 ? nullptr : stack_map_data;
info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
- ArrayRef<const uint8_t> elf_file = debug::WriteDebugElfFileForMethods(
+ std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForMethods(
GetCompilerDriver()->GetInstructionSet(),
GetCompilerDriver()->GetInstructionSetFeatures(),
ArrayRef<const debug::MethodDebugInfo>(&info, 1));
- CreateJITCodeEntryForAddress(code_address,
- std::unique_ptr<const uint8_t[]>(elf_file.data()),
- elf_file.size());
+ CreateJITCodeEntryForAddress(code_address, std::move(elf_file));
}
Runtime::Current()->GetJit()->AddMemoryUsage(method, arena.BytesUsed());
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 48465e6..1ee1c4d 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -16,6 +16,7 @@
#include "trampoline_compiler.h"
+#include "base/arena_allocator.h"
#include "jni_env_ext.h"
#ifdef ART_ENABLE_CODEGEN_arm
@@ -48,9 +49,9 @@
#ifdef ART_ENABLE_CODEGEN_arm
namespace arm {
-static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
- ThreadOffset<4> offset) {
- Thumb2Assembler assembler;
+static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
+ ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset<4> offset) {
+ Thumb2Assembler assembler(arena);
switch (abi) {
case kInterpreterAbi: // Thread* is first argument (R0) in interpreter ABI.
@@ -68,19 +69,19 @@
__ FinalizeCode();
size_t cs = __ CodeSize();
std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
- MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+ MemoryRegion code(entry_stub->data(), entry_stub->size());
__ FinalizeInstructions(code);
- return entry_stub.release();
+ return std::move(entry_stub);
}
} // namespace arm
#endif // ART_ENABLE_CODEGEN_arm
#ifdef ART_ENABLE_CODEGEN_arm64
namespace arm64 {
-static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
- ThreadOffset<8> offset) {
- Arm64Assembler assembler;
+static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
+ ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset<8> offset) {
+ Arm64Assembler assembler(arena);
switch (abi) {
case kInterpreterAbi: // Thread* is first argument (X0) in interpreter ABI.
@@ -107,19 +108,19 @@
__ FinalizeCode();
size_t cs = __ CodeSize();
std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
- MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+ MemoryRegion code(entry_stub->data(), entry_stub->size());
__ FinalizeInstructions(code);
- return entry_stub.release();
+ return std::move(entry_stub);
}
} // namespace arm64
#endif // ART_ENABLE_CODEGEN_arm64
#ifdef ART_ENABLE_CODEGEN_mips
namespace mips {
-static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
- ThreadOffset<4> offset) {
- MipsAssembler assembler;
+static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
+ ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset<4> offset) {
+ MipsAssembler assembler(arena);
switch (abi) {
case kInterpreterAbi: // Thread* is first argument (A0) in interpreter ABI.
@@ -139,19 +140,19 @@
__ FinalizeCode();
size_t cs = __ CodeSize();
std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
- MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+ MemoryRegion code(entry_stub->data(), entry_stub->size());
__ FinalizeInstructions(code);
- return entry_stub.release();
+ return std::move(entry_stub);
}
} // namespace mips
#endif // ART_ENABLE_CODEGEN_mips
#ifdef ART_ENABLE_CODEGEN_mips64
namespace mips64 {
-static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
- ThreadOffset<8> offset) {
- Mips64Assembler assembler;
+static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
+ ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset<8> offset) {
+ Mips64Assembler assembler(arena);
switch (abi) {
case kInterpreterAbi: // Thread* is first argument (A0) in interpreter ABI.
@@ -171,18 +172,19 @@
__ FinalizeCode();
size_t cs = __ CodeSize();
std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
- MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+ MemoryRegion code(entry_stub->data(), entry_stub->size());
__ FinalizeInstructions(code);
- return entry_stub.release();
+ return std::move(entry_stub);
}
} // namespace mips64
#endif // ART_ENABLE_CODEGEN_mips
#ifdef ART_ENABLE_CODEGEN_x86
namespace x86 {
-static const std::vector<uint8_t>* CreateTrampoline(ThreadOffset<4> offset) {
- X86Assembler assembler;
+static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* arena,
+ ThreadOffset<4> offset) {
+ X86Assembler assembler(arena);
// All x86 trampolines call via the Thread* held in fs.
__ fs()->jmp(Address::Absolute(offset));
@@ -191,18 +193,19 @@
__ FinalizeCode();
size_t cs = __ CodeSize();
std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
- MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+ MemoryRegion code(entry_stub->data(), entry_stub->size());
__ FinalizeInstructions(code);
- return entry_stub.release();
+ return std::move(entry_stub);
}
} // namespace x86
#endif // ART_ENABLE_CODEGEN_x86
#ifdef ART_ENABLE_CODEGEN_x86_64
namespace x86_64 {
-static const std::vector<uint8_t>* CreateTrampoline(ThreadOffset<8> offset) {
- x86_64::X86_64Assembler assembler;
+static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* arena,
+ ThreadOffset<8> offset) {
+ x86_64::X86_64Assembler assembler(arena);
// All x86 trampolines call via the Thread* held in gs.
__ gs()->jmp(x86_64::Address::Absolute(offset, true));
@@ -211,28 +214,31 @@
__ FinalizeCode();
size_t cs = __ CodeSize();
std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
- MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+ MemoryRegion code(entry_stub->data(), entry_stub->size());
__ FinalizeInstructions(code);
- return entry_stub.release();
+ return std::move(entry_stub);
}
} // namespace x86_64
#endif // ART_ENABLE_CODEGEN_x86_64
-const std::vector<uint8_t>* CreateTrampoline64(InstructionSet isa, EntryPointCallingConvention abi,
- ThreadOffset<8> offset) {
+std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline64(InstructionSet isa,
+ EntryPointCallingConvention abi,
+ ThreadOffset<8> offset) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
switch (isa) {
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
- return arm64::CreateTrampoline(abi, offset);
+ return arm64::CreateTrampoline(&arena, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
- return mips64::CreateTrampoline(abi, offset);
+ return mips64::CreateTrampoline(&arena, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
- return x86_64::CreateTrampoline(offset);
+ return x86_64::CreateTrampoline(&arena, offset);
#endif
default:
UNUSED(abi);
@@ -242,22 +248,25 @@
}
}
-const std::vector<uint8_t>* CreateTrampoline32(InstructionSet isa, EntryPointCallingConvention abi,
- ThreadOffset<4> offset) {
+std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline32(InstructionSet isa,
+ EntryPointCallingConvention abi,
+ ThreadOffset<4> offset) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
switch (isa) {
#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
case kThumb2:
- return arm::CreateTrampoline(abi, offset);
+ return arm::CreateTrampoline(&arena, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
- return mips::CreateTrampoline(abi, offset);
+ return mips::CreateTrampoline(&arena, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86:
UNUSED(abi);
- return x86::CreateTrampoline(offset);
+ return x86::CreateTrampoline(&arena, offset);
#endif
default:
LOG(FATAL) << "Unexpected InstructionSet: " << isa;
diff --git a/compiler/trampolines/trampoline_compiler.h b/compiler/trampolines/trampoline_compiler.h
index 66d5ac3..8f823f1 100644
--- a/compiler/trampolines/trampoline_compiler.h
+++ b/compiler/trampolines/trampoline_compiler.h
@@ -25,12 +25,12 @@
namespace art {
// Create code that will invoke the function held in thread local storage.
-const std::vector<uint8_t>* CreateTrampoline32(InstructionSet isa,
- EntryPointCallingConvention abi,
- ThreadOffset<4> entry_point_offset);
-const std::vector<uint8_t>* CreateTrampoline64(InstructionSet isa,
- EntryPointCallingConvention abi,
- ThreadOffset<8> entry_point_offset);
+std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline32(InstructionSet isa,
+ EntryPointCallingConvention abi,
+ ThreadOffset<4> entry_point_offset);
+std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline64(InstructionSet isa,
+ EntryPointCallingConvention abi,
+ ThreadOffset<8> entry_point_offset);
} // namespace art
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index dead8fd..e5f91dc 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -845,7 +845,7 @@
void ArmAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
ArmManagedRegister scratch = mscratch.AsArm();
- ArmExceptionSlowPath* slow = new ArmExceptionSlowPath(scratch, stack_adjust);
+ ArmExceptionSlowPath* slow = new (GetArena()) ArmExceptionSlowPath(scratch, stack_adjust);
buffer_.EnqueueSlowPath(slow);
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
TR, Thread::ExceptionOffset<4>().Int32Value());
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index a894565..ffbe786 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -20,6 +20,8 @@
#include <type_traits>
#include <vector>
+#include "base/arena_allocator.h"
+#include "base/arena_containers.h"
#include "base/bit_utils.h"
#include "base/logging.h"
#include "base/stl_util.h"
@@ -1078,6 +1080,9 @@
}
protected:
+ explicit ArmAssembler(ArenaAllocator* arena)
+ : Assembler(arena), tracked_labels_(arena->Adapter(kArenaAllocAssembler)) {}
+
// Returns whether or not the given register is used for passing parameters.
static int RegisterCompare(const Register* reg1, const Register* reg2) {
return *reg1 - *reg2;
@@ -1086,7 +1091,7 @@
void FinalizeTrackedLabels();
// Tracked labels. Use a vector, as we need to sort before adjusting.
- std::vector<Label*> tracked_labels_;
+ ArenaVector<Label*> tracked_labels_;
};
// Slowpath entered when Thread::Current()->_exception is non-null
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index e3e05ca..bc6020e 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -30,8 +30,7 @@
class Arm32Assembler FINAL : public ArmAssembler {
public:
- Arm32Assembler() {
- }
+ explicit Arm32Assembler(ArenaAllocator* arena) : ArmAssembler(arena) {}
virtual ~Arm32Assembler() {}
bool IsThumb() const OVERRIDE {
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 15298b3..26f7d0d 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -59,8 +59,8 @@
return;
}
// Create and fill in the fixup_dependents_.
- assembler->fixup_dependents_.reset(new FixupId[number_of_dependents]);
- FixupId* dependents = assembler->fixup_dependents_.get();
+ assembler->fixup_dependents_.resize(number_of_dependents);
+ FixupId* dependents = assembler->fixup_dependents_.data();
for (FixupId fixup_id = 0u; fixup_id != end_id; ++fixup_id) {
uint32_t target = fixups[fixup_id].target_;
if (target > fixups[fixup_id].location_) {
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index 6b61aca..111a6b0 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -21,6 +21,7 @@
#include <utility>
#include <vector>
+#include "base/arena_containers.h"
#include "base/logging.h"
#include "constants_arm.h"
#include "utils/arm/managed_register_arm.h"
@@ -33,14 +34,16 @@
class Thumb2Assembler FINAL : public ArmAssembler {
public:
- explicit Thumb2Assembler(bool can_relocate_branches = true)
- : can_relocate_branches_(can_relocate_branches),
+ explicit Thumb2Assembler(ArenaAllocator* arena, bool can_relocate_branches = true)
+ : ArmAssembler(arena),
+ can_relocate_branches_(can_relocate_branches),
force_32bit_(false),
it_cond_index_(kNoItCondition),
next_condition_(AL),
- fixups_(),
- fixup_dependents_(),
- literals_(),
+ fixups_(arena->Adapter(kArenaAllocAssembler)),
+ fixup_dependents_(arena->Adapter(kArenaAllocAssembler)),
+ literals_(arena->Adapter(kArenaAllocAssembler)),
+ jump_tables_(arena->Adapter(kArenaAllocAssembler)),
last_position_adjustment_(0u),
last_old_position_(0u),
last_fixup_id_(0u) {
@@ -558,9 +561,9 @@
// Prepare the assembler->fixup_dependents_ and each Fixup's dependents_start_/count_.
static void PrepareDependents(Thumb2Assembler* assembler);
- ArrayRef<FixupId> Dependents(const Thumb2Assembler& assembler) const {
- return ArrayRef<FixupId>(assembler.fixup_dependents_.get() + dependents_start_,
- dependents_count_);
+ ArrayRef<const FixupId> Dependents(const Thumb2Assembler& assembler) const {
+ return ArrayRef<const FixupId>(assembler.fixup_dependents_).SubArray(dependents_start_,
+ dependents_count_);
}
// Resolve a branch when the target is known.
@@ -839,15 +842,15 @@
static int16_t AdrEncoding16(Register rd, int32_t offset);
static int32_t AdrEncoding32(Register rd, int32_t offset);
- std::vector<Fixup> fixups_;
- std::unique_ptr<FixupId[]> fixup_dependents_;
+ ArenaVector<Fixup> fixups_;
+ ArenaVector<FixupId> fixup_dependents_;
// Use std::deque<> for literal labels to allow insertions at the end
// without invalidating pointers and references to existing elements.
- std::deque<Literal> literals_;
+ ArenaDeque<Literal> literals_;
// Jump table list.
- std::deque<JumpTable> jump_tables_;
+ ArenaDeque<JumpTable> jump_tables_;
// Data for AdjustedPosition(), see the description there.
uint32_t last_position_adjustment_;
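Note on the Fixup changes above: fixup_dependents_ changes from a separately allocated FixupId[] to an ArenaVector, and each Fixup exposes its dependents as a read-only view (ArrayRef<const FixupId> via SubArray) into that single flat container. A standalone sketch of the flat-storage-plus-view idea; DependentsView is an illustrative stand-in, not ART's ArrayRef.

    #include <cstddef>
    #include <vector>

    // All dependents live in one flat vector; each fixup records only an
    // offset and a count and hands out a read-only view into that storage.
    struct DependentsView {
      const int* data;
      size_t count;
    };

    struct Fixup {
      size_t dependents_start;
      size_t dependents_count;
      DependentsView Dependents(const std::vector<int>& all) const {
        return DependentsView{all.data() + dependents_start, dependents_count};
      }
    };

    int main() {
      std::vector<int> all_dependents = {3, 5, 7, 9};
      Fixup fixup{1, 2};  // views {5, 7}
      DependentsView view = fixup.Dependents(all_dependents);
      return (view.count == 2 && view.data[0] == 5) ? 0 : 1;
    }
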
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 0e17512..eb5112b 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -32,10 +32,8 @@
#endif
void Arm64Assembler::FinalizeCode() {
- if (!exception_blocks_.empty()) {
- for (size_t i = 0; i < exception_blocks_.size(); i++) {
- EmitExceptionPoll(exception_blocks_.at(i));
- }
+ for (const std::unique_ptr<Arm64Exception>& exception : exception_blocks_) {
+ EmitExceptionPoll(exception.get());
}
___ FinalizeCode();
}
@@ -613,10 +611,9 @@
void Arm64Assembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
CHECK_ALIGNED(stack_adjust, kStackAlignment);
Arm64ManagedRegister scratch = m_scratch.AsArm64();
- Arm64Exception *current_exception = new Arm64Exception(scratch, stack_adjust);
- exception_blocks_.push_back(current_exception);
+ exception_blocks_.emplace_back(new Arm64Exception(scratch, stack_adjust));
LoadFromOffset(scratch.AsXRegister(), TR, Thread::ExceptionOffset<8>().Int32Value());
- ___ Cbnz(reg_x(scratch.AsXRegister()), current_exception->Entry());
+ ___ Cbnz(reg_x(scratch.AsXRegister()), exception_blocks_.back()->Entry());
}
void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) {
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 7b25b8f..c4e5de7 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -21,6 +21,7 @@
#include <memory>
#include <vector>
+#include "base/arena_containers.h"
#include "base/logging.h"
#include "constants_arm64.h"
#include "utils/arm64/managed_register_arm64.h"
@@ -61,13 +62,34 @@
kStoreDWord
};
-class Arm64Exception;
+class Arm64Exception {
+ private:
+ Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust)
+ : scratch_(scratch), stack_adjust_(stack_adjust) {
+ }
+
+ vixl::Label* Entry() { return &exception_entry_; }
+
+ // Register used for passing Thread::Current()->exception_ .
+ const Arm64ManagedRegister scratch_;
+
+ // Stack adjust for ExceptionPool.
+ const size_t stack_adjust_;
+
+ vixl::Label exception_entry_;
+
+ friend class Arm64Assembler;
+ DISALLOW_COPY_AND_ASSIGN(Arm64Exception);
+};
class Arm64Assembler FINAL : public Assembler {
public:
// We indicate the size of the initial code generation buffer to the VIXL
// assembler. From there it will automatically manage the buffer.
- Arm64Assembler() : vixl_masm_(new vixl::MacroAssembler(kArm64BaseBufferSize)) {}
+ explicit Arm64Assembler(ArenaAllocator* arena)
+ : Assembler(arena),
+ exception_blocks_(arena->Adapter(kArenaAllocAssembler)),
+ vixl_masm_(new vixl::MacroAssembler(kArm64BaseBufferSize)) {}
virtual ~Arm64Assembler() {
delete vixl_masm_;
@@ -249,7 +271,7 @@
void AddConstant(XRegister rd, XRegister rn, int32_t value, vixl::Condition cond = vixl::al);
// List of exception blocks to generate at the end of the code cache.
- std::vector<Arm64Exception*> exception_blocks_;
+ ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
public:
// Vixl assembler.
@@ -259,26 +281,6 @@
friend class Arm64ManagedRegister_VixlRegisters_Test;
};
-class Arm64Exception {
- private:
- Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust)
- : scratch_(scratch), stack_adjust_(stack_adjust) {
- }
-
- vixl::Label* Entry() { return &exception_entry_; }
-
- // Register used for passing Thread::Current()->exception_ .
- const Arm64ManagedRegister scratch_;
-
- // Stack adjust for ExceptionPool.
- const size_t stack_adjust_;
-
- vixl::Label exception_entry_;
-
- friend class Arm64Assembler;
- DISALLOW_COPY_AND_ASSIGN(Arm64Exception);
-};
-
} // namespace arm64
} // namespace art
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index f784d2c..c2aa574 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -44,14 +44,10 @@
namespace art {
-static uint8_t* NewContents(size_t capacity) {
- return new uint8_t[capacity];
-}
-
-
-AssemblerBuffer::AssemblerBuffer() {
+AssemblerBuffer::AssemblerBuffer(ArenaAllocator* arena)
+ : arena_(arena) {
static const size_t kInitialBufferCapacity = 4 * KB;
- contents_ = NewContents(kInitialBufferCapacity);
+ contents_ = arena_->AllocArray<uint8_t>(kInitialBufferCapacity);
cursor_ = contents_;
limit_ = ComputeLimit(contents_, kInitialBufferCapacity);
fixup_ = nullptr;
@@ -68,7 +64,9 @@
AssemblerBuffer::~AssemblerBuffer() {
- delete[] contents_;
+ if (arena_->IsRunningOnMemoryTool()) {
+ arena_->MakeInaccessible(contents_, Capacity());
+ }
}
@@ -100,19 +98,12 @@
new_capacity = std::max(new_capacity, min_capacity);
// Allocate the new data area and copy contents of the old one to it.
- uint8_t* new_contents = NewContents(new_capacity);
- memmove(reinterpret_cast<void*>(new_contents),
- reinterpret_cast<void*>(contents_),
- old_size);
-
- // Compute the relocation delta and switch to the new contents area.
- ptrdiff_t delta = new_contents - contents_;
- delete[] contents_;
- contents_ = new_contents;
+ contents_ = reinterpret_cast<uint8_t*>(
+ arena_->Realloc(contents_, old_capacity, new_capacity, kArenaAllocAssembler));
// Update the cursor and recompute the limit.
- cursor_ += delta;
- limit_ = ComputeLimit(new_contents, new_capacity);
+ cursor_ = contents_ + old_size;
+ limit_ = ComputeLimit(contents_, new_capacity);
// Verify internal state.
CHECK_EQ(Capacity(), new_capacity);
@@ -129,36 +120,40 @@
}
}
-Assembler* Assembler::Create(InstructionSet instruction_set,
- const InstructionSetFeatures* instruction_set_features) {
+std::unique_ptr<Assembler> Assembler::Create(
+ ArenaAllocator* arena,
+ InstructionSet instruction_set,
+ const InstructionSetFeatures* instruction_set_features) {
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
- return new arm::Arm32Assembler();
+ return std::unique_ptr<Assembler>(new (arena) arm::Arm32Assembler(arena));
case kThumb2:
- return new arm::Thumb2Assembler();
+ return std::unique_ptr<Assembler>(new (arena) arm::Thumb2Assembler(arena));
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
- return new arm64::Arm64Assembler();
+ return std::unique_ptr<Assembler>(new (arena) arm64::Arm64Assembler(arena));
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
- return new mips::MipsAssembler(instruction_set_features != nullptr
- ? instruction_set_features->AsMipsInstructionSetFeatures()
- : nullptr);
+ return std::unique_ptr<Assembler>(new (arena) mips::MipsAssembler(
+ arena,
+ instruction_set_features != nullptr
+ ? instruction_set_features->AsMipsInstructionSetFeatures()
+ : nullptr));
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
- return new mips64::Mips64Assembler();
+ return std::unique_ptr<Assembler>(new (arena) mips64::Mips64Assembler(arena));
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86:
- return new x86::X86Assembler();
+ return std::unique_ptr<Assembler>(new (arena) x86::X86Assembler(arena));
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
- return new x86_64::X86_64Assembler();
+ return std::unique_ptr<Assembler>(new (arena) x86_64::X86_64Assembler(arena));
#endif
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
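Note on the AssemblerBuffer changes above: the buffer now keeps the ArenaAllocator, grows its contents with arena_->Realloc(), and recomputes cursor_ and limit_ from the new contents_ pointer instead of patching them with a pointer delta; the destructor no longer frees anything because the arena owns the bytes. A standalone sketch of that growth logic using plain realloc as a stand-in (the toy still frees its own storage; error handling and the arena itself are omitted).

    #include <cstdint>
    #include <cstdlib>

    // Growable code buffer in the spirit of AssemblerBuffer: cursor_ and limit_
    // are raw pointers into contents_, so after the storage is reallocated they
    // must be recomputed from the new base pointer.
    class ToyBuffer {
     public:
      ToyBuffer() : capacity_(16),
                    contents_(static_cast<uint8_t*>(std::malloc(capacity_))),
                    cursor_(contents_),
                    limit_(contents_ + capacity_) {}
      ~ToyBuffer() { std::free(contents_); }

      void Emit(uint8_t byte) {
        if (cursor_ == limit_) {
          Extend(capacity_ * 2);
        }
        *cursor_++ = byte;
      }
      size_t Size() const { return static_cast<size_t>(cursor_ - contents_); }

     private:
      void Extend(size_t new_capacity) {
        size_t old_size = Size();
        // Stand-in for ArenaAllocator::Realloc(contents_, old, new, kind); null check omitted.
        contents_ = static_cast<uint8_t*>(std::realloc(contents_, new_capacity));
        capacity_ = new_capacity;
        cursor_ = contents_ + old_size;      // recompute from the new base, not the old pointer
        limit_ = contents_ + new_capacity;
      }

      size_t capacity_;
      uint8_t* contents_;
      uint8_t* cursor_;
      uint8_t* limit_;
    };

    int main() {
      ToyBuffer buffer;
      for (int i = 0; i < 100; ++i) {
        buffer.Emit(static_cast<uint8_t>(i));
      }
      return buffer.Size() == 100 ? 0 : 1;
    }
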
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 414ea7e..4ea85a2 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -22,6 +22,8 @@
#include "arch/instruction_set.h"
#include "arch/instruction_set_features.h"
#include "arm/constants_arm.h"
+#include "base/arena_allocator.h"
+#include "base/arena_object.h"
#include "base/logging.h"
#include "base/macros.h"
#include "debug/dwarf/debug_frame_opcode_writer.h"
@@ -60,7 +62,7 @@
};
// Parent of all queued slow paths, emitted during finalization
-class SlowPath {
+class SlowPath : public DeletableArenaObject<kArenaAllocAssembler> {
public:
SlowPath() : next_(nullptr) {}
virtual ~SlowPath() {}
@@ -85,9 +87,13 @@
class AssemblerBuffer {
public:
- AssemblerBuffer();
+ explicit AssemblerBuffer(ArenaAllocator* arena);
~AssemblerBuffer();
+ ArenaAllocator* GetArena() {
+ return arena_;
+ }
+
// Basic support for emitting, loading, and storing.
template<typename T> void Emit(T value) {
CHECK(HasEnsuredCapacity());
@@ -235,6 +241,7 @@
// for a single, fast space check per instruction.
static const int kMinimumGap = 32;
+ ArenaAllocator* arena_;
uint8_t* contents_;
uint8_t* cursor_;
uint8_t* limit_;
@@ -338,10 +345,12 @@
std::vector<DelayedAdvancePC> delayed_advance_pcs_;
};
-class Assembler {
+class Assembler : public DeletableArenaObject<kArenaAllocAssembler> {
public:
- static Assembler* Create(InstructionSet instruction_set,
- const InstructionSetFeatures* instruction_set_features = nullptr);
+ static std::unique_ptr<Assembler> Create(
+ ArenaAllocator* arena,
+ InstructionSet instruction_set,
+ const InstructionSetFeatures* instruction_set_features = nullptr);
// Finalize the code; emit slow paths, fixup branches, add literal pool, etc.
virtual void FinalizeCode() { buffer_.EmitSlowPaths(this); }
@@ -504,7 +513,11 @@
DebugFrameOpCodeWriterForAssembler& cfi() { return cfi_; }
protected:
- Assembler() : buffer_(), cfi_(this) {}
+ explicit Assembler(ArenaAllocator* arena) : buffer_(arena), cfi_(this) {}
+
+ ArenaAllocator* GetArena() {
+ return buffer_.GetArena();
+ }
AssemblerBuffer buffer_;
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 2579ddb..084e901 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -460,7 +460,8 @@
explicit AssemblerTest() {}
void SetUp() OVERRIDE {
- assembler_.reset(new Ass());
+ arena_.reset(new ArenaAllocator(&pool_));
+ assembler_.reset(new (arena_.get()) Ass(arena_.get()));
test_helper_.reset(
new AssemblerTestInfrastructure(GetArchitectureString(),
GetAssemblerCmdName(),
@@ -476,6 +477,8 @@
void TearDown() OVERRIDE {
test_helper_.reset(); // Clean up the helper.
+ assembler_.reset();
+ arena_.reset();
}
// Override this to set up any architecture-specific things, e.g., register vectors.
@@ -919,6 +922,8 @@
static constexpr size_t kWarnManyCombinationsThreshold = 500;
+ ArenaPool pool_;
+ std::unique_ptr<ArenaAllocator> arena_;
std::unique_ptr<Ass> assembler_;
std::unique_ptr<AssemblerTestInfrastructure> test_helper_;
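Note on the test changes above: AssemblerTest now owns an ArenaPool and an ArenaAllocator, builds the assembler inside the arena, and TearDown() explicitly resets the assembler before the arena because the assembler's buffer lives in arena-owned memory. A gtest-style sketch of that setup/teardown ordering; ToyArena and ToyAssembler are stand-ins for the real types.

    #include <memory>

    #include "gtest/gtest.h"

    struct ToyArena {};

    struct ToyAssembler {
      explicit ToyAssembler(ToyArena* arena) : arena_(arena) {}
      ToyArena* arena_;  // the real assembler's buffer lives in arena-owned memory
    };

    class ToyAssemblerTest : public ::testing::Test {
     protected:
      void SetUp() override {
        arena_.reset(new ToyArena());
        assembler_.reset(new ToyAssembler(arena_.get()));
      }
      void TearDown() override {
        assembler_.reset();  // destroy the user of the arena first...
        arena_.reset();      // ...then the arena itself
      }
      std::unique_ptr<ToyArena> arena_;
      std::unique_ptr<ToyAssembler> assembler_;
    };

    TEST_F(ToyAssemblerTest, SetUpTearDown) {
      EXPECT_NE(assembler_.get(), nullptr);
    }
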
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 2df9b17..c67cb5a 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -195,11 +195,18 @@
#undef __
+class Thumb2AssemblerTest : public ::testing::Test {
+ public:
+ Thumb2AssemblerTest() : pool(), arena(&pool), assembler(&arena) { }
+
+ ArenaPool pool;
+ ArenaAllocator arena;
+ arm::Thumb2Assembler assembler;
+};
+
#define __ assembler.
-TEST(Thumb2AssemblerTest, SimpleMov) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, SimpleMov) {
__ movs(R0, ShifterOperand(R1));
__ mov(R0, ShifterOperand(R1));
__ mov(R8, ShifterOperand(R9));
@@ -210,8 +217,7 @@
EmitAndCheck(&assembler, "SimpleMov");
}
-TEST(Thumb2AssemblerTest, SimpleMov32) {
- arm::Thumb2Assembler assembler;
+TEST_F(Thumb2AssemblerTest, SimpleMov32) {
__ Force32Bit();
__ mov(R0, ShifterOperand(R1));
@@ -220,9 +226,7 @@
EmitAndCheck(&assembler, "SimpleMov32");
}
-TEST(Thumb2AssemblerTest, SimpleMovAdd) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, SimpleMovAdd) {
__ mov(R0, ShifterOperand(R1));
__ adds(R0, R1, ShifterOperand(R2));
__ add(R0, R1, ShifterOperand(0));
@@ -230,9 +234,7 @@
EmitAndCheck(&assembler, "SimpleMovAdd");
}
-TEST(Thumb2AssemblerTest, DataProcessingRegister) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, DataProcessingRegister) {
// 32 bit variants using low registers.
__ mvn(R0, ShifterOperand(R1), AL, kCcKeep);
__ add(R0, R1, ShifterOperand(R2), AL, kCcKeep);
@@ -364,9 +366,7 @@
EmitAndCheck(&assembler, "DataProcessingRegister");
}
-TEST(Thumb2AssemblerTest, DataProcessingImmediate) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, DataProcessingImmediate) {
__ mov(R0, ShifterOperand(0x55));
__ mvn(R0, ShifterOperand(0x55));
__ add(R0, R1, ShifterOperand(0x55));
@@ -397,9 +397,7 @@
EmitAndCheck(&assembler, "DataProcessingImmediate");
}
-TEST(Thumb2AssemblerTest, DataProcessingModifiedImmediate) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, DataProcessingModifiedImmediate) {
__ mov(R0, ShifterOperand(0x550055));
__ mvn(R0, ShifterOperand(0x550055));
__ add(R0, R1, ShifterOperand(0x550055));
@@ -422,9 +420,7 @@
}
-TEST(Thumb2AssemblerTest, DataProcessingModifiedImmediates) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, DataProcessingModifiedImmediates) {
__ mov(R0, ShifterOperand(0x550055));
__ mov(R0, ShifterOperand(0x55005500));
__ mov(R0, ShifterOperand(0x55555555));
@@ -436,9 +432,7 @@
EmitAndCheck(&assembler, "DataProcessingModifiedImmediates");
}
-TEST(Thumb2AssemblerTest, DataProcessingShiftedRegister) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, DataProcessingShiftedRegister) {
// 16-bit variants.
__ movs(R3, ShifterOperand(R4, LSL, 4));
__ movs(R3, ShifterOperand(R4, LSR, 5));
@@ -467,10 +461,9 @@
EmitAndCheck(&assembler, "DataProcessingShiftedRegister");
}
-TEST(Thumb2AssemblerTest, ShiftImmediate) {
+TEST_F(Thumb2AssemblerTest, ShiftImmediate) {
// Note: This test produces the same results as DataProcessingShiftedRegister
// but it does so using shift functions instead of mov().
- arm::Thumb2Assembler assembler;
// 16-bit variants.
__ Lsl(R3, R4, 4);
@@ -500,9 +493,7 @@
EmitAndCheck(&assembler, "ShiftImmediate");
}
-TEST(Thumb2AssemblerTest, BasicLoad) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, BasicLoad) {
__ ldr(R3, Address(R4, 24));
__ ldrb(R3, Address(R4, 24));
__ ldrh(R3, Address(R4, 24));
@@ -522,9 +513,7 @@
}
-TEST(Thumb2AssemblerTest, BasicStore) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, BasicStore) {
__ str(R3, Address(R4, 24));
__ strb(R3, Address(R4, 24));
__ strh(R3, Address(R4, 24));
@@ -539,9 +528,7 @@
EmitAndCheck(&assembler, "BasicStore");
}
-TEST(Thumb2AssemblerTest, ComplexLoad) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, ComplexLoad) {
__ ldr(R3, Address(R4, 24, Address::Mode::Offset));
__ ldr(R3, Address(R4, 24, Address::Mode::PreIndex));
__ ldr(R3, Address(R4, 24, Address::Mode::PostIndex));
@@ -581,9 +568,7 @@
}
-TEST(Thumb2AssemblerTest, ComplexStore) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, ComplexStore) {
__ str(R3, Address(R4, 24, Address::Mode::Offset));
__ str(R3, Address(R4, 24, Address::Mode::PreIndex));
__ str(R3, Address(R4, 24, Address::Mode::PostIndex));
@@ -608,9 +593,7 @@
EmitAndCheck(&assembler, "ComplexStore");
}
-TEST(Thumb2AssemblerTest, NegativeLoadStore) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, NegativeLoadStore) {
__ ldr(R3, Address(R4, -24, Address::Mode::Offset));
__ ldr(R3, Address(R4, -24, Address::Mode::PreIndex));
__ ldr(R3, Address(R4, -24, Address::Mode::PostIndex));
@@ -670,18 +653,14 @@
EmitAndCheck(&assembler, "NegativeLoadStore");
}
-TEST(Thumb2AssemblerTest, SimpleLoadStoreDual) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, SimpleLoadStoreDual) {
__ strd(R2, Address(R0, 24, Address::Mode::Offset));
__ ldrd(R2, Address(R0, 24, Address::Mode::Offset));
EmitAndCheck(&assembler, "SimpleLoadStoreDual");
}
-TEST(Thumb2AssemblerTest, ComplexLoadStoreDual) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, ComplexLoadStoreDual) {
__ strd(R2, Address(R0, 24, Address::Mode::Offset));
__ strd(R2, Address(R0, 24, Address::Mode::PreIndex));
__ strd(R2, Address(R0, 24, Address::Mode::PostIndex));
@@ -699,9 +678,7 @@
EmitAndCheck(&assembler, "ComplexLoadStoreDual");
}
-TEST(Thumb2AssemblerTest, NegativeLoadStoreDual) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, NegativeLoadStoreDual) {
__ strd(R2, Address(R0, -24, Address::Mode::Offset));
__ strd(R2, Address(R0, -24, Address::Mode::PreIndex));
__ strd(R2, Address(R0, -24, Address::Mode::PostIndex));
@@ -719,9 +696,7 @@
EmitAndCheck(&assembler, "NegativeLoadStoreDual");
}
-TEST(Thumb2AssemblerTest, SimpleBranch) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, SimpleBranch) {
Label l1;
__ mov(R0, ShifterOperand(2));
__ Bind(&l1);
@@ -757,8 +732,7 @@
EmitAndCheck(&assembler, "SimpleBranch");
}
-TEST(Thumb2AssemblerTest, LongBranch) {
- arm::Thumb2Assembler assembler;
+TEST_F(Thumb2AssemblerTest, LongBranch) {
__ Force32Bit();
// 32 bit branches.
Label l1;
@@ -797,9 +771,7 @@
EmitAndCheck(&assembler, "LongBranch");
}
-TEST(Thumb2AssemblerTest, LoadMultiple) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, LoadMultiple) {
// 16 bit.
__ ldm(DB_W, R4, (1 << R0 | 1 << R3));
@@ -813,9 +785,7 @@
EmitAndCheck(&assembler, "LoadMultiple");
}
-TEST(Thumb2AssemblerTest, StoreMultiple) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, StoreMultiple) {
// 16 bit.
__ stm(IA_W, R4, (1 << R0 | 1 << R3));
@@ -830,9 +800,7 @@
EmitAndCheck(&assembler, "StoreMultiple");
}
-TEST(Thumb2AssemblerTest, MovWMovT) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, MovWMovT) {
// Always 32 bit.
__ movw(R4, 0);
__ movw(R4, 0x34);
@@ -848,9 +816,7 @@
EmitAndCheck(&assembler, "MovWMovT");
}
-TEST(Thumb2AssemblerTest, SpecialAddSub) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, SpecialAddSub) {
__ add(R2, SP, ShifterOperand(0x50)); // 16 bit.
__ add(SP, SP, ShifterOperand(0x50)); // 16 bit.
__ add(R8, SP, ShifterOperand(0x50)); // 32 bit.
@@ -869,9 +835,7 @@
EmitAndCheck(&assembler, "SpecialAddSub");
}
-TEST(Thumb2AssemblerTest, LoadFromOffset) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, LoadFromOffset) {
__ LoadFromOffset(kLoadWord, R2, R4, 12);
__ LoadFromOffset(kLoadWord, R2, R4, 0xfff);
__ LoadFromOffset(kLoadWord, R2, R4, 0x1000);
@@ -901,9 +865,7 @@
EmitAndCheck(&assembler, "LoadFromOffset");
}
-TEST(Thumb2AssemblerTest, StoreToOffset) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, StoreToOffset) {
__ StoreToOffset(kStoreWord, R2, R4, 12);
__ StoreToOffset(kStoreWord, R2, R4, 0xfff);
__ StoreToOffset(kStoreWord, R2, R4, 0x1000);
@@ -931,9 +893,7 @@
EmitAndCheck(&assembler, "StoreToOffset");
}
-TEST(Thumb2AssemblerTest, IfThen) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, IfThen) {
__ it(EQ);
__ mov(R1, ShifterOperand(1), EQ);
@@ -964,9 +924,7 @@
EmitAndCheck(&assembler, "IfThen");
}
-TEST(Thumb2AssemblerTest, CbzCbnz) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, CbzCbnz) {
Label l1;
__ cbz(R2, &l1);
__ mov(R1, ShifterOperand(3));
@@ -984,9 +942,7 @@
EmitAndCheck(&assembler, "CbzCbnz");
}
-TEST(Thumb2AssemblerTest, Multiply) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, Multiply) {
__ mul(R0, R1, R0);
__ mul(R0, R1, R2);
__ mul(R8, R9, R8);
@@ -1004,9 +960,7 @@
EmitAndCheck(&assembler, "Multiply");
}
-TEST(Thumb2AssemblerTest, Divide) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, Divide) {
__ sdiv(R0, R1, R2);
__ sdiv(R8, R9, R10);
@@ -1016,9 +970,7 @@
EmitAndCheck(&assembler, "Divide");
}
-TEST(Thumb2AssemblerTest, VMov) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, VMov) {
__ vmovs(S1, 1.0);
__ vmovd(D1, 1.0);
@@ -1029,9 +981,7 @@
}
-TEST(Thumb2AssemblerTest, BasicFloatingPoint) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, BasicFloatingPoint) {
__ vadds(S0, S1, S2);
__ vsubs(S0, S1, S2);
__ vmuls(S0, S1, S2);
@@ -1055,9 +1005,7 @@
EmitAndCheck(&assembler, "BasicFloatingPoint");
}
-TEST(Thumb2AssemblerTest, FloatingPointConversions) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, FloatingPointConversions) {
__ vcvtsd(S2, D2);
__ vcvtds(D2, S2);
@@ -1076,9 +1024,7 @@
EmitAndCheck(&assembler, "FloatingPointConversions");
}
-TEST(Thumb2AssemblerTest, FloatingPointComparisons) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, FloatingPointComparisons) {
__ vcmps(S0, S1);
__ vcmpd(D0, D1);
@@ -1088,35 +1034,27 @@
EmitAndCheck(&assembler, "FloatingPointComparisons");
}
-TEST(Thumb2AssemblerTest, Calls) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, Calls) {
__ blx(LR);
__ bx(LR);
EmitAndCheck(&assembler, "Calls");
}
-TEST(Thumb2AssemblerTest, Breakpoint) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, Breakpoint) {
__ bkpt(0);
EmitAndCheck(&assembler, "Breakpoint");
}
-TEST(Thumb2AssemblerTest, StrR1) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, StrR1) {
__ str(R1, Address(SP, 68));
__ str(R1, Address(SP, 1068));
EmitAndCheck(&assembler, "StrR1");
}
-TEST(Thumb2AssemblerTest, VPushPop) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, VPushPop) {
__ vpushs(S2, 4);
__ vpushd(D2, 4);
@@ -1126,9 +1064,7 @@
EmitAndCheck(&assembler, "VPushPop");
}
-TEST(Thumb2AssemblerTest, Max16BitBranch) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, Max16BitBranch) {
Label l1;
__ b(&l1);
for (int i = 0 ; i < (1 << 11) ; i += 2) {
@@ -1140,9 +1076,7 @@
EmitAndCheck(&assembler, "Max16BitBranch");
}
-TEST(Thumb2AssemblerTest, Branch32) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, Branch32) {
Label l1;
__ b(&l1);
for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) {
@@ -1154,9 +1088,7 @@
EmitAndCheck(&assembler, "Branch32");
}
-TEST(Thumb2AssemblerTest, CompareAndBranchMax) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, CompareAndBranchMax) {
Label l1;
__ cbz(R4, &l1);
for (int i = 0 ; i < (1 << 7) ; i += 2) {
@@ -1168,9 +1100,7 @@
EmitAndCheck(&assembler, "CompareAndBranchMax");
}
-TEST(Thumb2AssemblerTest, CompareAndBranchRelocation16) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, CompareAndBranchRelocation16) {
Label l1;
__ cbz(R4, &l1);
for (int i = 0 ; i < (1 << 7) + 2 ; i += 2) {
@@ -1182,9 +1112,7 @@
EmitAndCheck(&assembler, "CompareAndBranchRelocation16");
}
-TEST(Thumb2AssemblerTest, CompareAndBranchRelocation32) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, CompareAndBranchRelocation32) {
Label l1;
__ cbz(R4, &l1);
for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) {
@@ -1196,9 +1124,7 @@
EmitAndCheck(&assembler, "CompareAndBranchRelocation32");
}
-TEST(Thumb2AssemblerTest, MixedBranch32) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, MixedBranch32) {
Label l1;
Label l2;
__ b(&l1); // Forwards.
@@ -1215,9 +1141,7 @@
EmitAndCheck(&assembler, "MixedBranch32");
}
-TEST(Thumb2AssemblerTest, Shifts) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, Shifts) {
// 16 bit selected for CcDontCare.
__ Lsl(R0, R1, 5);
__ Lsr(R0, R1, 5);
@@ -1292,9 +1216,7 @@
EmitAndCheck(&assembler, "Shifts");
}
-TEST(Thumb2AssemblerTest, LoadStoreRegOffset) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, LoadStoreRegOffset) {
// 16 bit.
__ ldr(R0, Address(R1, R2));
__ str(R0, Address(R1, R2));
@@ -1319,9 +1241,7 @@
EmitAndCheck(&assembler, "LoadStoreRegOffset");
}
-TEST(Thumb2AssemblerTest, LoadStoreLiteral) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, LoadStoreLiteral) {
__ ldr(R0, Address(4));
__ str(R0, Address(4));
@@ -1337,9 +1257,7 @@
EmitAndCheck(&assembler, "LoadStoreLiteral");
}
-TEST(Thumb2AssemblerTest, LoadStoreLimits) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, LoadStoreLimits) {
__ ldr(R0, Address(R4, 124)); // 16 bit.
__ ldr(R0, Address(R4, 128)); // 32 bit.
@@ -1367,9 +1285,7 @@
EmitAndCheck(&assembler, "LoadStoreLimits");
}
-TEST(Thumb2AssemblerTest, CompareAndBranch) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, CompareAndBranch) {
Label label;
__ CompareAndBranchIfZero(arm::R0, &label);
__ CompareAndBranchIfZero(arm::R11, &label);
@@ -1380,9 +1296,7 @@
EmitAndCheck(&assembler, "CompareAndBranch");
}
-TEST(Thumb2AssemblerTest, AddConstant) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, AddConstant) {
// Low registers, Rd != Rn.
__ AddConstant(R0, R1, 0); // MOV.
__ AddConstant(R0, R1, 1); // 16-bit ADDS, encoding T1.
@@ -1626,9 +1540,7 @@
EmitAndCheck(&assembler, "AddConstant");
}
-TEST(Thumb2AssemblerTest, CmpConstant) {
- arm::Thumb2Assembler assembler;
-
+TEST_F(Thumb2AssemblerTest, CmpConstant) {
__ CmpConstant(R0, 0); // 16-bit CMP.
__ CmpConstant(R1, 1); // 16-bit CMP.
__ CmpConstant(R0, 7); // 16-bit CMP.
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index ffac4c4..ecb67bd 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -102,8 +102,10 @@
class MipsAssembler FINAL : public Assembler {
public:
- explicit MipsAssembler(const MipsInstructionSetFeatures* instruction_set_features = nullptr)
- : overwriting_(false),
+ explicit MipsAssembler(ArenaAllocator* arena,
+ const MipsInstructionSetFeatures* instruction_set_features = nullptr)
+ : Assembler(arena),
+ overwriting_(false),
overwrite_location_(0),
last_position_adjustment_(0),
last_old_position_(0),
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 71f5e00..8acc38a 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -102,8 +102,9 @@
class Mips64Assembler FINAL : public Assembler {
public:
- Mips64Assembler()
- : overwriting_(false),
+ explicit Mips64Assembler(ArenaAllocator* arena)
+ : Assembler(arena),
+ overwriting_(false),
overwrite_location_(0),
last_position_adjustment_(0),
last_old_position_(0),
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 3efef70..2203646 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -2379,7 +2379,7 @@
}
void X86Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
- X86ExceptionSlowPath* slow = new X86ExceptionSlowPath(stack_adjust);
+ X86ExceptionSlowPath* slow = new (GetArena()) X86ExceptionSlowPath(stack_adjust);
buffer_.EnqueueSlowPath(slow);
fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<4>()), Immediate(0));
j(kNotEqual, slow->Entry());
@@ -2402,7 +2402,7 @@
}
void X86Assembler::AddConstantArea() {
- const std::vector<int32_t>& area = constant_area_.GetBuffer();
+ ArrayRef<const int32_t> area = constant_area_.GetBuffer();
// Generate the data for the literal area.
for (size_t i = 0, e = area.size(); i < e; i++) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 00ff7bd..8567ad2 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -18,12 +18,15 @@
#define ART_COMPILER_UTILS_X86_ASSEMBLER_X86_H_
#include <vector>
+
+#include "base/arena_containers.h"
#include "base/bit_utils.h"
#include "base/macros.h"
#include "constants_x86.h"
#include "globals.h"
#include "managed_register_x86.h"
#include "offsets.h"
+#include "utils/array_ref.h"
#include "utils/assembler.h"
namespace art {
@@ -260,7 +263,7 @@
*/
class ConstantArea {
public:
- ConstantArea() {}
+ explicit ConstantArea(ArenaAllocator* arena) : buffer_(arena->Adapter(kArenaAllocAssembler)) {}
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
@@ -290,18 +293,18 @@
return buffer_.size() * elem_size_;
}
- const std::vector<int32_t>& GetBuffer() const {
- return buffer_;
+ ArrayRef<const int32_t> GetBuffer() const {
+ return ArrayRef<const int32_t>(buffer_);
}
private:
static constexpr size_t elem_size_ = sizeof(int32_t);
- std::vector<int32_t> buffer_;
+ ArenaVector<int32_t> buffer_;
};
class X86Assembler FINAL : public Assembler {
public:
- X86Assembler() {}
+ explicit X86Assembler(ArenaAllocator* arena) : Assembler(arena), constant_area_(arena) {}
virtual ~X86Assembler() {}
/*
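
ConstantArea now hands out its literal pool as an ArrayRef view over the arena-backed vector instead of a reference to a std::vector, so callers are insulated from the container type. A sketch of the consumption pattern inside the assembler (EmitInt32 stands in for the emission helper, as used in the x86_64 hunk further down):

    ArrayRef<const int32_t> area = constant_area_.GetBuffer();
    for (size_t i = 0, e = area.size(); i < e; ++i) {
      EmitInt32(area[i]);  // write each literal pool entry into the code buffer
    }
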
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index d0d5147..1d1df6e 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -16,13 +16,16 @@
#include "assembler_x86.h"
+#include "base/arena_allocator.h"
#include "base/stl_util.h"
#include "utils/assembler_test.h"
namespace art {
TEST(AssemblerX86, CreateBuffer) {
- AssemblerBuffer buffer;
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ AssemblerBuffer buffer(&arena);
AssemblerBuffer::EnsureCapacity ensured(&buffer);
buffer.Emit<uint8_t>(0x42);
ASSERT_EQ(static_cast<size_t>(1), buffer.Size());
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index d86ad1b..32eb4a3 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -3144,7 +3144,7 @@
};
void X86_64Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
- X86_64ExceptionSlowPath* slow = new X86_64ExceptionSlowPath(stack_adjust);
+ X86_64ExceptionSlowPath* slow = new (GetArena()) X86_64ExceptionSlowPath(stack_adjust);
buffer_.EnqueueSlowPath(slow);
gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<8>(), true), Immediate(0));
j(kNotEqual, slow->Entry());
@@ -3167,7 +3167,7 @@
}
void X86_64Assembler::AddConstantArea() {
- const std::vector<int32_t>& area = constant_area_.GetBuffer();
+ ArrayRef<const int32_t> area = constant_area_.GetBuffer();
for (size_t i = 0, e = area.size(); i < e; i++) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitInt32(area[i]);
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index f00cb12..92c7d0a 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -19,12 +19,14 @@
#include <vector>
+#include "base/arena_containers.h"
#include "base/bit_utils.h"
#include "base/macros.h"
#include "constants_x86_64.h"
#include "globals.h"
#include "managed_register_x86_64.h"
#include "offsets.h"
+#include "utils/array_ref.h"
#include "utils/assembler.h"
namespace art {
@@ -270,7 +272,7 @@
*/
class ConstantArea {
public:
- ConstantArea() {}
+ explicit ConstantArea(ArenaAllocator* arena) : buffer_(arena->Adapter(kArenaAllocAssembler)) {}
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
@@ -296,13 +298,13 @@
return buffer_.size() * elem_size_;
}
- const std::vector<int32_t>& GetBuffer() const {
- return buffer_;
+ ArrayRef<const int32_t> GetBuffer() const {
+ return ArrayRef<const int32_t>(buffer_);
}
private:
static constexpr size_t elem_size_ = sizeof(int32_t);
- std::vector<int32_t> buffer_;
+ ArenaVector<int32_t> buffer_;
};
@@ -332,7 +334,7 @@
class X86_64Assembler FINAL : public Assembler {
public:
- X86_64Assembler() {}
+ explicit X86_64Assembler(ArenaAllocator* arena) : Assembler(arena), constant_area_(arena) {}
virtual ~X86_64Assembler() {}
/*
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 4f65709..b19e616 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -27,7 +27,9 @@
namespace art {
TEST(AssemblerX86_64, CreateBuffer) {
- AssemblerBuffer buffer;
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ AssemblerBuffer buffer(&arena);
AssemblerBuffer::EnsureCapacity ensured(&buffer);
buffer.Emit<uint8_t>(0x42);
ASSERT_EQ(static_cast<size_t>(1), buffer.Size());
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 70ff60f..d951089 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -77,9 +77,11 @@
"RegAllocVldt ",
"StackMapStm ",
"CodeGen ",
+ "Assembler ",
"ParallelMove ",
"GraphChecker ",
"Verifier ",
+ "CallingConv ",
};
template <bool kCount>
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 697f7e0..52a1002 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -88,9 +88,11 @@
kArenaAllocRegisterAllocatorValidate,
kArenaAllocStackMapStream,
kArenaAllocCodeGenerator,
+ kArenaAllocAssembler,
kArenaAllocParallelMoveResolver,
kArenaAllocGraphChecker,
kArenaAllocVerifier,
+ kArenaAllocCallingConvention,
kNumArenaAllocKinds
};
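
The two new kinds let arena allocations made by the assemblers and the calling-convention code be attributed separately in the arena statistics. Arena-backed containers pick up the tag through the allocator adapter, as the ConstantArea constructors above do, for example:

    ArenaVector<int32_t> buffer(arena->Adapter(kArenaAllocAssembler));
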
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index a432782..97dbe5d 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -292,7 +292,7 @@
// Pop the shadow frame before calling into compiled code.
self->PopShadowFrame();
- ArtInterpreterToCompiledCodeBridge(self, code_item, &shadow_frame, &result);
+ ArtInterpreterToCompiledCodeBridge(self, nullptr, code_item, &shadow_frame, &result);
// Push the shadow frame back as the caller will expect it.
self->PushShadowFrame(&shadow_frame);
@@ -535,6 +535,10 @@
return JValue();
}
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ jit->NotifyCompiledCodeToInterpreterTransition(self, shadow_frame->GetMethod());
+ }
return Execute(self, code_item, *shadow_frame, JValue());
}
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 3453abc..12d70c5 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -503,6 +503,7 @@
uint32_t vregC) ALWAYS_INLINE;
void ArtInterpreterToCompiledCodeBridge(Thread* self,
+ ArtMethod* caller,
const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame,
JValue* result)
@@ -530,6 +531,10 @@
uint16_t arg_offset = (code_item == nullptr)
? 0
: code_item->registers_size_ - code_item->ins_size_;
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr && caller != nullptr) {
+ jit->NotifyInterpreterToCompiledCodeTransition(self, caller);
+ }
method->Invoke(self, shadow_frame->GetVRegArgs(arg_offset),
(shadow_frame->NumberOfVRegs() - arg_offset) * sizeof(uint32_t),
result, method->GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty());
@@ -726,7 +731,8 @@
target->GetEntryPointFromQuickCompiledCode())) {
ArtInterpreterToInterpreterBridge(self, code_item, new_shadow_frame, result);
} else {
- ArtInterpreterToCompiledCodeBridge(self, code_item, new_shadow_frame, result);
+ ArtInterpreterToCompiledCodeBridge(
+ self, shadow_frame.GetMethod(), code_item, new_shadow_frame, result);
}
} else {
UnstartedRuntime::Invoke(self, code_item, new_shadow_frame, result, first_dest_reg);
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index fb98175..e5b89e2 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -635,7 +635,7 @@
jit->InvokeVirtualOrInterface(
self, receiver, sf_method, shadow_frame.GetDexPC(), called_method);
}
- jit->AddSamples(self, sf_method, 1);
+ jit->AddSamples(self, sf_method, 1, /*with_backedges*/false);
}
// TODO: Remove the InvokeVirtualOrInterface instrumentation, as it was only used by the JIT.
if (type == kVirtual || type == kInterface) {
@@ -681,7 +681,7 @@
if (jit != nullptr) {
jit->InvokeVirtualOrInterface(
self, receiver, shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method);
- jit->AddSamples(self, shadow_frame.GetMethod(), 1);
+ jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges*/false);
}
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
// TODO: Remove the InvokeVirtualOrInterface instrumentation, as it was only used by the JIT.
@@ -1001,8 +1001,11 @@
return branch_offset <= 0;
}
-void ArtInterpreterToCompiledCodeBridge(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame, JValue* result);
+void ArtInterpreterToCompiledCodeBridge(Thread* self,
+ ArtMethod* caller,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame,
+ JValue* result);
// Explicitly instantiate all DoInvoke functions.
#define EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, _is_range, _do_check) \
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index c95af6f..13cfb98 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -78,7 +78,7 @@
#define HOTNESS_UPDATE() \
do { \
if (jit != nullptr) { \
- jit->AddSamples(self, method, 1); \
+ jit->AddSamples(self, method, 1, /*with_backedges*/ true); \
} \
} while (false)
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index ca1d635..4323d4f 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -89,7 +89,7 @@
#define HOTNESS_UPDATE() \
do { \
if (jit != nullptr) { \
- jit->AddSamples(self, method, 1); \
+ jit->AddSamples(self, method, 1, /*with_backedges*/ true); \
} \
} while (false)
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index e005589..bd1af04 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -689,7 +689,7 @@
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr) {
int16_t count = shadow_frame->GetCachedHotnessCountdown() - shadow_frame->GetHotnessCountdown();
- jit->AddSamples(self, method, count);
+ jit->AddSamples(self, method, count, /*with_backedges*/ true);
}
return MterpSetUpHotnessCountdown(method, shadow_frame);
}
@@ -702,7 +702,7 @@
uint32_t dex_pc = shadow_frame->GetDexPC();
jit::Jit* jit = Runtime::Current()->GetJit();
if ((jit != nullptr) && (offset <= 0)) {
- jit->AddSamples(self, method, 1);
+ jit->AddSamples(self, method, 1, /*with_backedges*/ true);
}
int16_t countdown_value = MterpSetUpHotnessCountdown(method, shadow_frame);
if (countdown_value == jit::kJitCheckForOSR) {
@@ -722,7 +722,7 @@
jit::Jit* jit = Runtime::Current()->GetJit();
if (offset <= 0) {
// Keep updating hotness in case a compilation request was dropped. Eventually it will retry.
- jit->AddSamples(self, method, 1);
+ jit->AddSamples(self, method, 1, /*with_backedges*/ true);
}
// Assumes caller has already determined that an OSR check is appropriate.
return jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result);
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index d9d7a19..7cdd7c5 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -70,15 +70,19 @@
static Mutex g_jit_debug_mutex("JIT debug interface lock", kJitDebugInterfaceLock);
-static JITCodeEntry* CreateJITCodeEntryInternal(
- std::unique_ptr<const uint8_t[]> symfile_addr,
- uintptr_t symfile_size)
+static JITCodeEntry* CreateJITCodeEntryInternal(std::vector<uint8_t> symfile)
REQUIRES(g_jit_debug_mutex) {
- DCHECK(symfile_addr.get() != nullptr);
+ DCHECK_NE(symfile.size(), 0u);
+
+ // Make a copy of the buffer. We want to shrink it anyway.
+ uint8_t* symfile_copy = new uint8_t[symfile.size()];
+ CHECK(symfile_copy != nullptr);
+ memcpy(symfile_copy, symfile.data(), symfile.size());
JITCodeEntry* entry = new JITCodeEntry;
- entry->symfile_addr_ = symfile_addr.release();
- entry->symfile_size_ = symfile_size;
+ CHECK(entry != nullptr);
+ entry->symfile_addr_ = symfile_copy;
+ entry->symfile_size_ = symfile.size();
entry->prev_ = nullptr;
entry->next_ = __jit_debug_descriptor.first_entry_;
@@ -111,11 +115,10 @@
delete entry;
}
-JITCodeEntry* CreateJITCodeEntry(std::unique_ptr<const uint8_t[]> symfile_addr,
- uintptr_t symfile_size) {
+JITCodeEntry* CreateJITCodeEntry(std::vector<uint8_t> symfile) {
Thread* self = Thread::Current();
MutexLock mu(self, g_jit_debug_mutex);
- return CreateJITCodeEntryInternal(std::move(symfile_addr), symfile_size);
+ return CreateJITCodeEntryInternal(std::move(symfile));
}
void DeleteJITCodeEntry(JITCodeEntry* entry) {
@@ -128,14 +131,12 @@
// so that the user of the JIT interface does not have to store them.
static std::unordered_map<uintptr_t, JITCodeEntry*> g_jit_code_entries;
-void CreateJITCodeEntryForAddress(uintptr_t address,
- std::unique_ptr<const uint8_t[]> symfile_addr,
- uintptr_t symfile_size) {
+void CreateJITCodeEntryForAddress(uintptr_t address, std::vector<uint8_t> symfile) {
Thread* self = Thread::Current();
MutexLock mu(self, g_jit_debug_mutex);
DCHECK_NE(address, 0u);
DCHECK(g_jit_code_entries.find(address) == g_jit_code_entries.end());
- JITCodeEntry* entry = CreateJITCodeEntryInternal(std::move(symfile_addr), symfile_size);
+ JITCodeEntry* entry = CreateJITCodeEntryInternal(std::move(symfile));
g_jit_code_entries.emplace(address, entry);
}
diff --git a/runtime/jit/debugger_interface.h b/runtime/jit/debugger_interface.h
index 74469a9..d9bf331 100644
--- a/runtime/jit/debugger_interface.h
+++ b/runtime/jit/debugger_interface.h
@@ -19,6 +19,7 @@
#include <inttypes.h>
#include <memory>
+#include <vector>
namespace art {
@@ -28,8 +29,7 @@
// Notify native debugger about new JITed code by passing in-memory ELF.
// It takes ownership of the in-memory ELF file.
-JITCodeEntry* CreateJITCodeEntry(std::unique_ptr<const uint8_t[]> symfile_addr,
- uintptr_t symfile_size);
+JITCodeEntry* CreateJITCodeEntry(std::vector<uint8_t> symfile);
// Notify native debugger that JITed code has been removed.
// It also releases the associated in-memory ELF file.
@@ -38,9 +38,7 @@
// Notify native debugger about new JITed code by passing in-memory ELF.
// The address is used only to uniquely identify the entry.
// It takes ownership of the in-memory ELF file.
-void CreateJITCodeEntryForAddress(uintptr_t address,
- std::unique_ptr<const uint8_t[]> symfile_addr,
- uintptr_t symfile_size);
+void CreateJITCodeEntryForAddress(uintptr_t address, std::vector<uint8_t> symfile);
// Notify native debugger that JITed code has been removed.
// Returns false if entry for the given address was not found.
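
The debug entry now takes the in-memory ELF file as a std::vector<uint8_t> by value; the implementation makes its own copy, so the entry keeps ownership of the symbol file for its lifetime. A hedged sketch of a caller (BuildInMemoryElf is a hypothetical producer, not part of this patch):

    std::vector<uint8_t> symfile = BuildInMemoryElf(method_infos);  // assumed helper
    JITCodeEntry* entry = CreateJITCodeEntry(std::move(symfile));
    ...
    DeleteJITCodeEntry(entry);  // releases the internally held copy
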
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 8f126bf..85371bc 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -97,18 +97,17 @@
LOG(FATAL) << "Priority thread weight cannot be 0.";
}
} else {
- jit_options->priority_thread_weight_ =
- std::max(jit_options->compile_threshold_ / 2000, static_cast<size_t>(1));
+ jit_options->priority_thread_weight_ = std::max(
+ jit_options->warmup_threshold_ / Jit::kDefaultPriorityThreadWeightRatio,
+ static_cast<size_t>(1));
}
return jit_options;
}
bool Jit::ShouldUsePriorityThreadWeight() {
- // TODO(calin): verify that IsSensitiveThread covers only the cases we are interested on.
- // In particular if apps can set StrictMode policies for any of their threads, case in which
- // we need to find another way to track sensitive threads.
- return Runtime::Current()->InJankPerceptibleProcessState() && Thread::IsSensitiveThread();
+ return Runtime::Current()->InJankPerceptibleProcessState()
+ && Thread::Current()->IsJitSensitiveThread();
}
void Jit::DumpInfo(std::ostream& os) {
@@ -159,6 +158,8 @@
jit->warm_method_threshold_ = options->GetWarmupThreshold();
jit->osr_method_threshold_ = options->GetOsrThreshold();
jit->priority_thread_weight_ = options->GetPriorityThreadWeight();
+ jit->transition_weight_ = std::max(
+ jit->warm_method_threshold_ / kDefaultTransitionRatio, static_cast<size_t>(1));
jit->CreateThreadPool();
@@ -245,8 +246,17 @@
if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr)) {
return false;
}
+
+ VLOG(jit) << "Compiling method "
+ << PrettyMethod(method_to_compile)
+ << " osr=" << std::boolalpha << osr;
bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, osr);
code_cache_->DoneCompiling(method_to_compile, self, osr);
+ if (!success) {
+ VLOG(jit) << "Failed to compile method "
+ << PrettyMethod(method_to_compile)
+ << " osr=" << std::boolalpha << osr;
+ }
return success;
}
@@ -525,15 +535,9 @@
void Run(Thread* self) OVERRIDE {
ScopedObjectAccess soa(self);
if (kind_ == kCompile) {
- VLOG(jit) << "JitCompileTask compiling method " << PrettyMethod(method_);
- if (!Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ false)) {
- VLOG(jit) << "Failed to compile method " << PrettyMethod(method_);
- }
+ Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ false);
} else if (kind_ == kCompileOsr) {
- VLOG(jit) << "JitCompileTask compiling method osr " << PrettyMethod(method_);
- if (!Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ true)) {
- VLOG(jit) << "Failed to compile method osr " << PrettyMethod(method_);
- }
+ Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ true);
} else {
DCHECK(kind_ == kAllocateProfile);
if (ProfilingInfo::Create(self, method_, /* retry_allocation */ true)) {
@@ -554,7 +558,7 @@
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
};
-void Jit::AddSamples(Thread* self, ArtMethod* method, uint16_t count) {
+void Jit::AddSamples(Thread* self, ArtMethod* method, uint16_t count, bool with_backedges) {
if (thread_pool_ == nullptr) {
// Should only see this when shutting down.
DCHECK(Runtime::Current()->IsShuttingDown(self));
@@ -578,7 +582,8 @@
}
int32_t new_count = starting_count + count; // int32 here to avoid wrap-around;
if (starting_count < warm_method_threshold_) {
- if (new_count >= warm_method_threshold_) {
+ if ((new_count >= warm_method_threshold_) &&
+ (method->GetProfilingInfo(sizeof(void*)) == nullptr)) {
bool success = ProfilingInfo::Create(self, method, /* retry_allocation */ false);
if (success) {
VLOG(jit) << "Start profiling " << PrettyMethod(method);
@@ -600,14 +605,19 @@
// Avoid jumping more than one state at a time.
new_count = std::min(new_count, hot_method_threshold_ - 1);
} else if (starting_count < hot_method_threshold_) {
- if (new_count >= hot_method_threshold_) {
+ if ((new_count >= hot_method_threshold_) &&
+ !code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
DCHECK(thread_pool_ != nullptr);
thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompile));
}
// Avoid jumping more than one state at a time.
new_count = std::min(new_count, osr_method_threshold_ - 1);
} else if (starting_count < osr_method_threshold_) {
- if (new_count >= osr_method_threshold_) {
+ if (!with_backedges) {
+ // If the samples don't contain any back edge, we don't increment the hotness.
+ return;
+ }
+ if ((new_count >= osr_method_threshold_) && !code_cache_->IsOsrCompiled(method)) {
DCHECK(thread_pool_ != nullptr);
thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompileOsr));
}
@@ -635,7 +645,7 @@
!Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()) {
method->SetEntryPointFromQuickCompiledCode(profiling_info->GetSavedEntryPoint());
} else {
- AddSamples(thread, method, 1);
+ AddSamples(thread, method, 1, /* with_backedges */false);
}
}
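
Both derived weights are now computed from the warm-up threshold. As a worked example with a hypothetical warm-up threshold of 5000: priority_thread_weight_ = max(5000 / 1000, 1) = 5 and transition_weight_ = max(5000 / 100, 1) = 50, so each interpreter/compiled-code transition reported via the Notify*Transition helpers adds 50 samples, while priority_thread_weight_ scales samples coming from the registered jank-sensitive thread.
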
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 4e2d1ab..8198c18 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -43,6 +43,8 @@
public:
static constexpr bool kStressMode = kIsDebugBuild;
static constexpr size_t kDefaultCompileThreshold = kStressMode ? 2 : 10000;
+ static constexpr size_t kDefaultPriorityThreadWeightRatio = 1000;
+ static constexpr size_t kDefaultTransitionRatio = 100;
virtual ~Jit();
static Jit* Create(JitOptions* options, std::string* error_msg);
@@ -92,7 +94,7 @@
void MethodEntered(Thread* thread, ArtMethod* method)
SHARED_REQUIRES(Locks::mutator_lock_);
- void AddSamples(Thread* self, ArtMethod* method, uint16_t samples)
+ void AddSamples(Thread* self, ArtMethod* method, uint16_t samples, bool with_backedges)
SHARED_REQUIRES(Locks::mutator_lock_);
void InvokeVirtualOrInterface(Thread* thread,
@@ -102,6 +104,16 @@
ArtMethod* callee)
SHARED_REQUIRES(Locks::mutator_lock_);
+ void NotifyInterpreterToCompiledCodeTransition(Thread* self, ArtMethod* caller)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ AddSamples(self, caller, transition_weight_, false);
+ }
+
+ void NotifyCompiledCodeToInterpreterTransition(Thread* self, ArtMethod* callee)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ AddSamples(self, callee, transition_weight_, false);
+ }
+
// Starts the profile saver if the config options allow profile recording.
// The profile will be stored in the specified `filename` and will contain
// information collected from the given `code_paths` (a set of dex locations).
@@ -173,6 +185,7 @@
uint16_t warm_method_threshold_;
uint16_t osr_method_threshold_;
uint16_t priority_thread_weight_;
+ uint16_t transition_weight_;
std::unique_ptr<ThreadPool> thread_pool_;
DISALLOW_COPY_AND_ASSIGN(Jit);
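
AddSamples now carries a with_backedges flag so that only samples containing loop back edges can push a method past the OSR threshold; invocation-style samples still feed the warm-up and hot thresholds. A sketch of the two caller patterns used in the interpreter hunks above:

    jit::Jit* jit = Runtime::Current()->GetJit();
    if (jit != nullptr) {
      jit->AddSamples(self, method, 1, /*with_backedges*/ false);  // invoke-style sample, no OSR
      jit->AddSamples(self, method, 1, /*with_backedges*/ true);   // loop sample (HOTNESS_UPDATE)
    }
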
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 820ae6a..1f3e08b 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -366,7 +366,7 @@
}
last_update_time_ns_.StoreRelease(NanoTime());
VLOG(jit)
- << "JIT added (osr = " << std::boolalpha << osr << std::noboolalpha << ") "
+ << "JIT added (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
<< PrettyMethod(method) << "@" << method
<< " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
<< " dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": "
@@ -905,15 +905,18 @@
return last_update_time_ns_.LoadAcquire();
}
+bool JitCodeCache::IsOsrCompiled(ArtMethod* method) {
+ MutexLock mu(Thread::Current(), lock_);
+ return osr_code_map_.find(method) != osr_code_map_.end();
+}
+
bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr) {
if (!osr && ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
- VLOG(jit) << PrettyMethod(method) << " is already compiled";
return false;
}
MutexLock mu(self, lock_);
if (osr && (osr_code_map_.find(method) != osr_code_map_.end())) {
- VLOG(jit) << PrettyMethod(method) << " is already osr compiled";
return false;
}
@@ -928,7 +931,6 @@
}
if (info->IsMethodBeingCompiled(osr)) {
- VLOG(jit) << PrettyMethod(method) << " is already being compiled";
return false;
}
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 9f18c70..f31cc51 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -186,6 +186,8 @@
void Dump(std::ostream& os) REQUIRES(!lock_);
+ bool IsOsrCompiled(ArtMethod* method) REQUIRES(!lock_);
+
private:
// Take ownership of maps.
JitCodeCache(MemMap* code_map,
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index cdf468c..6285542 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -33,8 +33,8 @@
namespace mirror {
inline uint32_t String::ClassSize(size_t pointer_size) {
- uint32_t vtable_entries = Object::kVTableLength + 53;
- return Class::ComputeClassSize(true, vtable_entries, 0, 2, 0, 1, 2, pointer_size);
+ uint32_t vtable_entries = Object::kVTableLength + 56;
+ return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 1, 2, pointer_size);
}
// Sets string count in the allocation code path to ensure it is guarded by a CAS.
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 88a5870..56c0d58 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -216,6 +216,10 @@
Runtime::Current()->GetHeap()->RegisterNativeAllocation(env, static_cast<size_t>(bytes));
}
+static void VMRuntime_registerSensitiveThread(JNIEnv*, jobject) {
+ Runtime::Current()->RegisterSensitiveThread();
+}
+
static void VMRuntime_registerNativeFree(JNIEnv* env, jobject, jint bytes) {
if (UNLIKELY(bytes < 0)) {
ScopedObjectAccess soa(env);
@@ -648,6 +652,7 @@
NATIVE_METHOD(VMRuntime, properties, "()[Ljava/lang/String;"),
NATIVE_METHOD(VMRuntime, setTargetSdkVersionNative, "(I)V"),
NATIVE_METHOD(VMRuntime, registerNativeAllocation, "(I)V"),
+ NATIVE_METHOD(VMRuntime, registerSensitiveThread, "()V"),
NATIVE_METHOD(VMRuntime, registerNativeFree, "(I)V"),
NATIVE_METHOD(VMRuntime, requestConcurrentGC, "()V"),
NATIVE_METHOD(VMRuntime, requestHeapTrim, "()V"),
diff --git a/runtime/openjdkjvm/OpenjdkJvm.cc b/runtime/openjdkjvm/OpenjdkJvm.cc
index aff9b61..1f33651 100644
--- a/runtime/openjdkjvm/OpenjdkJvm.cc
+++ b/runtime/openjdkjvm/OpenjdkJvm.cc
@@ -116,7 +116,18 @@
/* posix lseek() */
JNIEXPORT jlong JVM_Lseek(jint fd, jlong offset, jint whence) {
- return TEMP_FAILURE_RETRY(lseek(fd, offset, whence));
+#if !defined(__APPLE__)
+ // NOTE: Using TEMP_FAILURE_RETRY here is busted for LP32 on glibc - the return
+ // value will be coerced into an int32_t.
+ //
+ // lseek64 isn't specified to return EINTR so it shouldn't be necessary
+ // anyway.
+ return lseek64(fd, offset, whence);
+#else
+ // NOTE: This code is compiled for Mac OS but isn't ever run on that
+ // platform.
+ return lseek(fd, offset, whence);
+#endif
}
/*
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 2489e45..5dbc6b3 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1981,4 +1981,8 @@
GetHeap()->UpdateProcessState(old_process_state, process_state);
}
+void Runtime::RegisterSensitiveThread() const {
+ Thread::SetJitSensitiveThread();
+}
+
} // namespace art
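
Taken together with the VMRuntime hunk above and the Thread changes further down in this patch, sensitive-thread registration is a one-way notification chain (a sketch using the names as they appear in the respective hunks):

    // Framework code calls VMRuntime.registerSensitiveThread() from the UI thread.
    VMRuntime_registerSensitiveThread()          // JNI entry point
        -> Runtime::RegisterSensitiveThread()
        -> Thread::SetJitSensitiveThread()       // records Thread::Current(), warns if called twice
    // Jit::ShouldUsePriorityThreadWeight() later checks
    // Thread::Current()->IsJitSensitiveThread() while the process is jank perceptible.
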
diff --git a/runtime/runtime.h b/runtime/runtime.h
index ae25dd1..c507129 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -635,6 +635,8 @@
return process_state_ == kProcessStateJankPerceptible;
}
+ void RegisterSensitiveThread() const;
+
void SetZygoteNoThreadSection(bool val) {
zygote_no_threads_ = val;
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index e015833..a462036 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -89,6 +89,7 @@
ConditionVariable* Thread::resume_cond_ = nullptr;
const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
bool (*Thread::is_sensitive_thread_hook_)() = nullptr;
+Thread* Thread::jit_sensitive_thread_ = nullptr;
static constexpr bool kVerifyImageObjectsMarked = kIsDebugBuild;
diff --git a/runtime/thread.h b/runtime/thread.h
index 2218b5a..b7b0591 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1097,6 +1097,12 @@
return debug_disallow_read_barrier_;
}
+ // Returns true if the current thread is the jit sensitive thread.
+ bool IsJitSensitiveThread() const {
+ return this == jit_sensitive_thread_;
+ }
+
+ // Returns true if StrictMode events are traced for the current thread.
static bool IsSensitiveThread() {
if (is_sensitive_thread_hook_ != nullptr) {
return (*is_sensitive_thread_hook_)();
@@ -1179,6 +1185,16 @@
ALWAYS_INLINE void PassActiveSuspendBarriers()
REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
+ // Registers the current thread as the jit sensitive thread. Should be called just once.
+ static void SetJitSensitiveThread() {
+ if (jit_sensitive_thread_ == nullptr) {
+ jit_sensitive_thread_ = Thread::Current();
+ } else {
+ LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:"
+ << Thread::Current()->GetTid();
+ }
+ }
+
static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) {
is_sensitive_thread_hook_ = is_sensitive_thread_hook;
}
@@ -1228,6 +1244,8 @@
// Hook passed by framework which returns true
// when StrictMode events are traced for the current thread.
static bool (*is_sensitive_thread_hook_)();
+ // Stores the jit sensitive thread (which for now is the UI thread).
+ static Thread* jit_sensitive_thread_;
/***********************************************************************************************/
// Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index d5319fd..647578e 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -4577,8 +4577,18 @@
// Trying to access C1.field1 using reference of type C2, which is neither C1 or a sub-class
// of C1. For resolution to occur the declared class of the field must be compatible with
// obj_type, we've discovered this wasn't so, so report the field didn't exist.
- Fail(VERIFY_ERROR_NO_FIELD) << "cannot access instance field " << PrettyField(field)
- << " from object of type " << obj_type;
+ VerifyError type;
+ bool is_aot = Runtime::Current()->IsAotCompiler();
+ if (is_aot && (field_klass.IsUnresolvedTypes() || obj_type.IsUnresolvedTypes())) {
+ // Compiler & unresolved types involved, retry at runtime.
+ type = VerifyError::VERIFY_ERROR_NO_CLASS;
+ } else {
+ // Classes known (resolved; and thus assignability check is precise), or we are at runtime
+ // and still missing classes. This is a hard failure.
+ type = VerifyError::VERIFY_ERROR_BAD_CLASS_HARD;
+ }
+ Fail(type) << "cannot access instance field " << PrettyField(field)
+ << " from object of type " << obj_type;
return nullptr;
} else {
return field;
diff --git a/test/100-reflect2/expected.txt b/test/100-reflect2/expected.txt
index e4988c9..d878e69 100644
--- a/test/100-reflect2/expected.txt
+++ b/test/100-reflect2/expected.txt
@@ -32,8 +32,8 @@
62 (class java.lang.Long)
14 (class java.lang.Short)
[java.lang.String(int,int,char[]), public java.lang.String(), public java.lang.String(byte[]), public java.lang.String(byte[],int), public java.lang.String(byte[],int,int), public java.lang.String(byte[],int,int,int), public java.lang.String(byte[],int,int,java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],int,int,java.nio.charset.Charset), public java.lang.String(byte[],java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],java.nio.charset.Charset), public java.lang.String(char[]), public java.lang.String(char[],int,int), public java.lang.String(int[],int,int), public java.lang.String(java.lang.String), public java.lang.String(java.lang.StringBuffer), public java.lang.String(java.lang.StringBuilder)]
-[private final int java.lang.String.count, private int java.lang.String.hash, private static final java.io.ObjectStreamField[] java.lang.String.serialPersistentFields, private static final long java.lang.String.serialVersionUID, private static int java.lang.String.HASHING_SEED, public static final java.util.Comparator java.lang.String.CASE_INSENSITIVE_ORDER]
-[int java.lang.String.hash32(), native void java.lang.String.getCharsNoCheck(int,int,char[],int), native void java.lang.String.setCharAt(int,char), private int java.lang.String.indexOfSupplementary(int,int), private int java.lang.String.lastIndexOfSupplementary(int,int), private native int java.lang.String.fastIndexOf(int,int), private native java.lang.String java.lang.String.fastSubstring(int,int), private static int java.lang.String.getHashingSeed(), public boolean java.lang.String.contains(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.StringBuffer), public boolean java.lang.String.endsWith(java.lang.String), public boolean java.lang.String.equals(java.lang.Object), public boolean java.lang.String.equalsIgnoreCase(java.lang.String), public boolean java.lang.String.isEmpty(), public boolean java.lang.String.matches(java.lang.String), public boolean java.lang.String.regionMatches(boolean,int,java.lang.String,int,int), public boolean java.lang.String.regionMatches(int,java.lang.String,int,int), public boolean java.lang.String.startsWith(java.lang.String), public boolean java.lang.String.startsWith(java.lang.String,int), public byte[] java.lang.String.getBytes(), public byte[] java.lang.String.getBytes(java.lang.String) throws java.io.UnsupportedEncodingException, public byte[] java.lang.String.getBytes(java.nio.charset.Charset), public int java.lang.String.codePointAt(int), public int java.lang.String.codePointBefore(int), public int java.lang.String.codePointCount(int,int), public int java.lang.String.compareTo(java.lang.Object), public int java.lang.String.compareToIgnoreCase(java.lang.String), public int java.lang.String.hashCode(), public int java.lang.String.indexOf(int), public int java.lang.String.indexOf(int,int), public int java.lang.String.indexOf(java.lang.String), public int java.lang.String.indexOf(java.lang.String,int), public int java.lang.String.lastIndexOf(int), public int java.lang.String.lastIndexOf(int,int), public int java.lang.String.lastIndexOf(java.lang.String), public int java.lang.String.lastIndexOf(java.lang.String,int), public int java.lang.String.length(), public int java.lang.String.offsetByCodePoints(int,int), public java.lang.CharSequence java.lang.String.subSequence(int,int), public java.lang.String java.lang.String.replace(char,char), public java.lang.String java.lang.String.replace(java.lang.CharSequence,java.lang.CharSequence), public java.lang.String java.lang.String.replaceAll(java.lang.String,java.lang.String), public java.lang.String java.lang.String.replaceFirst(java.lang.String,java.lang.String), public java.lang.String java.lang.String.substring(int), public java.lang.String java.lang.String.substring(int,int), public java.lang.String java.lang.String.toLowerCase(), public java.lang.String java.lang.String.toLowerCase(java.util.Locale), public java.lang.String java.lang.String.toString(), public java.lang.String java.lang.String.toUpperCase(), public java.lang.String java.lang.String.toUpperCase(java.util.Locale), public java.lang.String java.lang.String.trim(), public java.lang.String[] java.lang.String.split(java.lang.String), public java.lang.String[] java.lang.String.split(java.lang.String,int), public native char java.lang.String.charAt(int), public native char[] java.lang.String.toCharArray(), public native int java.lang.String.compareTo(java.lang.String), public native java.lang.String java.lang.String.concat(java.lang.String), public native java.lang.String java.lang.String.intern(), public static java.lang.String java.lang.String.copyValueOf(char[]), public static java.lang.String java.lang.String.copyValueOf(char[],int,int), public static java.lang.String java.lang.String.format(java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.format(java.util.Locale,java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.valueOf(boolean), public static java.lang.String java.lang.String.valueOf(char), public static java.lang.String java.lang.String.valueOf(char[]), public static java.lang.String java.lang.String.valueOf(char[],int,int), public static java.lang.String java.lang.String.valueOf(double), public static java.lang.String java.lang.String.valueOf(float), public static java.lang.String java.lang.String.valueOf(int), public static java.lang.String java.lang.String.valueOf(java.lang.Object), public static java.lang.String java.lang.String.valueOf(long), public void java.lang.String.getBytes(int,int,byte[],int), public void java.lang.String.getChars(int,int,char[],int), static int java.lang.String.indexOf(char[],int,int,char[],int,int,int), static int java.lang.String.indexOf(java.lang.String,java.lang.String,int), static int java.lang.String.lastIndexOf(char[],int,int,char[],int,int,int), static int java.lang.String.lastIndexOf(java.lang.String,java.lang.String,int)]
+[private final int java.lang.String.count, private int java.lang.String.hash, private static final java.io.ObjectStreamField[] java.lang.String.serialPersistentFields, private static final long java.lang.String.serialVersionUID, public static final java.util.Comparator java.lang.String.CASE_INSENSITIVE_ORDER]
+[native void java.lang.String.getCharsNoCheck(int,int,char[],int), native void java.lang.String.setCharAt(int,char), private int java.lang.String.indexOfSupplementary(int,int), private int java.lang.String.lastIndexOfSupplementary(int,int), private native int java.lang.String.fastIndexOf(int,int), private native java.lang.String java.lang.String.fastSubstring(int,int), public boolean java.lang.String.contains(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.StringBuffer), public boolean java.lang.String.endsWith(java.lang.String), public boolean java.lang.String.equals(java.lang.Object), public boolean java.lang.String.equalsIgnoreCase(java.lang.String), public boolean java.lang.String.isEmpty(), public boolean java.lang.String.matches(java.lang.String), public boolean java.lang.String.regionMatches(boolean,int,java.lang.String,int,int), public boolean java.lang.String.regionMatches(int,java.lang.String,int,int), public boolean java.lang.String.startsWith(java.lang.String), public boolean java.lang.String.startsWith(java.lang.String,int), public byte[] java.lang.String.getBytes(), public byte[] java.lang.String.getBytes(java.lang.String) throws java.io.UnsupportedEncodingException, public byte[] java.lang.String.getBytes(java.nio.charset.Charset), public int java.lang.String.codePointAt(int), public int java.lang.String.codePointBefore(int), public int java.lang.String.codePointCount(int,int), public int java.lang.String.compareTo(java.lang.Object), public int java.lang.String.compareToIgnoreCase(java.lang.String), public int java.lang.String.hashCode(), public int java.lang.String.indexOf(int), public int java.lang.String.indexOf(int,int), public int java.lang.String.indexOf(java.lang.String), public int java.lang.String.indexOf(java.lang.String,int), public int java.lang.String.lastIndexOf(int), public int java.lang.String.lastIndexOf(int,int), public int java.lang.String.lastIndexOf(java.lang.String), public int java.lang.String.lastIndexOf(java.lang.String,int), public int java.lang.String.length(), public int java.lang.String.offsetByCodePoints(int,int), public java.lang.CharSequence java.lang.String.subSequence(int,int), public java.lang.String java.lang.String.replace(char,char), public java.lang.String java.lang.String.replace(java.lang.CharSequence,java.lang.CharSequence), public java.lang.String java.lang.String.replaceAll(java.lang.String,java.lang.String), public java.lang.String java.lang.String.replaceFirst(java.lang.String,java.lang.String), public java.lang.String java.lang.String.substring(int), public java.lang.String java.lang.String.substring(int,int), public java.lang.String java.lang.String.toLowerCase(), public java.lang.String java.lang.String.toLowerCase(java.util.Locale), public java.lang.String java.lang.String.toString(), public java.lang.String java.lang.String.toUpperCase(), public java.lang.String java.lang.String.toUpperCase(java.util.Locale), public java.lang.String java.lang.String.trim(), public java.lang.String[] java.lang.String.split(java.lang.String), public java.lang.String[] java.lang.String.split(java.lang.String,int), public native char java.lang.String.charAt(int), public native char[] java.lang.String.toCharArray(), public native int java.lang.String.compareTo(java.lang.String), public native java.lang.String java.lang.String.concat(java.lang.String), public native java.lang.String java.lang.String.intern(), public static java.lang.String java.lang.String.copyValueOf(char[]), public static java.lang.String java.lang.String.copyValueOf(char[],int,int), public static java.lang.String java.lang.String.format(java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.format(java.util.Locale,java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.valueOf(boolean), public static java.lang.String java.lang.String.valueOf(char), public static java.lang.String java.lang.String.valueOf(char[]), public static java.lang.String java.lang.String.valueOf(char[],int,int), public static java.lang.String java.lang.String.valueOf(double), public static java.lang.String java.lang.String.valueOf(float), public static java.lang.String java.lang.String.valueOf(int), public static java.lang.String java.lang.String.valueOf(java.lang.Object), public static java.lang.String java.lang.String.valueOf(long), public void java.lang.String.getBytes(int,int,byte[],int), public void java.lang.String.getChars(int,int,char[],int), static int java.lang.String.indexOf(char[],int,int,char[],int,int,int), static int java.lang.String.indexOf(java.lang.String,java.lang.String,int), static int java.lang.String.lastIndexOf(char[],int,int,char[],int,int,int), static int java.lang.String.lastIndexOf(java.lang.String,java.lang.String,int)]
[]
[interface java.io.Serializable, interface java.lang.Comparable, interface java.lang.CharSequence]
0
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index c2a9a31..11150c2 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -66,4 +66,5 @@
b/27799205 (4)
b/27799205 (5)
b/27799205 (6)
+b/28187158
Done!
diff --git a/test/800-smali/smali/b_28187158.smali b/test/800-smali/smali/b_28187158.smali
new file mode 100644
index 0000000..14d5cec
--- /dev/null
+++ b/test/800-smali/smali/b_28187158.smali
@@ -0,0 +1,12 @@
+.class public LB28187158;
+
+# Regression test for iget with wrong classes.
+
+.super Ljava/lang/Object;
+
+.method public static run(Ljava/lang/Integer;)V
+ .registers 2
+ iget v0, p0, Ljava/lang/System;->in:Ljava/io/InputStream;
+ return-void
+.end method
+
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index 2001cb4..c883b7f 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -174,6 +174,8 @@
testCases.add(new TestCase("b/27799205 (5)", "B27799205Helper", "run5", null,
new VerifyError(), null));
testCases.add(new TestCase("b/27799205 (6)", "B27799205Helper", "run6", null, null, null));
+ testCases.add(new TestCase("b/28187158", "B28187158", "run", new Object[] { null} ,
+ new VerifyError(), null));
}
public void runTests() {