Merge "Fix using LiveIntervals beyond their lifetime."
diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc
index 28709a1..5b57718 100644
--- a/compiler/jni/jni_cfi_test.cc
+++ b/compiler/jni/jni_cfi_test.cc
@@ -62,23 +62,24 @@
const char* shorty = "IIFII";
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
std::unique_ptr<JniCallingConvention> jni_conv(
- JniCallingConvention::Create(&arena,
+ JniCallingConvention::Create(&allocator,
is_static,
is_synchronized,
/*is_critical_native*/false,
shorty,
isa));
std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
- ManagedRuntimeCallingConvention::Create(&arena, is_static, is_synchronized, shorty, isa));
+ ManagedRuntimeCallingConvention::Create(
+ &allocator, is_static, is_synchronized, shorty, isa));
const int frame_size(jni_conv->FrameSize());
ArrayRef<const ManagedRegister> callee_save_regs = jni_conv->CalleeSaveRegisters();
// Assemble the method.
std::unique_ptr<JNIMacroAssembler<kPointerSize>> jni_asm(
- JNIMacroAssembler<kPointerSize>::Create(&arena, isa));
+ JNIMacroAssembler<kPointerSize>::Create(&allocator, isa));
jni_asm->cfi().SetEnabled(true);
jni_asm->BuildFrame(frame_size, mr_conv->MethodRegister(),
callee_save_regs, mr_conv->EntrySpills());
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 92b5c4d..e32b681 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -179,11 +179,11 @@
}
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
// Calling conventions used to iterate over parameters to method
std::unique_ptr<JniCallingConvention> main_jni_conv =
- JniCallingConvention::Create(&arena,
+ JniCallingConvention::Create(&allocator,
is_static,
is_synchronized,
is_critical_native,
@@ -193,7 +193,7 @@
std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
ManagedRuntimeCallingConvention::Create(
- &arena, is_static, is_synchronized, shorty, instruction_set));
+ &allocator, is_static, is_synchronized, shorty, instruction_set));
// Calling conventions to call into JNI method "end" possibly passing a returned reference, the
// method and the current thread.
@@ -209,7 +209,7 @@
}
std::unique_ptr<JniCallingConvention> end_jni_conv(
- JniCallingConvention::Create(&arena,
+ JniCallingConvention::Create(&allocator,
is_static,
is_synchronized,
is_critical_native,
@@ -218,7 +218,7 @@
// Assembler that holds generated instructions
std::unique_ptr<JNIMacroAssembler<kPointerSize>> jni_asm =
- GetMacroAssembler<kPointerSize>(&arena, instruction_set, instruction_set_features);
+ GetMacroAssembler<kPointerSize>(&allocator, instruction_set, instruction_set_features);
const CompilerOptions& compiler_options = driver->GetCompilerOptions();
jni_asm->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo());
jni_asm->SetEmitRunTimeChecksInDebugMode(compiler_options.EmitRunTimeChecksInDebugMode());
diff --git a/compiler/linker/arm/relative_patcher_thumb2.cc b/compiler/linker/arm/relative_patcher_thumb2.cc
index f84fea3..3d56833 100644
--- a/compiler/linker/arm/relative_patcher_thumb2.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2.cc
@@ -354,8 +354,8 @@
std::vector<uint8_t> Thumb2RelativePatcher::CompileThunk(const ThunkKey& key) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- arm::ArmVIXLAssembler assembler(&arena);
+ ArenaAllocator allocator(&pool);
+ arm::ArmVIXLAssembler assembler(&allocator);
switch (key.GetType()) {
case ThunkType::kMethodCall:
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 828c99b..663e43b 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -511,8 +511,8 @@
std::vector<uint8_t> Arm64RelativePatcher::CompileThunk(const ThunkKey& key) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- arm64::Arm64Assembler assembler(&arena);
+ ArenaAllocator allocator(&pool);
+ arm64::Arm64Assembler assembler(&allocator);
switch (key.GetType()) {
case ThunkType::kMethodCall: {
diff --git a/compiler/optimizing/block_builder.cc b/compiler/optimizing/block_builder.cc
index fe7ecd1..d7def77 100644
--- a/compiler/optimizing/block_builder.cc
+++ b/compiler/optimizing/block_builder.cc
@@ -29,7 +29,7 @@
uint32_t store_dex_pc) {
HBasicBlock* block = branch_targets_[store_dex_pc];
if (block == nullptr) {
- block = new (arena_) HBasicBlock(graph_, semantic_dex_pc);
+ block = new (allocator_) HBasicBlock(graph_, semantic_dex_pc);
branch_targets_[store_dex_pc] = block;
}
DCHECK_EQ(block->GetDexPc(), semantic_dex_pc);
@@ -200,7 +200,7 @@
// Returns the TryItem stored for `block` or nullptr if there is no info for it.
static const DexFile::TryItem* GetTryItem(
HBasicBlock* block,
- const ArenaSafeMap<uint32_t, const DexFile::TryItem*>& try_block_info) {
+ const ScopedArenaSafeMap<uint32_t, const DexFile::TryItem*>& try_block_info) {
auto iterator = try_block_info.find(block->GetBlockId());
return (iterator == try_block_info.end()) ? nullptr : iterator->second;
}
@@ -212,7 +212,7 @@
static void LinkToCatchBlocks(HTryBoundary* try_boundary,
const DexFile::CodeItem& code_item,
const DexFile::TryItem* try_item,
- const ArenaSafeMap<uint32_t, HBasicBlock*>& catch_blocks) {
+ const ScopedArenaSafeMap<uint32_t, HBasicBlock*>& catch_blocks) {
for (CatchHandlerIterator it(code_item, *try_item); it.HasNext(); it.Next()) {
try_boundary->AddExceptionHandler(catch_blocks.Get(it.GetHandlerAddress()));
}
@@ -253,8 +253,8 @@
// Keep a map of all try blocks and their respective TryItems. We do not use
// the block's pointer but rather its id to ensure deterministic iteration.
- ArenaSafeMap<uint32_t, const DexFile::TryItem*> try_block_info(
- std::less<uint32_t>(), arena_->Adapter(kArenaAllocGraphBuilder));
+ ScopedArenaSafeMap<uint32_t, const DexFile::TryItem*> try_block_info(
+ std::less<uint32_t>(), local_allocator_->Adapter(kArenaAllocGraphBuilder));
// Obtain TryItem information for blocks with throwing instructions, and split
// blocks which are both try & catch to simplify the graph.
@@ -278,8 +278,8 @@
}
// Map from a handler dex_pc to the corresponding catch block.
- ArenaSafeMap<uint32_t, HBasicBlock*> catch_blocks(
- std::less<uint32_t>(), arena_->Adapter(kArenaAllocGraphBuilder));
+ ScopedArenaSafeMap<uint32_t, HBasicBlock*> catch_blocks(
+ std::less<uint32_t>(), local_allocator_->Adapter(kArenaAllocGraphBuilder));
// Iterate over catch blocks, create artifical landing pads if necessary to
// simplify the CFG, and set metadata.
@@ -302,8 +302,8 @@
HBasicBlock* catch_block = GetBlockAt(address);
bool is_try_block = (try_block_info.find(catch_block->GetBlockId()) != try_block_info.end());
if (is_try_block || MightHaveLiveNormalPredecessors(catch_block)) {
- HBasicBlock* new_catch_block = new (arena_) HBasicBlock(graph_, address);
- new_catch_block->AddInstruction(new (arena_) HGoto(address));
+ HBasicBlock* new_catch_block = new (allocator_) HBasicBlock(graph_, address);
+ new_catch_block->AddInstruction(new (allocator_) HGoto(address));
new_catch_block->AddSuccessor(catch_block);
graph_->AddBlock(new_catch_block);
catch_block = new_catch_block;
@@ -311,7 +311,7 @@
catch_blocks.Put(address, catch_block);
catch_block->SetTryCatchInformation(
- new (arena_) TryCatchInformation(iterator.GetHandlerTypeIndex(), *dex_file_));
+ new (allocator_) TryCatchInformation(iterator.GetHandlerTypeIndex(), *dex_file_));
}
handlers_ptr = iterator.EndDataPointer();
}
@@ -328,8 +328,8 @@
if (GetTryItem(predecessor, try_block_info) != try_item) {
// Found a predecessor not covered by the same TryItem. Insert entering
// boundary block.
- HTryBoundary* try_entry =
- new (arena_) HTryBoundary(HTryBoundary::BoundaryKind::kEntry, try_block->GetDexPc());
+ HTryBoundary* try_entry = new (allocator_) HTryBoundary(
+ HTryBoundary::BoundaryKind::kEntry, try_block->GetDexPc());
try_block->CreateImmediateDominator()->AddInstruction(try_entry);
LinkToCatchBlocks(try_entry, code_item_, try_item, catch_blocks);
break;
@@ -357,7 +357,7 @@
// Insert TryBoundary and link to catch blocks.
HTryBoundary* try_exit =
- new (arena_) HTryBoundary(HTryBoundary::BoundaryKind::kExit, successor->GetDexPc());
+ new (allocator_) HTryBoundary(HTryBoundary::BoundaryKind::kExit, successor->GetDexPc());
graph_->SplitEdge(try_block, successor)->AddInstruction(try_exit);
LinkToCatchBlocks(try_exit, code_item_, try_item, catch_blocks);
}
@@ -367,8 +367,8 @@
bool HBasicBlockBuilder::Build() {
DCHECK(graph_->GetBlocks().empty());
- graph_->SetEntryBlock(new (arena_) HBasicBlock(graph_, kNoDexPc));
- graph_->SetExitBlock(new (arena_) HBasicBlock(graph_, kNoDexPc));
+ graph_->SetEntryBlock(new (allocator_) HBasicBlock(graph_, kNoDexPc));
+ graph_->SetExitBlock(new (allocator_) HBasicBlock(graph_, kNoDexPc));
// TODO(dbrazdil): Do CreateBranchTargets and ConnectBasicBlocks in one pass.
if (!CreateBranchTargets()) {
diff --git a/compiler/optimizing/block_builder.h b/compiler/optimizing/block_builder.h
index 4a0f78c..79f7a7b 100644
--- a/compiler/optimizing/block_builder.h
+++ b/compiler/optimizing/block_builder.h
@@ -17,8 +17,8 @@
#ifndef ART_COMPILER_OPTIMIZING_BLOCK_BUILDER_H_
#define ART_COMPILER_OPTIMIZING_BLOCK_BUILDER_H_
-#include "base/arena_containers.h"
-#include "base/arena_object.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "dex_file.h"
#include "nodes.h"
@@ -28,17 +28,21 @@
public:
HBasicBlockBuilder(HGraph* graph,
const DexFile* const dex_file,
- const DexFile::CodeItem& code_item)
- : arena_(graph->GetAllocator()),
+ const DexFile::CodeItem& code_item,
+ ScopedArenaAllocator* local_allocator)
+ : allocator_(graph->GetAllocator()),
graph_(graph),
dex_file_(dex_file),
code_item_(code_item),
+ local_allocator_(local_allocator),
branch_targets_(code_item.insns_size_in_code_units_,
nullptr,
- arena_->Adapter(kArenaAllocGraphBuilder)),
- throwing_blocks_(kDefaultNumberOfThrowingBlocks, arena_->Adapter(kArenaAllocGraphBuilder)),
+ local_allocator->Adapter(kArenaAllocGraphBuilder)),
+ throwing_blocks_(kDefaultNumberOfThrowingBlocks,
+ local_allocator->Adapter(kArenaAllocGraphBuilder)),
number_of_branches_(0u),
- quicken_index_for_dex_pc_(std::less<uint32_t>(), arena_->Adapter()) {}
+ quicken_index_for_dex_pc_(std::less<uint32_t>(),
+ local_allocator->Adapter(kArenaAllocGraphBuilder)) {}
// Creates basic blocks in `graph_` at branch target dex_pc positions of the
// `code_item_`. Blocks are connected but left unpopulated with instructions.
@@ -71,18 +75,19 @@
// handler dex_pcs.
bool MightHaveLiveNormalPredecessors(HBasicBlock* catch_block);
- ArenaAllocator* const arena_;
+ ArenaAllocator* const allocator_;
HGraph* const graph_;
const DexFile* const dex_file_;
const DexFile::CodeItem& code_item_;
- ArenaVector<HBasicBlock*> branch_targets_;
- ArenaVector<HBasicBlock*> throwing_blocks_;
+ ScopedArenaAllocator* const local_allocator_;
+ ScopedArenaVector<HBasicBlock*> branch_targets_;
+ ScopedArenaVector<HBasicBlock*> throwing_blocks_;
size_t number_of_branches_;
// A table to quickly find the quicken index for the first instruction of a basic block.
- ArenaSafeMap<uint32_t, uint32_t> quicken_index_for_dex_pc_;
+ ScopedArenaSafeMap<uint32_t, uint32_t> quicken_index_for_dex_pc_;
static constexpr size_t kDefaultNumberOfThrowingBlocks = 2u;
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 0255e73..9c2068e 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -18,7 +18,8 @@
#include <limits>
-#include "base/arena_containers.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "induction_var_range.h"
#include "nodes.h"
#include "side_effects_analysis.h"
@@ -287,7 +288,7 @@
*/
class ValueRange : public ArenaObject<kArenaAllocBoundsCheckElimination> {
public:
- ValueRange(ArenaAllocator* allocator, ValueBound lower, ValueBound upper)
+ ValueRange(ScopedArenaAllocator* allocator, ValueBound lower, ValueBound upper)
: allocator_(allocator), lower_(lower), upper_(upper) {}
virtual ~ValueRange() {}
@@ -297,7 +298,7 @@
return AsMonotonicValueRange() != nullptr;
}
- ArenaAllocator* GetAllocator() const { return allocator_; }
+ ScopedArenaAllocator* GetAllocator() const { return allocator_; }
ValueBound GetLower() const { return lower_; }
ValueBound GetUpper() const { return upper_; }
@@ -350,7 +351,7 @@
}
private:
- ArenaAllocator* const allocator_;
+ ScopedArenaAllocator* const allocator_;
const ValueBound lower_; // inclusive
const ValueBound upper_; // inclusive
@@ -365,7 +366,7 @@
*/
class MonotonicValueRange : public ValueRange {
public:
- MonotonicValueRange(ArenaAllocator* allocator,
+ MonotonicValueRange(ScopedArenaAllocator* allocator,
HPhi* induction_variable,
HInstruction* initial,
int32_t increment,
@@ -510,21 +511,19 @@
const SideEffectsAnalysis& side_effects,
HInductionVarAnalysis* induction_analysis)
: HGraphVisitor(graph),
+ allocator_(graph->GetArenaStack()),
maps_(graph->GetBlocks().size(),
- ArenaSafeMap<int, ValueRange*>(
+ ScopedArenaSafeMap<int, ValueRange*>(
std::less<int>(),
- graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
- graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
- first_index_bounds_check_map_(
- std::less<int>(),
- graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
- early_exit_loop_(
- std::less<uint32_t>(),
- graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
- taken_test_loop_(
- std::less<uint32_t>(),
- graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
- finite_loop_(graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
+ allocator_.Adapter(kArenaAllocBoundsCheckElimination)),
+ allocator_.Adapter(kArenaAllocBoundsCheckElimination)),
+ first_index_bounds_check_map_(std::less<int>(),
+ allocator_.Adapter(kArenaAllocBoundsCheckElimination)),
+ early_exit_loop_(std::less<uint32_t>(),
+ allocator_.Adapter(kArenaAllocBoundsCheckElimination)),
+ taken_test_loop_(std::less<uint32_t>(),
+ allocator_.Adapter(kArenaAllocBoundsCheckElimination)),
+ finite_loop_(allocator_.Adapter(kArenaAllocBoundsCheckElimination)),
has_dom_based_dynamic_bce_(false),
initial_block_size_(graph->GetBlocks().size()),
side_effects_(side_effects),
@@ -569,7 +568,7 @@
private:
// Return the map of proven value ranges at the beginning of a basic block.
- ArenaSafeMap<int, ValueRange*>* GetValueRangeMap(HBasicBlock* basic_block) {
+ ScopedArenaSafeMap<int, ValueRange*>* GetValueRangeMap(HBasicBlock* basic_block) {
if (IsAddedBlock(basic_block)) {
// Added blocks don't keep value ranges.
return nullptr;
@@ -580,7 +579,7 @@
// Traverse up the dominator tree to look for value range info.
ValueRange* LookupValueRange(HInstruction* instruction, HBasicBlock* basic_block) {
while (basic_block != nullptr) {
- ArenaSafeMap<int, ValueRange*>* map = GetValueRangeMap(basic_block);
+ ScopedArenaSafeMap<int, ValueRange*>* map = GetValueRangeMap(basic_block);
if (map != nullptr) {
if (map->find(instruction->GetId()) != map->end()) {
return map->Get(instruction->GetId());
@@ -668,8 +667,8 @@
if (successor != nullptr) {
bool overflow;
bool underflow;
- ValueRange* new_left_range = new (GetGraph()->GetAllocator()) ValueRange(
- GetGraph()->GetAllocator(),
+ ValueRange* new_left_range = new (&allocator_) ValueRange(
+ &allocator_,
left_range->GetBound(),
right_range->GetBound().Add(left_compensation, &overflow, &underflow));
if (!overflow && !underflow) {
@@ -677,8 +676,8 @@
new_left_range);
}
- ValueRange* new_right_range = new (GetGraph()->GetAllocator()) ValueRange(
- GetGraph()->GetAllocator(),
+ ValueRange* new_right_range = new (&allocator_) ValueRange(
+ &allocator_,
left_range->GetBound().Add(right_compensation, &overflow, &underflow),
right_range->GetBound());
if (!overflow && !underflow) {
@@ -750,8 +749,8 @@
if (overflow || underflow) {
return;
}
- ValueRange* new_range = new (GetGraph()->GetAllocator())
- ValueRange(GetGraph()->GetAllocator(), ValueBound::Min(), new_upper);
+ ValueRange* new_range = new (&allocator_) ValueRange(
+ &allocator_, ValueBound::Min(), new_upper);
ApplyRangeFromComparison(left, block, true_successor, new_range);
}
@@ -762,8 +761,8 @@
if (overflow || underflow) {
return;
}
- ValueRange* new_range = new (GetGraph()->GetAllocator())
- ValueRange(GetGraph()->GetAllocator(), new_lower, ValueBound::Max());
+ ValueRange* new_range = new (&allocator_) ValueRange(
+ &allocator_, new_lower, ValueBound::Max());
ApplyRangeFromComparison(left, block, false_successor, new_range);
}
} else if (cond == kCondGT || cond == kCondGE) {
@@ -774,8 +773,8 @@
if (overflow || underflow) {
return;
}
- ValueRange* new_range = new (GetGraph()->GetAllocator())
- ValueRange(GetGraph()->GetAllocator(), new_lower, ValueBound::Max());
+ ValueRange* new_range = new (&allocator_) ValueRange(
+ &allocator_, new_lower, ValueBound::Max());
ApplyRangeFromComparison(left, block, true_successor, new_range);
}
@@ -785,8 +784,8 @@
if (overflow || underflow) {
return;
}
- ValueRange* new_range = new (GetGraph()->GetAllocator())
- ValueRange(GetGraph()->GetAllocator(), ValueBound::Min(), new_upper);
+ ValueRange* new_range = new (&allocator_) ValueRange(
+ &allocator_, ValueBound::Min(), new_upper);
ApplyRangeFromComparison(left, block, false_successor, new_range);
}
} else if (cond == kCondNE || cond == kCondEQ) {
@@ -795,8 +794,7 @@
// length == [c,d] yields [c, d] along true
// length != [c,d] yields [c, d] along false
if (!lower.Equals(ValueBound::Min()) || !upper.Equals(ValueBound::Max())) {
- ValueRange* new_range = new (GetGraph()->GetAllocator())
- ValueRange(GetGraph()->GetAllocator(), lower, upper);
+ ValueRange* new_range = new (&allocator_) ValueRange(&allocator_, lower, upper);
ApplyRangeFromComparison(
left, block, cond == kCondEQ ? true_successor : false_successor, new_range);
}
@@ -804,8 +802,8 @@
// length == 0 yields [1, max] along false
// length != 0 yields [1, max] along true
if (lower.GetConstant() == 0 && upper.GetConstant() == 0) {
- ValueRange* new_range = new (GetGraph()->GetAllocator())
- ValueRange(GetGraph()->GetAllocator(), ValueBound(nullptr, 1), ValueBound::Max());
+ ValueRange* new_range = new (&allocator_) ValueRange(
+ &allocator_, ValueBound(nullptr, 1), ValueBound::Max());
ApplyRangeFromComparison(
left, block, cond == kCondEQ ? false_successor : true_successor, new_range);
}
@@ -826,7 +824,7 @@
// Non-constant index.
ValueBound lower = ValueBound(nullptr, 0); // constant 0
ValueBound upper = ValueBound(array_length, -1); // array_length - 1
- ValueRange array_range(GetGraph()->GetAllocator(), lower, upper);
+ ValueRange array_range(&allocator_, lower, upper);
// Try index range obtained by dominator-based analysis.
ValueRange* index_range = LookupValueRange(index, block);
if (index_range != nullptr && index_range->FitsIn(&array_range)) {
@@ -875,8 +873,7 @@
} else {
ValueBound lower = ValueBound(nullptr, constant + 1);
ValueBound upper = ValueBound::Max();
- ValueRange* range = new (GetGraph()->GetAllocator())
- ValueRange(GetGraph()->GetAllocator(), lower, upper);
+ ValueRange* range = new (&allocator_) ValueRange(&allocator_, lower, upper);
AssignRange(block, array_length, range);
}
}
@@ -938,8 +935,8 @@
ValueRange* range = nullptr;
if (increment == 0) {
// Add constant 0. It's really a fixed value.
- range = new (GetGraph()->GetAllocator()) ValueRange(
- GetGraph()->GetAllocator(),
+ range = new (&allocator_) ValueRange(
+ &allocator_,
ValueBound(initial_value, 0),
ValueBound(initial_value, 0));
} else {
@@ -959,8 +956,8 @@
bound = increment > 0 ? ValueBound::Min() : ValueBound::Max();
}
}
- range = new (GetGraph()->GetAllocator()) MonotonicValueRange(
- GetGraph()->GetAllocator(),
+ range = new (&allocator_) MonotonicValueRange(
+ &allocator_,
phi,
initial_value,
increment,
@@ -1039,8 +1036,8 @@
!ValueBound::WouldAddOverflowOrUnderflow(c0, -c1)) {
if ((c0 - c1) <= 0) {
// array.length + (c0 - c1) won't overflow/underflow.
- ValueRange* range = new (GetGraph()->GetAllocator()) ValueRange(
- GetGraph()->GetAllocator(),
+ ValueRange* range = new (&allocator_) ValueRange(
+ &allocator_,
ValueBound(nullptr, right_const - upper.GetConstant()),
ValueBound(array_length, right_const - lower.GetConstant()));
AssignRange(sub->GetBlock(), sub, range);
@@ -1087,8 +1084,8 @@
// than array_length.
return;
}
- ValueRange* range = new (GetGraph()->GetAllocator()) ValueRange(
- GetGraph()->GetAllocator(),
+ ValueRange* range = new (&allocator_) ValueRange(
+ &allocator_,
ValueBound(nullptr, std::numeric_limits<int32_t>::min()),
ValueBound(left, 0));
AssignRange(instruction->GetBlock(), instruction, range);
@@ -1113,8 +1110,8 @@
if (constant > 0) {
// constant serves as a mask so any number masked with it
// gets a [0, constant] value range.
- ValueRange* range = new (GetGraph()->GetAllocator()) ValueRange(
- GetGraph()->GetAllocator(),
+ ValueRange* range = new (&allocator_) ValueRange(
+ &allocator_,
ValueBound(nullptr, 0),
ValueBound(nullptr, constant));
AssignRange(instruction->GetBlock(), instruction, range);
@@ -1139,8 +1136,8 @@
// array[i % 10]; // index value range [0, 9]
// array[i % -10]; // index value range [0, 9]
// }
- ValueRange* right_range = new (GetGraph()->GetAllocator()) ValueRange(
- GetGraph()->GetAllocator(),
+ ValueRange* right_range = new (&allocator_) ValueRange(
+ &allocator_,
ValueBound(nullptr, 1 - right_const),
ValueBound(nullptr, right_const - 1));
@@ -1169,8 +1166,8 @@
if (right->IsArrayLength()) {
ValueBound lower = ValueBound::Min(); // ideally, lower should be '1-array_length'.
ValueBound upper = ValueBound(right, -1); // array_length - 1
- ValueRange* right_range = new (GetGraph()->GetAllocator()) ValueRange(
- GetGraph()->GetAllocator(),
+ ValueRange* right_range = new (&allocator_) ValueRange(
+ &allocator_,
lower,
upper);
ValueRange* left_range = LookupValueRange(left, instruction->GetBlock());
@@ -1195,8 +1192,7 @@
// which isn't available as an instruction yet. new_array will
// be treated the same as new_array.length when it's used in a ValueBound.
ValueBound upper = ValueBound(new_array, -right_const);
- ValueRange* range = new (GetGraph()->GetAllocator())
- ValueRange(GetGraph()->GetAllocator(), lower, upper);
+ ValueRange* range = new (&allocator_) ValueRange(&allocator_, lower, upper);
ValueRange* existing_range = LookupValueRange(left, new_array->GetBlock());
if (existing_range != nullptr) {
range = existing_range->Narrow(range);
@@ -1291,10 +1287,10 @@
HInstruction* base = value.GetInstruction();
int32_t min_c = base == nullptr ? 0 : value.GetConstant();
int32_t max_c = value.GetConstant();
- ArenaVector<HBoundsCheck*> candidates(
- GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination));
- ArenaVector<HBoundsCheck*> standby(
- GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination));
+ ScopedArenaVector<HBoundsCheck*> candidates(
+ allocator_.Adapter(kArenaAllocBoundsCheckElimination));
+ ScopedArenaVector<HBoundsCheck*> standby(
+ allocator_.Adapter(kArenaAllocBoundsCheckElimination));
for (const HUseListNode<HInstruction*>& use : array_length->GetUses()) {
// Another bounds check in same or dominated block?
HInstruction* user = use.GetUser();
@@ -1378,7 +1374,7 @@
v2.is_known && (v2.a_constant == 0 || v2.a_constant == 1)) {
DCHECK(v1.a_constant == 1 || v1.instruction == nullptr);
DCHECK(v2.a_constant == 1 || v2.instruction == nullptr);
- ValueRange index_range(GetGraph()->GetAllocator(),
+ ValueRange index_range(&allocator_,
ValueBound(v1.instruction, v1.b_constant),
ValueBound(v2.instruction, v2.b_constant));
// If analysis reveals a certain OOB, disable dynamic BCE. Otherwise,
@@ -1410,10 +1406,10 @@
HInstruction* base = value.GetInstruction();
int32_t min_c = base == nullptr ? 0 : value.GetConstant();
int32_t max_c = value.GetConstant();
- ArenaVector<HBoundsCheck*> candidates(
- GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination));
- ArenaVector<HBoundsCheck*> standby(
- GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination));
+ ScopedArenaVector<HBoundsCheck*> candidates(
+ allocator_.Adapter(kArenaAllocBoundsCheckElimination));
+ ScopedArenaVector<HBoundsCheck*> standby(
+ allocator_.Adapter(kArenaAllocBoundsCheckElimination));
for (const HUseListNode<HInstruction*>& use : array_length->GetUses()) {
HInstruction* user = use.GetUser();
if (user->IsBoundsCheck() && loop == user->GetBlock()->GetLoopInformation()) {
@@ -1882,21 +1878,24 @@
instruction->GetBlock()->RemoveInstruction(instruction);
}
+ // Use local allocator for allocating memory.
+ ScopedArenaAllocator allocator_;
+
// A set of maps, one per basic block, from instruction to range.
- ArenaVector<ArenaSafeMap<int, ValueRange*>> maps_;
+ ScopedArenaVector<ScopedArenaSafeMap<int, ValueRange*>> maps_;
// Map an HArrayLength instruction's id to the first HBoundsCheck instruction
// in a block that checks an index against that HArrayLength.
- ArenaSafeMap<int, HBoundsCheck*> first_index_bounds_check_map_;
+ ScopedArenaSafeMap<int, HBoundsCheck*> first_index_bounds_check_map_;
// Early-exit loop bookkeeping.
- ArenaSafeMap<uint32_t, bool> early_exit_loop_;
+ ScopedArenaSafeMap<uint32_t, bool> early_exit_loop_;
// Taken-test loop bookkeeping.
- ArenaSafeMap<uint32_t, HBasicBlock*> taken_test_loop_;
+ ScopedArenaSafeMap<uint32_t, HBasicBlock*> taken_test_loop_;
// Finite loop bookkeeping.
- ArenaSet<uint32_t> finite_loop_;
+ ScopedArenaSet<uint32_t> finite_loop_;
// Flag that denotes whether dominator-based dynamic elimination has occurred.
bool has_dom_based_dynamic_bce_;
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 76350a6..4ed1612 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -20,12 +20,16 @@
#include "base/arena_bit_vector.h"
#include "base/bit_vector-inl.h"
#include "base/logging.h"
+#include "block_builder.h"
#include "data_type-inl.h"
#include "dex/verified_method.h"
#include "driver/compiler_options.h"
+#include "instruction_builder.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "nodes.h"
+#include "optimizing_compiler_stats.h"
+#include "ssa_builder.h"
#include "thread.h"
#include "utils/dex_cache_arrays_layout-inl.h"
@@ -43,27 +47,13 @@
dex_file_(&graph->GetDexFile()),
code_item_(*dex_compilation_unit->GetCodeItem()),
dex_compilation_unit_(dex_compilation_unit),
+ outer_compilation_unit_(outer_compilation_unit),
compiler_driver_(driver),
+ code_generator_(code_generator),
compilation_stats_(compiler_stats),
- block_builder_(graph, dex_file_, code_item_),
- ssa_builder_(graph,
- dex_compilation_unit->GetClassLoader(),
- dex_compilation_unit->GetDexCache(),
- handles),
- instruction_builder_(graph,
- &block_builder_,
- &ssa_builder_,
- dex_file_,
- code_item_,
- DataType::FromShorty(dex_compilation_unit_->GetShorty()[0]),
- dex_compilation_unit,
- outer_compilation_unit,
- driver,
- code_generator,
- interpreter_metadata,
- compiler_stats,
- dex_compilation_unit->GetDexCache(),
- handles) {}
+ interpreter_metadata_(interpreter_metadata),
+ handles_(handles),
+ return_type_(DataType::FromShorty(dex_compilation_unit_->GetShorty()[0])) {}
bool HGraphBuilder::SkipCompilation(size_t number_of_branches) {
if (compiler_driver_ == nullptr) {
@@ -108,15 +98,38 @@
graph_->SetMaximumNumberOfOutVRegs(code_item_.outs_size_);
graph_->SetHasTryCatch(code_item_.tries_size_ != 0);
+ // Use ScopedArenaAllocator for all local allocations.
+ ScopedArenaAllocator local_allocator(graph_->GetArenaStack());
+ HBasicBlockBuilder block_builder(graph_, dex_file_, code_item_, &local_allocator);
+ SsaBuilder ssa_builder(graph_,
+ dex_compilation_unit_->GetClassLoader(),
+ dex_compilation_unit_->GetDexCache(),
+ handles_,
+ &local_allocator);
+ HInstructionBuilder instruction_builder(graph_,
+ &block_builder,
+ &ssa_builder,
+ dex_file_,
+ code_item_,
+ return_type_,
+ dex_compilation_unit_,
+ outer_compilation_unit_,
+ compiler_driver_,
+ code_generator_,
+ interpreter_metadata_,
+ compilation_stats_,
+ handles_,
+ &local_allocator);
+
// 1) Create basic blocks and link them together. Basic blocks are left
// unpopulated with the exception of synthetic blocks, e.g. HTryBoundaries.
- if (!block_builder_.Build()) {
+ if (!block_builder.Build()) {
return kAnalysisInvalidBytecode;
}
// 2) Decide whether to skip this method based on its code size and number
// of branches.
- if (SkipCompilation(block_builder_.GetNumberOfBranches())) {
+ if (SkipCompilation(block_builder.GetNumberOfBranches())) {
return kAnalysisSkipped;
}
@@ -127,12 +140,12 @@
}
// 4) Populate basic blocks with instructions.
- if (!instruction_builder_.Build()) {
+ if (!instruction_builder.Build()) {
return kAnalysisInvalidBytecode;
}
// 5) Type the graph and eliminate dead/redundant phis.
- return ssa_builder_.BuildSsa();
+ return ssa_builder.BuildSsa();
}
} // namespace art
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 6c5985a..5a860f1 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -17,21 +17,17 @@
#ifndef ART_COMPILER_OPTIMIZING_BUILDER_H_
#define ART_COMPILER_OPTIMIZING_BUILDER_H_
-#include "base/arena_containers.h"
#include "base/arena_object.h"
-#include "block_builder.h"
#include "dex_file-inl.h"
#include "dex_file.h"
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
-#include "instruction_builder.h"
#include "nodes.h"
-#include "optimizing_compiler_stats.h"
-#include "ssa_builder.h"
namespace art {
class CodeGenerator;
+class OptimizingCompilerStats;
class HGraphBuilder : public ValueObject {
public:
@@ -46,34 +42,21 @@
// Only for unit testing.
HGraphBuilder(HGraph* graph,
+ const DexCompilationUnit* dex_compilation_unit,
const DexFile::CodeItem& code_item,
VariableSizedHandleScope* handles,
DataType::Type return_type = DataType::Type::kInt32)
: graph_(graph),
- dex_file_(nullptr),
+ dex_file_(dex_compilation_unit->GetDexFile()),
code_item_(code_item),
- dex_compilation_unit_(nullptr),
+ dex_compilation_unit_(dex_compilation_unit),
+ outer_compilation_unit_(nullptr),
compiler_driver_(nullptr),
+ code_generator_(nullptr),
compilation_stats_(nullptr),
- block_builder_(graph, nullptr, code_item),
- ssa_builder_(graph,
- handles->NewHandle<mirror::ClassLoader>(nullptr),
- handles->NewHandle<mirror::DexCache>(nullptr),
- handles),
- instruction_builder_(graph,
- &block_builder_,
- &ssa_builder_,
- /* dex_file */ nullptr,
- code_item_,
- return_type,
- /* dex_compilation_unit */ nullptr,
- /* outer_compilation_unit */ nullptr,
- /* compiler_driver */ nullptr,
- /* code_generator */ nullptr,
- /* interpreter_metadata */ nullptr,
- /* compiler_stats */ nullptr,
- handles->NewHandle<mirror::DexCache>(nullptr),
- handles) {}
+ interpreter_metadata_(nullptr),
+ handles_(handles),
+ return_type_(return_type) {}
GraphAnalysisResult BuildGraph();
@@ -90,13 +73,16 @@
// it can be an inlined method.
const DexCompilationUnit* const dex_compilation_unit_;
+ // The compilation unit of the enclosing method being compiled.
+ const DexCompilationUnit* const outer_compilation_unit_;
+
CompilerDriver* const compiler_driver_;
+ CodeGenerator* const code_generator_;
- OptimizingCompilerStats* compilation_stats_;
-
- HBasicBlockBuilder block_builder_;
- SsaBuilder ssa_builder_;
- HInstructionBuilder instruction_builder_;
+ OptimizingCompilerStats* const compilation_stats_;
+ const uint8_t* const interpreter_metadata_;
+ VariableSizedHandleScope* const handles_;
+ const DataType::Type return_type_;
DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
};
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 5117e07..3cc7b0e 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -18,13 +18,18 @@
#include "base/array_ref.h"
#include "base/bit_vector-inl.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "base/stl_util.h"
#include "ssa_phi_elimination.h"
namespace art {
static void MarkReachableBlocks(HGraph* graph, ArenaBitVector* visited) {
- ArenaVector<HBasicBlock*> worklist(graph->GetAllocator()->Adapter(kArenaAllocDCE));
+ // Use local allocator for allocating memory.
+ ScopedArenaAllocator allocator(graph->GetArenaStack());
+
+ ScopedArenaVector<HBasicBlock*> worklist(allocator.Adapter(kArenaAllocDCE));
constexpr size_t kDefaultWorlistSize = 8;
worklist.reserve(kDefaultWorlistSize);
visited->SetBit(graph->GetEntryBlock()->GetBlockId());
@@ -305,9 +310,12 @@
}
bool HDeadCodeElimination::RemoveDeadBlocks() {
+ // Use local allocator for allocating memory.
+ ScopedArenaAllocator allocator(graph_->GetArenaStack());
+
// Classify blocks as reachable/unreachable.
- ArenaAllocator* allocator = graph_->GetAllocator();
- ArenaBitVector live_blocks(allocator, graph_->GetBlocks().size(), false, kArenaAllocDCE);
+ ArenaBitVector live_blocks(&allocator, graph_->GetBlocks().size(), false, kArenaAllocDCE);
+ live_blocks.ClearAllBits();
MarkReachableBlocks(graph_, &live_blocks);
bool removed_one_or_more_blocks = false;
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 1c7d1a0..b1ac027 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -22,8 +22,9 @@
#include "android-base/stringprintf.h"
-#include "base/arena_containers.h"
#include "base/bit_vector-inl.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
namespace art {
@@ -47,10 +48,13 @@
void GraphChecker::VisitBasicBlock(HBasicBlock* block) {
current_block_ = block;
+ // Use local allocator for allocating memory.
+ ScopedArenaAllocator allocator(GetGraph()->GetArenaStack());
+
// Check consistency with respect to predecessors of `block`.
// Note: Counting duplicates with a sorted vector uses up to 6x less memory
// than ArenaSafeMap<HBasicBlock*, size_t> and also allows storage reuse.
- ArenaVector<HBasicBlock*>& sorted_predecessors = blocks_storage_;
+ ScopedArenaVector<HBasicBlock*> sorted_predecessors(allocator.Adapter(kArenaAllocGraphChecker));
sorted_predecessors.assign(block->GetPredecessors().begin(), block->GetPredecessors().end());
std::sort(sorted_predecessors.begin(), sorted_predecessors.end());
for (auto it = sorted_predecessors.begin(), end = sorted_predecessors.end(); it != end; ) {
@@ -73,7 +77,7 @@
// Check consistency with respect to successors of `block`.
// Note: Counting duplicates with a sorted vector uses up to 6x less memory
// than ArenaSafeMap<HBasicBlock*, size_t> and also allows storage reuse.
- ArenaVector<HBasicBlock*>& sorted_successors = blocks_storage_;
+ ScopedArenaVector<HBasicBlock*> sorted_successors(allocator.Adapter(kArenaAllocGraphChecker));
sorted_successors.assign(block->GetSuccessors().begin(), block->GetSuccessors().end());
std::sort(sorted_successors.begin(), sorted_successors.end());
for (auto it = sorted_successors.begin(), end = sorted_successors.end(); it != end; ) {
@@ -829,10 +833,14 @@
phi->GetRegNumber(),
type_str.str().c_str()));
} else {
+ // Use local allocator for allocating memory.
+ ScopedArenaAllocator allocator(GetGraph()->GetArenaStack());
// If we get here, make sure we allocate all the necessary storage at once
// because the BitVector reallocation strategy has very bad worst-case behavior.
- ArenaBitVector& visited = visited_storage_;
- visited.SetBit(GetGraph()->GetCurrentInstructionId());
+ ArenaBitVector visited(&allocator,
+ GetGraph()->GetCurrentInstructionId(),
+ /* expandable */ false,
+ kArenaAllocGraphChecker);
visited.ClearAllBits();
if (!IsConstantEquivalent(phi, other_phi, &visited)) {
AddError(StringPrintf("Two phis (%d and %d) found for VReg %d but they "
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 6af7b42..0f0b49d 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -17,10 +17,13 @@
#ifndef ART_COMPILER_OPTIMIZING_GRAPH_CHECKER_H_
#define ART_COMPILER_OPTIMIZING_GRAPH_CHECKER_H_
-#include "nodes.h"
-
#include <ostream>
+#include "base/arena_bit_vector.h"
+#include "base/bit_vector-inl.h"
+#include "base/scoped_arena_allocator.h"
+#include "nodes.h"
+
namespace art {
// A control-flow graph visitor performing various checks.
@@ -30,12 +33,10 @@
: HGraphDelegateVisitor(graph),
errors_(graph->GetAllocator()->Adapter(kArenaAllocGraphChecker)),
dump_prefix_(dump_prefix),
- seen_ids_(graph->GetAllocator(),
- graph->GetCurrentInstructionId(),
- false,
- kArenaAllocGraphChecker),
- blocks_storage_(graph->GetAllocator()->Adapter(kArenaAllocGraphChecker)),
- visited_storage_(graph->GetAllocator(), 0u, true, kArenaAllocGraphChecker) {}
+ allocator_(graph->GetArenaStack()),
+ seen_ids_(&allocator_, graph->GetCurrentInstructionId(), false, kArenaAllocGraphChecker) {
+ seen_ids_.ClearAllBits();
+ }
// Check the whole graph (in reverse post-order).
void Run() {
@@ -104,12 +105,9 @@
private:
// String displayed before dumped errors.
const char* const dump_prefix_;
+ ScopedArenaAllocator allocator_;
ArenaBitVector seen_ids_;
- // To reduce the total arena memory allocation, we reuse the same storage.
- ArenaVector<HBasicBlock*> blocks_storage_;
- ArenaBitVector visited_storage_;
-
DISALLOW_COPY_AND_ASSIGN(GraphChecker);
};
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index eccdccf..3851877 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -21,6 +21,7 @@
#include <cctype>
#include <sstream>
+#include "art_method.h"
#include "bounds_check_elimination.h"
#include "builder.h"
#include "code_generator.h"
@@ -33,6 +34,7 @@
#include "optimization.h"
#include "reference_type_propagation.h"
#include "register_allocator_linear_scan.h"
+#include "scoped_thread_state_change-inl.h"
#include "ssa_liveness_analysis.h"
#include "utils/assembler.h"
#include "utils/intrusive_forward_list.h"
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index c09e5df..813772e 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -17,7 +17,8 @@
#include "gvn.h"
#include "base/arena_bit_vector.h"
-#include "base/arena_containers.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "base/bit_vector-inl.h"
#include "side_effects_analysis.h"
#include "utils.h"
@@ -36,7 +37,7 @@
class ValueSet : public ArenaObject<kArenaAllocGvn> {
public:
// Constructs an empty ValueSet which owns all its buckets.
- explicit ValueSet(ArenaAllocator* allocator)
+ explicit ValueSet(ScopedArenaAllocator* allocator)
: allocator_(allocator),
num_buckets_(kMinimumNumberOfBuckets),
buckets_(allocator->AllocArray<Node*>(num_buckets_, kArenaAllocGvn)),
@@ -44,12 +45,13 @@
num_entries_(0u) {
// ArenaAllocator returns zeroed memory, so no need to set buckets to null.
DCHECK(IsPowerOfTwo(num_buckets_));
+ std::fill_n(buckets_, num_buckets_, nullptr);
buckets_owned_.SetInitialBits(num_buckets_);
}
// Copy constructor. Depending on the load factor, it will either make a deep
// copy (all buckets owned) or a shallow one (buckets pointing to the parent).
- ValueSet(ArenaAllocator* allocator, const ValueSet& other)
+ ValueSet(ScopedArenaAllocator* allocator, const ValueSet& other)
: allocator_(allocator),
num_buckets_(other.IdealBucketCount()),
buckets_(allocator->AllocArray<Node*>(num_buckets_, kArenaAllocGvn)),
@@ -58,7 +60,7 @@
// ArenaAllocator returns zeroed memory, so entries of buckets_ and
// buckets_owned_ are initialized to null and false, respectively.
DCHECK(IsPowerOfTwo(num_buckets_));
- PopulateFromInternal(other, /* is_dirty */ false);
+ PopulateFromInternal(other);
}
// Erases all values in this set and populates it with values from `other`.
@@ -66,7 +68,7 @@
if (this == &other) {
return;
}
- PopulateFromInternal(other, /* is_dirty */ true);
+ PopulateFromInternal(other);
}
// Returns true if `this` has enough buckets so that if `other` is copied into
@@ -159,33 +161,19 @@
private:
// Copies all entries from `other` to `this`.
- // If `is_dirty` is set to true, existing data will be wiped first. It is
- // assumed that `buckets_` and `buckets_owned_` are zero-allocated otherwise.
- void PopulateFromInternal(const ValueSet& other, bool is_dirty) {
+ void PopulateFromInternal(const ValueSet& other) {
DCHECK_NE(this, &other);
DCHECK_GE(num_buckets_, other.IdealBucketCount());
if (num_buckets_ == other.num_buckets_) {
// Hash table remains the same size. We copy the bucket pointers and leave
// all buckets_owned_ bits false.
- if (is_dirty) {
- buckets_owned_.ClearAllBits();
- } else {
- DCHECK_EQ(buckets_owned_.NumSetBits(), 0u);
- }
+ buckets_owned_.ClearAllBits();
memcpy(buckets_, other.buckets_, num_buckets_ * sizeof(Node*));
} else {
// Hash table size changes. We copy and rehash all entries, and set all
// buckets_owned_ bits to true.
- if (is_dirty) {
- memset(buckets_, 0, num_buckets_ * sizeof(Node*));
- } else {
- if (kIsDebugBuild) {
- for (size_t i = 0; i < num_buckets_; ++i) {
- DCHECK(buckets_[i] == nullptr) << i;
- }
- }
- }
+ std::fill_n(buckets_, num_buckets_, nullptr);
for (size_t i = 0; i < other.num_buckets_; ++i) {
for (Node* node = other.buckets_[i]; node != nullptr; node = node->GetNext()) {
size_t new_index = BucketIndex(node->GetHashCode());
@@ -208,7 +196,7 @@
Node* GetNext() const { return next_; }
void SetNext(Node* node) { next_ = node; }
- Node* Dup(ArenaAllocator* allocator, Node* new_next = nullptr) {
+ Node* Dup(ScopedArenaAllocator* allocator, Node* new_next = nullptr) {
return new (allocator) Node(instruction_, hash_code_, new_next);
}
@@ -326,7 +314,7 @@
return hash_code & (num_buckets_ - 1);
}
- ArenaAllocator* const allocator_;
+ ScopedArenaAllocator* const allocator_;
// The internal bucket implementation of the set.
size_t const num_buckets_;
@@ -350,15 +338,16 @@
*/
class GlobalValueNumberer : public ValueObject {
public:
- GlobalValueNumberer(ArenaAllocator* allocator,
- HGraph* graph,
+ GlobalValueNumberer(HGraph* graph,
const SideEffectsAnalysis& side_effects)
: graph_(graph),
- allocator_(allocator),
+ allocator_(graph->GetArenaStack()),
side_effects_(side_effects),
- sets_(graph->GetBlocks().size(), nullptr, allocator->Adapter(kArenaAllocGvn)),
+ sets_(graph->GetBlocks().size(), nullptr, allocator_.Adapter(kArenaAllocGvn)),
visited_blocks_(
- allocator, graph->GetBlocks().size(), /* expandable */ false, kArenaAllocGvn) {}
+ &allocator_, graph->GetBlocks().size(), /* expandable */ false, kArenaAllocGvn) {
+ visited_blocks_.ClearAllBits();
+ }
void Run();
@@ -368,7 +357,7 @@
void VisitBasicBlock(HBasicBlock* block);
HGraph* graph_;
- ArenaAllocator* const allocator_;
+ ScopedArenaAllocator allocator_;
const SideEffectsAnalysis& side_effects_;
ValueSet* FindSetFor(HBasicBlock* block) const {
@@ -396,7 +385,7 @@
// ValueSet for blocks. Initially null, but for an individual block they
// are allocated and populated by the dominator, and updated by all blocks
// in the path from the dominator to the block.
- ArenaVector<ValueSet*> sets_;
+ ScopedArenaVector<ValueSet*> sets_;
// BitVector which serves as a fast-access map from block id to
// visited/unvisited Boolean.
@@ -407,7 +396,7 @@
void GlobalValueNumberer::Run() {
DCHECK(side_effects_.HasRun());
- sets_[graph_->GetEntryBlock()->GetBlockId()] = new (allocator_) ValueSet(allocator_);
+ sets_[graph_->GetEntryBlock()->GetBlockId()] = new (&allocator_) ValueSet(&allocator_);
// Use the reverse post order to ensure the non back-edge predecessors of a block are
// visited before the block itself.
@@ -424,7 +413,7 @@
// The entry block should only accumulate constant instructions, and
// the builder puts constants only in the entry block.
// Therefore, there is no need to propagate the value set to the next block.
- set = new (allocator_) ValueSet(allocator_);
+ set = new (&allocator_) ValueSet(&allocator_);
} else {
HBasicBlock* dominator = block->GetDominator();
ValueSet* dominator_set = FindSetFor(dominator);
@@ -443,7 +432,7 @@
if (recyclable == nullptr) {
// No block with a suitable ValueSet found. Allocate a new one and
// copy `dominator_set` into it.
- set = new (allocator_) ValueSet(allocator_, *dominator_set);
+ set = new (&allocator_) ValueSet(&allocator_, *dominator_set);
} else {
// Block with a recyclable ValueSet found. Clone `dominator_set` into it.
set = FindSetFor(recyclable);
@@ -566,7 +555,7 @@
}
void GVNOptimization::Run() {
- GlobalValueNumberer gvn(graph_->GetAllocator(), graph_, side_effects_);
+ GlobalValueNumberer gvn(graph_, side_effects_);
gvn.Run();
}
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index b06d91c..902985e 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -17,15 +17,23 @@
#include "instruction_builder.h"
#include "art_method-inl.h"
+#include "base/arena_bit_vector.h"
+#include "base/bit_vector-inl.h"
+#include "block_builder.h"
#include "bytecode_utils.h"
#include "class_linker.h"
#include "data_type-inl.h"
#include "dex_instruction-inl.h"
+#include "driver/compiler_driver-inl.h"
+#include "driver/dex_compilation_unit.h"
#include "driver/compiler_options.h"
#include "imtable-inl.h"
+#include "mirror/dex_cache.h"
+#include "optimizing_compiler_stats.h"
#include "quicken_info.h"
#include "scoped_thread_state_change-inl.h"
#include "sharpening.h"
+#include "ssa_builder.h"
#include "well_known_classes.h"
namespace art {
@@ -34,8 +42,8 @@
return block_builder_->GetBlockAt(dex_pc);
}
-inline ArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsFor(HBasicBlock* block) {
- ArenaVector<HInstruction*>* locals = &locals_for_[block->GetBlockId()];
+inline ScopedArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsFor(HBasicBlock* block) {
+ ScopedArenaVector<HInstruction*>* locals = &locals_for_[block->GetBlockId()];
const size_t vregs = graph_->GetNumberOfVRegs();
if (locals->size() == vregs) {
return locals;
@@ -43,9 +51,9 @@
return GetLocalsForWithAllocation(block, locals, vregs);
}
-ArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsForWithAllocation(
+ScopedArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsForWithAllocation(
HBasicBlock* block,
- ArenaVector<HInstruction*>* locals,
+ ScopedArenaVector<HInstruction*>* locals,
const size_t vregs) {
DCHECK_NE(locals->size(), vregs);
locals->resize(vregs, nullptr);
@@ -73,7 +81,7 @@
}
inline HInstruction* HInstructionBuilder::ValueOfLocalAt(HBasicBlock* block, size_t local) {
- ArenaVector<HInstruction*>* locals = GetLocalsFor(block);
+ ScopedArenaVector<HInstruction*>* locals = GetLocalsFor(block);
return (*locals)[local];
}
@@ -168,7 +176,7 @@
void HInstructionBuilder::PropagateLocalsToCatchBlocks() {
const HTryBoundary& try_entry = current_block_->GetTryCatchInformation()->GetTryEntry();
for (HBasicBlock* catch_block : try_entry.GetExceptionHandlers()) {
- ArenaVector<HInstruction*>* handler_locals = GetLocalsFor(catch_block);
+ ScopedArenaVector<HInstruction*>* handler_locals = GetLocalsFor(catch_block);
DCHECK_EQ(handler_locals->size(), current_locals_->size());
for (size_t vreg = 0, e = current_locals_->size(); vreg < e; ++vreg) {
HInstruction* handler_value = (*handler_locals)[vreg];
@@ -216,7 +224,7 @@
graph_->GetArtMethod(),
instruction->GetDexPc(),
instruction);
- environment->CopyFrom(*current_locals_);
+ environment->CopyFrom(ArrayRef<HInstruction* const>(*current_locals_));
instruction->SetRawEnvironment(environment);
}
}
@@ -264,8 +272,9 @@
}
bool HInstructionBuilder::Build() {
- locals_for_.resize(graph_->GetBlocks().size(),
- ArenaVector<HInstruction*>(allocator_->Adapter(kArenaAllocGraphBuilder)));
+ locals_for_.resize(
+ graph_->GetBlocks().size(),
+ ScopedArenaVector<HInstruction*>(local_allocator_->Adapter(kArenaAllocGraphBuilder)));
// Find locations where we want to generate extra stackmaps for native debugging.
// This allows us to generate the info only at interesting points (for example,
@@ -274,10 +283,7 @@
compiler_driver_->GetCompilerOptions().GetNativeDebuggable();
ArenaBitVector* native_debug_info_locations = nullptr;
if (native_debuggable) {
- const uint32_t num_instructions = code_item_.insns_size_in_code_units_;
- native_debug_info_locations =
- new (allocator_) ArenaBitVector (allocator_, num_instructions, false);
- FindNativeDebugInfoLocations(native_debug_info_locations);
+ native_debug_info_locations = FindNativeDebugInfoLocations();
}
for (HBasicBlock* block : graph_->GetReversePostOrder()) {
@@ -358,7 +364,7 @@
return true;
}
-void HInstructionBuilder::FindNativeDebugInfoLocations(ArenaBitVector* locations) {
+ArenaBitVector* HInstructionBuilder::FindNativeDebugInfoLocations() {
// The callback gets called when the line number changes.
// In other words, it marks the start of new java statement.
struct Callback {
@@ -367,6 +373,12 @@
return false;
}
};
+ const uint32_t num_instructions = code_item_.insns_size_in_code_units_;
+ ArenaBitVector* locations = ArenaBitVector::Create(local_allocator_,
+ num_instructions,
+ /* expandable */ false,
+ kArenaAllocGraphBuilder);
+ locations->ClearAllBits();
dex_file_->DecodeDebugPositionInfo(&code_item_, Callback::Position, locations);
// Instruction-specific tweaks.
IterationRange<DexInstructionIterator> instructions = code_item_.Instructions();
@@ -387,6 +399,7 @@
break;
}
}
+ return locations;
}
HInstruction* HInstructionBuilder::LoadLocal(uint32_t reg_number, DataType::Type type) const {
@@ -439,8 +452,8 @@
void HInstructionBuilder::InitializeParameters() {
DCHECK(current_block_->IsEntryBlock());
- // dex_compilation_unit_ is null only when unit testing.
- if (dex_compilation_unit_ == nullptr) {
+ // outer_compilation_unit_ is null only when unit testing.
+ if (outer_compilation_unit_ == nullptr) {
return;
}
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 79d6ddc..058b711 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -17,23 +17,32 @@
#ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_BUILDER_H_
#define ART_COMPILER_OPTIMIZING_INSTRUCTION_BUILDER_H_
-#include "base/arena_containers.h"
-#include "base/arena_object.h"
-#include "block_builder.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
+#include "data_type.h"
+#include "dex_file.h"
#include "dex_file_types.h"
-#include "driver/compiler_driver-inl.h"
-#include "driver/compiler_driver.h"
-#include "driver/dex_compilation_unit.h"
-#include "mirror/dex_cache.h"
+#include "handle.h"
#include "nodes.h"
-#include "optimizing_compiler_stats.h"
#include "quicken_info.h"
-#include "ssa_builder.h"
namespace art {
+class ArenaBitVector;
+class ArtField;
+class ArtMethod;
class CodeGenerator;
+class CompilerDriver;
+class DexCompilationUnit;
+class HBasicBlockBuilder;
class Instruction;
+class OptimizingCompilerStats;
+class SsaBuilder;
+class VariableSizedHandleScope;
+
+namespace mirror {
+class Class;
+} // namespace mirror
class HInstructionBuilder : public ValueObject {
public:
@@ -45,12 +54,12 @@
DataType::Type return_type,
const DexCompilationUnit* dex_compilation_unit,
const DexCompilationUnit* outer_compilation_unit,
- CompilerDriver* driver,
+ CompilerDriver* compiler_driver,
CodeGenerator* code_generator,
const uint8_t* interpreter_metadata,
OptimizingCompilerStats* compiler_stats,
- Handle<mirror::DexCache> dex_cache,
- VariableSizedHandleScope* handles)
+ VariableSizedHandleScope* handles,
+ ScopedArenaAllocator* local_allocator)
: allocator_(graph->GetAllocator()),
graph_(graph),
handles_(handles),
@@ -59,19 +68,19 @@
return_type_(return_type),
block_builder_(block_builder),
ssa_builder_(ssa_builder),
- locals_for_(allocator_->Adapter(kArenaAllocGraphBuilder)),
- current_block_(nullptr),
- current_locals_(nullptr),
- latest_result_(nullptr),
- current_this_parameter_(nullptr),
- compiler_driver_(driver),
+ compiler_driver_(compiler_driver),
code_generator_(code_generator),
dex_compilation_unit_(dex_compilation_unit),
outer_compilation_unit_(outer_compilation_unit),
quicken_info_(interpreter_metadata),
compilation_stats_(compiler_stats),
- dex_cache_(dex_cache),
- loop_headers_(allocator_->Adapter(kArenaAllocGraphBuilder)) {
+ local_allocator_(local_allocator),
+ locals_for_(local_allocator->Adapter(kArenaAllocGraphBuilder)),
+ current_block_(nullptr),
+ current_locals_(nullptr),
+ latest_result_(nullptr),
+ current_this_parameter_(nullptr),
+ loop_headers_(local_allocator->Adapter(kArenaAllocGraphBuilder)) {
loop_headers_.reserve(kDefaultNumberOfLoops);
}
@@ -83,18 +92,18 @@
void SetLoopHeaderPhiInputs();
bool ProcessDexInstruction(const Instruction& instruction, uint32_t dex_pc, size_t quicken_index);
- void FindNativeDebugInfoLocations(ArenaBitVector* locations);
+ ArenaBitVector* FindNativeDebugInfoLocations();
bool CanDecodeQuickenedInfo() const;
uint16_t LookupQuickenedInfo(uint32_t quicken_index);
HBasicBlock* FindBlockStartingAt(uint32_t dex_pc) const;
- ArenaVector<HInstruction*>* GetLocalsFor(HBasicBlock* block);
+ ScopedArenaVector<HInstruction*>* GetLocalsFor(HBasicBlock* block);
// Out of line version of GetLocalsFor(), which has a fast path that is
// beneficial to get inlined by callers.
- ArenaVector<HInstruction*>* GetLocalsForWithAllocation(
- HBasicBlock* block, ArenaVector<HInstruction*>* locals, const size_t vregs);
+ ScopedArenaVector<HInstruction*>* GetLocalsForWithAllocation(
+ HBasicBlock* block, ScopedArenaVector<HInstruction*>* locals, const size_t vregs);
HInstruction* ValueOfLocalAt(HBasicBlock* block, size_t local);
HInstruction* LoadLocal(uint32_t register_index, DataType::Type type) const;
HInstruction* LoadNullCheckedLocal(uint32_t register_index, uint32_t dex_pc);
@@ -314,7 +323,7 @@
ArenaAllocator* const allocator_;
HGraph* const graph_;
- VariableSizedHandleScope* handles_;
+ VariableSizedHandleScope* const handles_;
// The dex file where the method being compiled is, and the bytecode data.
const DexFile* const dex_file_;
@@ -323,18 +332,8 @@
// The return type of the method being compiled.
const DataType::Type return_type_;
- HBasicBlockBuilder* block_builder_;
- SsaBuilder* ssa_builder_;
-
- ArenaVector<ArenaVector<HInstruction*>> locals_for_;
- HBasicBlock* current_block_;
- ArenaVector<HInstruction*>* current_locals_;
- HInstruction* latest_result_;
- // Current "this" parameter.
- // Valid only after InitializeParameters() finishes.
- // * Null for static methods.
- // * Non-null for instance methods.
- HParameterValue* current_this_parameter_;
+ HBasicBlockBuilder* const block_builder_;
+ SsaBuilder* const ssa_builder_;
CompilerDriver* const compiler_driver_;
@@ -352,10 +351,20 @@
// Original values kept after instruction quickening.
QuickenInfoTable quicken_info_;
- OptimizingCompilerStats* compilation_stats_;
- Handle<mirror::DexCache> dex_cache_;
+ OptimizingCompilerStats* const compilation_stats_;
- ArenaVector<HBasicBlock*> loop_headers_;
+ ScopedArenaAllocator* const local_allocator_;
+ ScopedArenaVector<ScopedArenaVector<HInstruction*>> locals_for_;
+ HBasicBlock* current_block_;
+ ScopedArenaVector<HInstruction*>* current_locals_;
+ HInstruction* latest_result_;
+ // Current "this" parameter.
+ // Valid only after InitializeParameters() finishes.
+ // * Null for static methods.
+ // * Non-null for instance methods.
+ HParameterValue* current_this_parameter_;
+
+ ScopedArenaVector<HBasicBlock*> loop_headers_;
static constexpr int kDefaultNumberOfLoops = 2;
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index f39acab..afe7484 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -1284,9 +1284,9 @@
DCHECK(input_other->IsShr()); // For UShr, we would have taken the branch above.
// Replace SHR+AND with USHR, for example "(x >> 24) & 0xff" -> "x >>> 24".
HUShr* ushr = new (GetGraph()->GetAllocator()) HUShr(instruction->GetType(),
- input_other->InputAt(0),
- input_other->InputAt(1),
- input_other->GetDexPc());
+ input_other->InputAt(0),
+ input_other->InputAt(1),
+ input_other->GetDexPc());
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, ushr);
input_other->GetBlock()->RemoveInstruction(input_other);
RecordSimplification();
diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h
index 3533c88..033a644 100644
--- a/compiler/optimizing/intrinsics_arm64.h
+++ b/compiler/optimizing/intrinsics_arm64.h
@@ -57,8 +57,8 @@
bool TryDispatch(HInvoke* invoke);
private:
- ArenaAllocator* allocator_;
- CodeGeneratorARM64* codegen_;
+ ArenaAllocator* const allocator_;
+ CodeGeneratorARM64* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARM64);
};
@@ -81,7 +81,7 @@
ArenaAllocator* GetAllocator();
- CodeGeneratorARM64* codegen_;
+ CodeGeneratorARM64* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorARM64);
};
diff --git a/compiler/optimizing/intrinsics_arm_vixl.h b/compiler/optimizing/intrinsics_arm_vixl.h
index 4f18ca3..9c02d0a 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.h
+++ b/compiler/optimizing/intrinsics_arm_vixl.h
@@ -46,9 +46,9 @@
bool TryDispatch(HInvoke* invoke);
private:
- ArenaAllocator* allocator_;
- CodeGenerator* codegen_;
- ArmVIXLAssembler* assembler_;
+ ArenaAllocator* const allocator_;
+ CodeGenerator* const codegen_;
+ ArmVIXLAssembler* const assembler_;
const ArmInstructionSetFeatures& features_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARMVIXL);
@@ -71,7 +71,7 @@
ArenaAllocator* GetAllocator();
ArmVIXLAssembler* GetAssembler();
- CodeGeneratorARMVIXL* codegen_;
+ CodeGeneratorARMVIXL* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorARMVIXL);
};
diff --git a/compiler/optimizing/intrinsics_mips.h b/compiler/optimizing/intrinsics_mips.h
index afd9548..13397f1 100644
--- a/compiler/optimizing/intrinsics_mips.h
+++ b/compiler/optimizing/intrinsics_mips.h
@@ -49,8 +49,8 @@
bool TryDispatch(HInvoke* invoke);
private:
- CodeGeneratorMIPS* codegen_;
- ArenaAllocator* allocator_;
+ CodeGeneratorMIPS* const codegen_;
+ ArenaAllocator* const allocator_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS);
};
@@ -77,7 +77,7 @@
ArenaAllocator* GetAllocator();
- CodeGeneratorMIPS* codegen_;
+ CodeGeneratorMIPS* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorMIPS);
};
diff --git a/compiler/optimizing/intrinsics_mips64.h b/compiler/optimizing/intrinsics_mips64.h
index 6085c7b..6f40d90 100644
--- a/compiler/optimizing/intrinsics_mips64.h
+++ b/compiler/optimizing/intrinsics_mips64.h
@@ -49,8 +49,8 @@
bool TryDispatch(HInvoke* invoke);
private:
- CodeGeneratorMIPS64* codegen_;
- ArenaAllocator* allocator_;
+ CodeGeneratorMIPS64* const codegen_;
+ ArenaAllocator* const allocator_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS64);
};
@@ -73,7 +73,7 @@
ArenaAllocator* GetAllocator();
- CodeGeneratorMIPS64* codegen_;
+ CodeGeneratorMIPS64* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorMIPS64);
};
diff --git a/compiler/optimizing/intrinsics_x86.h b/compiler/optimizing/intrinsics_x86.h
index ba3ca0a..e3555e7 100644
--- a/compiler/optimizing/intrinsics_x86.h
+++ b/compiler/optimizing/intrinsics_x86.h
@@ -49,8 +49,8 @@
bool TryDispatch(HInvoke* invoke);
private:
- ArenaAllocator* allocator_;
- CodeGeneratorX86* codegen_;
+ ArenaAllocator* const allocator_;
+ CodeGeneratorX86* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86);
};
@@ -73,7 +73,7 @@
ArenaAllocator* GetAllocator();
- CodeGeneratorX86* codegen_;
+ CodeGeneratorX86* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorX86);
};
diff --git a/compiler/optimizing/intrinsics_x86_64.h b/compiler/optimizing/intrinsics_x86_64.h
index b0fbe91..5cb601e 100644
--- a/compiler/optimizing/intrinsics_x86_64.h
+++ b/compiler/optimizing/intrinsics_x86_64.h
@@ -49,8 +49,8 @@
bool TryDispatch(HInvoke* invoke);
private:
- ArenaAllocator* allocator_;
- CodeGeneratorX86_64* codegen_;
+ ArenaAllocator* const allocator_;
+ CodeGeneratorX86_64* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86_64);
};
@@ -73,7 +73,7 @@
ArenaAllocator* GetAllocator();
- CodeGeneratorX86_64* codegen_;
+ CodeGeneratorX86_64* const codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorX86_64);
};
diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h
index 6a25da3..5940ee7 100644
--- a/compiler/optimizing/load_store_analysis.h
+++ b/compiler/optimizing/load_store_analysis.h
@@ -25,7 +25,7 @@
// A ReferenceInfo contains additional info about a reference such as
// whether it's a singleton, returned, etc.
-class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
+class ReferenceInfo : public ArenaObject<kArenaAllocLSA> {
public:
ReferenceInfo(HInstruction* reference, size_t pos)
: reference_(reference),
@@ -99,7 +99,7 @@
// A heap location is a reference-offset/index pair that a value can be loaded from
// or stored to.
-class HeapLocation : public ArenaObject<kArenaAllocMisc> {
+class HeapLocation : public ArenaObject<kArenaAllocLSA> {
public:
static constexpr size_t kInvalidFieldOffset = -1;
@@ -172,12 +172,12 @@
explicit HeapLocationCollector(HGraph* graph)
: HGraphVisitor(graph),
- ref_info_array_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
- heap_locations_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ ref_info_array_(graph->GetAllocator()->Adapter(kArenaAllocLSA)),
+ heap_locations_(graph->GetAllocator()->Adapter(kArenaAllocLSA)),
aliasing_matrix_(graph->GetAllocator(),
kInitialAliasingMatrixBitVectorSize,
true,
- kArenaAllocLSE),
+ kArenaAllocLSA),
has_heap_stores_(false),
has_volatile_(false),
has_monitor_operations_(false) {}
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 39bfc86..af5585e 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -16,6 +16,9 @@
#include "load_store_elimination.h"
+#include "base/array_ref.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "escape.h"
#include "load_store_analysis.h"
#include "side_effects_analysis.h"
@@ -45,17 +48,18 @@
: HGraphVisitor(graph, stats),
heap_location_collector_(heap_locations_collector),
side_effects_(side_effects),
+ allocator_(graph->GetArenaStack()),
heap_values_for_(graph->GetBlocks().size(),
- ArenaVector<HInstruction*>(heap_locations_collector.
- GetNumberOfHeapLocations(),
- kUnknownHeapValue,
- graph->GetAllocator()->Adapter(kArenaAllocLSE)),
- graph->GetAllocator()->Adapter(kArenaAllocLSE)),
- removed_loads_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
- substitute_instructions_for_loads_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
- possibly_removed_stores_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
- singleton_new_instances_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
- singleton_new_arrays_(graph->GetAllocator()->Adapter(kArenaAllocLSE)) {
+ ScopedArenaVector<HInstruction*>(heap_locations_collector.
+ GetNumberOfHeapLocations(),
+ kUnknownHeapValue,
+ allocator_.Adapter(kArenaAllocLSE)),
+ allocator_.Adapter(kArenaAllocLSE)),
+ removed_loads_(allocator_.Adapter(kArenaAllocLSE)),
+ substitute_instructions_for_loads_(allocator_.Adapter(kArenaAllocLSE)),
+ possibly_removed_stores_(allocator_.Adapter(kArenaAllocLSE)),
+ singleton_new_instances_(allocator_.Adapter(kArenaAllocLSE)),
+ singleton_new_arrays_(allocator_.Adapter(kArenaAllocLSE)) {
}
void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
@@ -146,7 +150,7 @@
void HandleLoopSideEffects(HBasicBlock* block) {
DCHECK(block->IsLoopHeader());
int block_id = block->GetBlockId();
- ArenaVector<HInstruction*>& heap_values = heap_values_for_[block_id];
+ ScopedArenaVector<HInstruction*>& heap_values = heap_values_for_[block_id];
// Don't eliminate loads in irreducible loops. This is safe for singletons, because
// they are always used by the non-eliminated loop-phi.
@@ -160,7 +164,7 @@
}
HBasicBlock* pre_header = block->GetLoopInformation()->GetPreHeader();
- ArenaVector<HInstruction*>& pre_header_heap_values =
+ ScopedArenaVector<HInstruction*>& pre_header_heap_values =
heap_values_for_[pre_header->GetBlockId()];
// Inherit the values from pre-header.
@@ -191,12 +195,12 @@
}
void MergePredecessorValues(HBasicBlock* block) {
- const ArenaVector<HBasicBlock*>& predecessors = block->GetPredecessors();
+ ArrayRef<HBasicBlock* const> predecessors(block->GetPredecessors());
if (predecessors.size() == 0) {
return;
}
- ArenaVector<HInstruction*>& heap_values = heap_values_for_[block->GetBlockId()];
+ ScopedArenaVector<HInstruction*>& heap_values = heap_values_for_[block->GetBlockId()];
for (size_t i = 0; i < heap_values.size(); i++) {
HInstruction* merged_value = nullptr;
// Whether merged_value is a result that's merged from all predecessors.
@@ -234,7 +238,8 @@
// or the heap value may be needed after method return or deoptimization.
// Keep the last store in each predecessor since future loads cannot be eliminated.
for (HBasicBlock* predecessor : predecessors) {
- ArenaVector<HInstruction*>& pred_values = heap_values_for_[predecessor->GetBlockId()];
+ ScopedArenaVector<HInstruction*>& pred_values =
+ heap_values_for_[predecessor->GetBlockId()];
KeepIfIsStore(pred_values[i]);
}
}
@@ -303,7 +308,7 @@
size_t idx = heap_location_collector_.FindHeapLocationIndex(
ref_info, offset, index, declaring_class_def_index);
DCHECK_NE(idx, HeapLocationCollector::kHeapLocationNotFound);
- ArenaVector<HInstruction*>& heap_values =
+ ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[instruction->GetBlock()->GetBlockId()];
HInstruction* heap_value = heap_values[idx];
if (heap_value == kDefaultHeapValue) {
@@ -369,7 +374,7 @@
size_t idx = heap_location_collector_.FindHeapLocationIndex(
ref_info, offset, index, declaring_class_def_index);
DCHECK_NE(idx, HeapLocationCollector::kHeapLocationNotFound);
- ArenaVector<HInstruction*>& heap_values =
+ ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[instruction->GetBlock()->GetBlockId()];
HInstruction* heap_value = heap_values[idx];
bool same_value = false;
@@ -496,7 +501,7 @@
}
void VisitDeoptimize(HDeoptimize* instruction) {
- const ArenaVector<HInstruction*>& heap_values =
+ const ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[instruction->GetBlock()->GetBlockId()];
for (HInstruction* heap_value : heap_values) {
// Filter out fake instructions before checking instruction kind below.
@@ -523,7 +528,7 @@
}
void HandleInvoke(HInstruction* invoke) {
- ArenaVector<HInstruction*>& heap_values =
+ ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[invoke->GetBlock()->GetBlockId()];
for (size_t i = 0; i < heap_values.size(); i++) {
ReferenceInfo* ref_info = heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo();
@@ -590,7 +595,7 @@
!new_instance->NeedsChecks()) {
singleton_new_instances_.push_back(new_instance);
}
- ArenaVector<HInstruction*>& heap_values =
+ ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[new_instance->GetBlock()->GetBlockId()];
for (size_t i = 0; i < heap_values.size(); i++) {
HInstruction* ref =
@@ -612,7 +617,7 @@
if (ref_info->IsSingletonAndRemovable()) {
singleton_new_arrays_.push_back(new_array);
}
- ArenaVector<HInstruction*>& heap_values =
+ ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[new_array->GetBlock()->GetBlockId()];
for (size_t i = 0; i < heap_values.size(); i++) {
HeapLocation* location = heap_location_collector_.GetHeapLocation(i);
@@ -639,20 +644,23 @@
const HeapLocationCollector& heap_location_collector_;
const SideEffectsAnalysis& side_effects_;
+ // Use local allocator for allocating memory.
+ ScopedArenaAllocator allocator_;
+
// One array of heap values for each block.
- ArenaVector<ArenaVector<HInstruction*>> heap_values_for_;
+ ScopedArenaVector<ScopedArenaVector<HInstruction*>> heap_values_for_;
// We record the instructions that should be eliminated but may be
// used by heap locations. They'll be removed in the end.
- ArenaVector<HInstruction*> removed_loads_;
- ArenaVector<HInstruction*> substitute_instructions_for_loads_;
+ ScopedArenaVector<HInstruction*> removed_loads_;
+ ScopedArenaVector<HInstruction*> substitute_instructions_for_loads_;
// Stores in this list may be removed from the list later when it's
// found that the store cannot be eliminated.
- ArenaVector<HInstruction*> possibly_removed_stores_;
+ ScopedArenaVector<HInstruction*> possibly_removed_stores_;
- ArenaVector<HInstruction*> singleton_new_instances_;
- ArenaVector<HInstruction*> singleton_new_arrays_;
+ ScopedArenaVector<HInstruction*> singleton_new_instances_;
+ ScopedArenaVector<HInstruction*> singleton_new_arrays_;
DISALLOW_COPY_AND_ASSIGN(LSEVisitor);
};
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 1a537ca..f4f6434 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -55,14 +55,18 @@
// "visited" must be empty on entry, it's an output argument for all visited (i.e. live) blocks.
DCHECK_EQ(visited->GetHighestBitSet(), -1);
+ // Allocate memory from local ScopedArenaAllocator.
+ ScopedArenaAllocator allocator(GetArenaStack());
// Nodes that we're currently visiting, indexed by block id.
- ArenaBitVector visiting(allocator_, blocks_.size(), false, kArenaAllocGraphBuilder);
+ ArenaBitVector visiting(
+ &allocator, blocks_.size(), /* expandable */ false, kArenaAllocGraphBuilder);
+ visiting.ClearAllBits();
// Number of successors visited from a given node, indexed by block id.
- ArenaVector<size_t> successors_visited(blocks_.size(),
- 0u,
- allocator_->Adapter(kArenaAllocGraphBuilder));
+ ScopedArenaVector<size_t> successors_visited(blocks_.size(),
+ 0u,
+ allocator.Adapter(kArenaAllocGraphBuilder));
// Stack of nodes that we're currently visiting (same as marked in "visiting" above).
- ArenaVector<HBasicBlock*> worklist(allocator_->Adapter(kArenaAllocGraphBuilder));
+ ScopedArenaVector<HBasicBlock*> worklist(allocator.Adapter(kArenaAllocGraphBuilder));
constexpr size_t kDefaultWorklistSize = 8;
worklist.reserve(kDefaultWorklistSize);
visited->SetBit(entry_block_->GetBlockId());
@@ -173,7 +177,11 @@
}
GraphAnalysisResult HGraph::BuildDominatorTree() {
- ArenaBitVector visited(allocator_, blocks_.size(), false, kArenaAllocGraphBuilder);
+ // Allocate memory from local ScopedArenaAllocator.
+ ScopedArenaAllocator allocator(GetArenaStack());
+
+ ArenaBitVector visited(&allocator, blocks_.size(), false, kArenaAllocGraphBuilder);
+ visited.ClearAllBits();
// (1) Find the back edges in the graph doing a DFS traversal.
FindBackEdges(&visited);
@@ -258,14 +266,16 @@
reverse_post_order_.reserve(blocks_.size());
reverse_post_order_.push_back(entry_block_);
+ // Allocate memory from local ScopedArenaAllocator.
+ ScopedArenaAllocator allocator(GetArenaStack());
// Number of visits of a given node, indexed by block id.
- ArenaVector<size_t> visits(blocks_.size(), 0u, allocator_->Adapter(kArenaAllocGraphBuilder));
+ ScopedArenaVector<size_t> visits(blocks_.size(), 0u, allocator.Adapter(kArenaAllocGraphBuilder));
// Number of successors visited from a given node, indexed by block id.
- ArenaVector<size_t> successors_visited(blocks_.size(),
- 0u,
- allocator_->Adapter(kArenaAllocGraphBuilder));
+ ScopedArenaVector<size_t> successors_visited(blocks_.size(),
+ 0u,
+ allocator.Adapter(kArenaAllocGraphBuilder));
// Nodes for which we need to visit successors.
- ArenaVector<HBasicBlock*> worklist(allocator_->Adapter(kArenaAllocGraphBuilder));
+ ScopedArenaVector<HBasicBlock*> worklist(allocator.Adapter(kArenaAllocGraphBuilder));
constexpr size_t kDefaultWorklistSize = 8;
worklist.reserve(kDefaultWorklistSize);
worklist.push_back(entry_block_);
@@ -710,10 +720,13 @@
bool is_irreducible_loop = HasBackEdgeNotDominatedByHeader();
if (is_irreducible_loop) {
- ArenaBitVector visited(graph->GetAllocator(),
+ // Allocate memory from local ScopedArenaAllocator.
+ ScopedArenaAllocator allocator(graph->GetArenaStack());
+ ArenaBitVector visited(&allocator,
graph->GetBlocks().size(),
/* expandable */ false,
kArenaAllocGraphBuilder);
+ visited.ClearAllBits();
// Stop marking blocks at the loop header.
visited.SetBit(header_->GetBlockId());
@@ -942,7 +955,7 @@
}
}
-void HEnvironment::CopyFrom(const ArenaVector<HInstruction*>& locals) {
+void HEnvironment::CopyFrom(ArrayRef<HInstruction* const> locals) {
for (size_t i = 0; i < locals.size(); i++) {
HInstruction* instruction = locals[i];
SetRawEnvAt(i, instruction);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 99fde75..75cdb3e 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1839,7 +1839,7 @@
}
}
- void CopyFrom(const ArenaVector<HInstruction*>& locals);
+ void CopyFrom(ArrayRef<HInstruction* const> locals);
void CopyFrom(HEnvironment* environment);
// Copy from `env`. If it's a loop phi for `loop_header`, copy the first
diff --git a/compiler/optimizing/nodes_test.cc b/compiler/optimizing/nodes_test.cc
index b2180d9..9bfd250 100644
--- a/compiler/optimizing/nodes_test.cc
+++ b/compiler/optimizing/nodes_test.cc
@@ -129,10 +129,9 @@
HEnvironment* environment = new (GetAllocator()) HEnvironment(
GetAllocator(), 1, graph->GetArtMethod(), 0, with_environment);
- ArenaVector<HInstruction*> array(GetAllocator()->Adapter());
- array.push_back(parameter1);
+ HInstruction* const array[] = { parameter1 };
- environment->CopyFrom(array);
+ environment->CopyFrom(ArrayRef<HInstruction* const>(array));
with_environment->SetRawEnvironment(environment);
ASSERT_TRUE(parameter1->HasEnvironmentUses());
@@ -140,13 +139,13 @@
HEnvironment* parent1 = new (GetAllocator()) HEnvironment(
GetAllocator(), 1, graph->GetArtMethod(), 0, nullptr);
- parent1->CopyFrom(array);
+ parent1->CopyFrom(ArrayRef<HInstruction* const>(array));
ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 2u);
HEnvironment* parent2 = new (GetAllocator()) HEnvironment(
GetAllocator(), 1, graph->GetArtMethod(), 0, nullptr);
- parent2->CopyFrom(array);
+ parent2->CopyFrom(ArrayRef<HInstruction* const>(array));
parent1->SetAndCopyParentChain(GetAllocator(), parent2);
// One use for parent2, and one other use for the new parent of parent1.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 9bfb7a5..42f32b7 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1146,7 +1146,8 @@
if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
MemStats mem_stats(allocator.GetMemStats());
MemStats peak_stats(arena_stack.GetPeakStats());
- LOG(INFO) << dex_file.PrettyMethod(method_idx)
+ LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
+ << dex_file.PrettyMethod(method_idx)
<< "\n" << Dumpable<MemStats>(mem_stats)
<< "\n" << Dumpable<MemStats>(peak_stats);
}
@@ -1256,7 +1257,8 @@
if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
MemStats mem_stats(allocator.GetMemStats());
MemStats peak_stats(arena_stack.GetPeakStats());
- LOG(INFO) << dex_file->PrettyMethod(method_idx)
+ LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
+ << dex_file->PrettyMethod(method_idx)
<< "\n" << Dumpable<MemStats>(mem_stats)
<< "\n" << Dumpable<MemStats>(peak_stats);
}
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 5632f9a..9aba912 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -22,7 +22,9 @@
#include "common_compiler_test.h"
#include "dex_file.h"
#include "dex_instruction.h"
-#include "handle_scope.h"
+#include "handle_scope-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
#include "nodes.h"
#include "scoped_thread_state_change.h"
#include "ssa_builder.h"
@@ -123,8 +125,7 @@
// Create a control-flow graph from Dex instructions.
HGraph* CreateCFG(const uint16_t* data, DataType::Type return_type = DataType::Type::kInt32) {
- const DexFile::CodeItem* item =
- reinterpret_cast<const DexFile::CodeItem*>(data);
+ const DexFile::CodeItem* code_item = reinterpret_cast<const DexFile::CodeItem*>(data);
HGraph* graph = CreateGraph();
{
@@ -132,7 +133,19 @@
if (handles_ == nullptr) {
handles_.reset(new VariableSizedHandleScope(soa.Self()));
}
- HGraphBuilder builder(graph, *item, handles_.get(), return_type);
+ const DexFile* dex_file = graph->GetAllocator()->Alloc<DexFile>();
+ const DexCompilationUnit* dex_compilation_unit =
+ new (graph->GetAllocator()) DexCompilationUnit(
+ handles_->NewHandle<mirror::ClassLoader>(nullptr),
+ /* class_linker */ nullptr,
+ *dex_file,
+ code_item,
+ /* class_def_index */ DexFile::kDexNoIndex16,
+ /* method_idx */ dex::kDexNoIndex,
+ /* access_flags */ 0u,
+ /* verified_method */ nullptr,
+ handles_->NewHandle<mirror::DexCache>(nullptr));
+ HGraphBuilder builder(graph, dex_compilation_unit, *code_item, handles_.get(), return_type);
bool graph_built = (builder.BuildGraph() == kAnalysisSuccess);
return graph_built ? graph : nullptr;
}
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 6d9ebc8..cb9dc42 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -18,8 +18,11 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "base/enums.h"
#include "class_linker-inl.h"
+#include "handle_scope-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "scoped_thread_state_change-inl.h"
@@ -70,14 +73,16 @@
Handle<mirror::ClassLoader> class_loader,
Handle<mirror::DexCache> hint_dex_cache,
HandleCache* handle_cache,
- ArenaVector<HInstruction*>* worklist,
bool is_first_run)
: HGraphDelegateVisitor(graph),
class_loader_(class_loader),
hint_dex_cache_(hint_dex_cache),
handle_cache_(handle_cache),
- worklist_(worklist),
- is_first_run_(is_first_run) {}
+ allocator_(graph->GetArenaStack()),
+ worklist_(allocator_.Adapter(kArenaAllocReferenceTypePropagation)),
+ is_first_run_(is_first_run) {
+ worklist_.reserve(kDefaultWorklistSize);
+ }
void VisitDeoptimize(HDeoptimize* deopt) OVERRIDE;
void VisitNewInstance(HNewInstance* new_instance) OVERRIDE;
@@ -87,9 +92,6 @@
void VisitLoadException(HLoadException* instr) OVERRIDE;
void VisitNewArray(HNewArray* instr) OVERRIDE;
void VisitParameterValue(HParameterValue* instr) OVERRIDE;
- void UpdateFieldAccessTypeInfo(HInstruction* instr, const FieldInfo& info);
- void SetClassAsTypeInfo(HInstruction* instr, ObjPtr<mirror::Class> klass, bool is_exact)
- REQUIRES_SHARED(Locks::mutator_lock_);
void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE;
void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE;
void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) OVERRIDE;
@@ -99,16 +101,39 @@
void VisitCheckCast(HCheckCast* instr) OVERRIDE;
void VisitBoundType(HBoundType* instr) OVERRIDE;
void VisitNullCheck(HNullCheck* instr) OVERRIDE;
+ void VisitPhi(HPhi* phi);
+
+ void VisitBasicBlock(HBasicBlock* block);
+ void ProcessWorklist();
+
+ private:
+ void UpdateFieldAccessTypeInfo(HInstruction* instr, const FieldInfo& info);
+ void SetClassAsTypeInfo(HInstruction* instr, ObjPtr<mirror::Class> klass, bool is_exact)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void BoundTypeForIfNotNull(HBasicBlock* block);
+ static void BoundTypeForIfInstanceOf(HBasicBlock* block);
+ static bool UpdateNullability(HInstruction* instr);
+ static void UpdateBoundType(HBoundType* bound_type) REQUIRES_SHARED(Locks::mutator_lock_);
+ void UpdateArrayGet(HArrayGet* instr) REQUIRES_SHARED(Locks::mutator_lock_);
+ void UpdatePhi(HPhi* phi) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool UpdateReferenceTypeInfo(HInstruction* instr);
void UpdateReferenceTypeInfo(HInstruction* instr,
dex::TypeIndex type_idx,
const DexFile& dex_file,
bool is_exact);
- private:
+ void AddToWorklist(HInstruction* instruction);
+ void AddDependentInstructionsToWorklist(HInstruction* instruction);
+
+ static constexpr size_t kDefaultWorklistSize = 8;
+
Handle<mirror::ClassLoader> class_loader_;
Handle<mirror::DexCache> hint_dex_cache_;
- HandleCache* handle_cache_;
- ArenaVector<HInstruction*>* worklist_;
+ HandleCache* const handle_cache_;
+
+ // Use local allocator for allocating memory.
+ ScopedArenaAllocator allocator_;
+ ScopedArenaVector<HInstruction*> worklist_;
const bool is_first_run_;
};
@@ -122,7 +147,6 @@
class_loader_(class_loader),
hint_dex_cache_(hint_dex_cache),
handle_cache_(handles),
- worklist_(graph->GetAllocator()->Adapter(kArenaAllocReferenceTypePropagation)),
is_first_run_(is_first_run) {
}
@@ -158,7 +182,6 @@
class_loader_,
hint_dex_cache_,
&handle_cache_,
- &worklist_,
is_first_run_);
instruction->Accept(&visitor);
}
@@ -319,26 +342,20 @@
}
void ReferenceTypePropagation::Run() {
- worklist_.reserve(kDefaultWorklistSize);
+ RTPVisitor visitor(graph_, class_loader_, hint_dex_cache_, &handle_cache_, is_first_run_);
// To properly propagate type info we need to visit in the dominator-based order.
// Reverse post order guarantees a node's dominators are visited first.
// We take advantage of this order in `VisitBasicBlock`.
for (HBasicBlock* block : graph_->GetReversePostOrder()) {
- VisitBasicBlock(block);
+ visitor.VisitBasicBlock(block);
}
- ProcessWorklist();
+ visitor.ProcessWorklist();
ValidateTypes();
}
-void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) {
- RTPVisitor visitor(graph_,
- class_loader_,
- hint_dex_cache_,
- &handle_cache_,
- &worklist_,
- is_first_run_);
+void ReferenceTypePropagation::RTPVisitor::VisitBasicBlock(HBasicBlock* block) {
// Handle Phis first as there might be instructions in the same block who depend on them.
for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
VisitPhi(it.Current()->AsPhi());
@@ -348,7 +365,7 @@
// last visited instruction, use `HInstructionIteratorHandleChanges` iterator.
for (HInstructionIteratorHandleChanges it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
- instr->Accept(&visitor);
+ instr->Accept(this);
}
// Add extra nodes to bound types.
@@ -357,7 +374,7 @@
BoundTypeForClassCheck(block->GetLastInstruction());
}
-void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) {
+void ReferenceTypePropagation::RTPVisitor::BoundTypeForIfNotNull(HBasicBlock* block) {
HIf* ifInstruction = block->GetLastInstruction()->AsIf();
if (ifInstruction == nullptr) {
return;
@@ -391,7 +408,7 @@
: ifInstruction->IfFalseSuccessor();
ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create(
- handle_cache_.GetObjectClassHandle(), /* is_exact */ false);
+ handle_cache_->GetObjectClassHandle(), /* is_exact */ false);
BoundTypeIn(obj, notNullBlock, /* start_instruction */ nullptr, object_rti);
}
@@ -469,7 +486,7 @@
// `if (x instanceof ClassX) { }`
// If that's the case insert an HBoundType instruction to bound the type of `x`
// to `ClassX` in the scope of the dominated blocks.
-void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) {
+void ReferenceTypePropagation::RTPVisitor::BoundTypeForIfInstanceOf(HBasicBlock* block) {
HIf* ifInstruction = block->GetLastInstruction()->AsIf();
if (ifInstruction == nullptr) {
return;
@@ -728,7 +745,7 @@
}
}
-void ReferenceTypePropagation::VisitPhi(HPhi* phi) {
+void ReferenceTypePropagation::RTPVisitor::VisitPhi(HPhi* phi) {
if (phi->IsDead() || phi->GetType() != DataType::Type::kReference) {
return;
}
@@ -812,7 +829,7 @@
return ReferenceTypeInfo::Create(result_type_handle, is_exact);
}
-void ReferenceTypePropagation::UpdateArrayGet(HArrayGet* instr, HandleCache* handle_cache) {
+void ReferenceTypePropagation::RTPVisitor::UpdateArrayGet(HArrayGet* instr) {
DCHECK_EQ(DataType::Type::kReference, instr->GetType());
ReferenceTypeInfo parent_rti = instr->InputAt(0)->GetReferenceTypeInfo();
@@ -823,7 +840,7 @@
Handle<mirror::Class> handle = parent_rti.GetTypeHandle();
if (handle->IsObjectArrayClass() && IsAdmissible(handle->GetComponentType())) {
ReferenceTypeInfo::TypeHandle component_handle =
- handle_cache->NewHandle(handle->GetComponentType());
+ handle_cache_->NewHandle(handle->GetComponentType());
bool is_exact = component_handle->CannotBeAssignedFromOtherTypes();
instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(component_handle, is_exact));
} else {
@@ -832,7 +849,7 @@
}
}
-bool ReferenceTypePropagation::UpdateReferenceTypeInfo(HInstruction* instr) {
+bool ReferenceTypePropagation::RTPVisitor::UpdateReferenceTypeInfo(HInstruction* instr) {
ScopedObjectAccess soa(Thread::Current());
ReferenceTypeInfo previous_rti = instr->GetReferenceTypeInfo();
@@ -848,7 +865,7 @@
} else if (instr->IsArrayGet()) {
// TODO: consider if it's worth "looking back" and binding the input object
// to an array type.
- UpdateArrayGet(instr->AsArrayGet(), &handle_cache_);
+ UpdateArrayGet(instr->AsArrayGet());
} else {
LOG(FATAL) << "Invalid instruction (should not get here)";
}
@@ -873,13 +890,13 @@
}
ScopedObjectAccess soa(Thread::Current());
- UpdateArrayGet(instr, handle_cache_);
+ UpdateArrayGet(instr);
if (!instr->GetReferenceTypeInfo().IsValid()) {
- worklist_->push_back(instr);
+ worklist_.push_back(instr);
}
}
-void ReferenceTypePropagation::UpdateBoundType(HBoundType* instr) {
+void ReferenceTypePropagation::RTPVisitor::UpdateBoundType(HBoundType* instr) {
ReferenceTypeInfo input_rti = instr->InputAt(0)->GetReferenceTypeInfo();
if (!input_rti.IsValid()) {
return; // No new info yet.
@@ -903,7 +920,7 @@
// NullConstant inputs are ignored during merging as they do not provide any useful information.
// If all the inputs are NullConstants then the type of the phi will be set to Object.
-void ReferenceTypePropagation::UpdatePhi(HPhi* instr) {
+void ReferenceTypePropagation::RTPVisitor::UpdatePhi(HPhi* instr) {
DCHECK(instr->IsLive());
HInputsRef inputs = instr->GetInputs();
@@ -931,7 +948,7 @@
if (inputs[i]->IsNullConstant()) {
continue;
}
- new_rti = MergeTypes(new_rti, inputs[i]->GetReferenceTypeInfo(), &handle_cache_);
+ new_rti = MergeTypes(new_rti, inputs[i]->GetReferenceTypeInfo(), handle_cache_);
if (new_rti.IsValid() && new_rti.IsObjectClass()) {
if (!new_rti.IsExact()) {
break;
@@ -948,7 +965,7 @@
// Re-computes and updates the nullability of the instruction. Returns whether or
// not the nullability was changed.
-bool ReferenceTypePropagation::UpdateNullability(HInstruction* instr) {
+bool ReferenceTypePropagation::RTPVisitor::UpdateNullability(HInstruction* instr) {
DCHECK((instr->IsPhi() && instr->AsPhi()->IsLive())
|| instr->IsBoundType()
|| instr->IsNullCheck()
@@ -976,7 +993,7 @@
return existing_can_be_null != instr->CanBeNull();
}
-void ReferenceTypePropagation::ProcessWorklist() {
+void ReferenceTypePropagation::RTPVisitor::ProcessWorklist() {
while (!worklist_.empty()) {
HInstruction* instruction = worklist_.back();
worklist_.pop_back();
@@ -988,13 +1005,14 @@
}
}
-void ReferenceTypePropagation::AddToWorklist(HInstruction* instruction) {
+void ReferenceTypePropagation::RTPVisitor::AddToWorklist(HInstruction* instruction) {
DCHECK_EQ(instruction->GetType(), DataType::Type::kReference)
<< instruction->DebugName() << ":" << instruction->GetType();
worklist_.push_back(instruction);
}
-void ReferenceTypePropagation::AddDependentInstructionsToWorklist(HInstruction* instruction) {
+void ReferenceTypePropagation::RTPVisitor::AddDependentInstructionsToWorklist(
+ HInstruction* instruction) {
for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
HInstruction* user = use.GetUser();
if ((user->IsPhi() && user->AsPhi()->IsLive())
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index c221282..fd4dad2 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -18,12 +18,10 @@
#define ART_COMPILER_OPTIMIZING_REFERENCE_TYPE_PROPAGATION_H_
#include "base/arena_containers.h"
-#include "driver/dex_compilation_unit.h"
-#include "handle_scope-inl.h"
+#include "mirror/class-inl.h"
#include "nodes.h"
#include "obj_ptr.h"
#include "optimization.h"
-#include "optimizing_compiler_stats.h"
namespace art {
@@ -91,22 +89,6 @@
class RTPVisitor;
- void VisitPhi(HPhi* phi);
- void VisitBasicBlock(HBasicBlock* block);
- void UpdateBoundType(HBoundType* bound_type) REQUIRES_SHARED(Locks::mutator_lock_);
- void UpdatePhi(HPhi* phi) REQUIRES_SHARED(Locks::mutator_lock_);
- void BoundTypeForIfNotNull(HBasicBlock* block);
- void BoundTypeForIfInstanceOf(HBasicBlock* block);
- void ProcessWorklist();
- void AddToWorklist(HInstruction* instr);
- void AddDependentInstructionsToWorklist(HInstruction* instr);
-
- bool UpdateNullability(HInstruction* instr);
- bool UpdateReferenceTypeInfo(HInstruction* instr);
-
- static void UpdateArrayGet(HArrayGet* instr, HandleCache* handle_cache)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
static ReferenceTypeInfo MergeTypes(const ReferenceTypeInfo& a,
const ReferenceTypeInfo& b,
HandleCache* handle_cache)
@@ -122,13 +104,9 @@
Handle<mirror::DexCache> hint_dex_cache_;
HandleCache handle_cache_;
- ArenaVector<HInstruction*> worklist_;
-
// Whether this reference type propagation is the first run we are doing.
const bool is_first_run_;
- static constexpr size_t kDefaultWorklistSize = 8;
-
friend class ReferenceTypePropagationTest;
DISALLOW_COPY_AND_ASSIGN(ReferenceTypePropagation);
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index e284498..18ef69f 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -87,7 +87,7 @@
// to find an optimal split position.
LiveInterval* SplitBetween(LiveInterval* interval, size_t from, size_t to);
- ScopedArenaAllocator* allocator_;
+ ScopedArenaAllocator* const allocator_;
CodeGenerator* const codegen_;
const SsaLivenessAnalysis& liveness_;
};
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index c673d54..57eb762 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -781,7 +781,7 @@
#if defined(ART_ENABLE_CODEGEN_arm64) || defined(ART_ENABLE_CODEGEN_arm)
// Phase-local allocator that allocates scheduler internal data structures like
// scheduling nodes, internel nodes map, dependencies, etc.
- ScopedArenaAllocator arena_allocator(graph_->GetArenaStack());
+ ScopedArenaAllocator allocator(graph_->GetArenaStack());
CriticalPathSchedulingNodeSelector critical_path_selector;
RandomSchedulingNodeSelector random_selector;
SchedulingNodeSelector* selector = schedule_randomly
@@ -797,7 +797,7 @@
switch (instruction_set_) {
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64: {
- arm64::HSchedulerARM64 scheduler(&arena_allocator, selector);
+ arm64::HSchedulerARM64 scheduler(&allocator, selector);
scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
scheduler.Schedule(graph_);
break;
@@ -807,7 +807,7 @@
case kThumb2:
case kArm: {
arm::SchedulingLatencyVisitorARM arm_latency_visitor(codegen_);
- arm::HSchedulerARM scheduler(&arena_allocator, selector, &arm_latency_visitor);
+ arm::HSchedulerARM scheduler(&allocator, selector, &arm_latency_visitor);
scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
scheduler.Schedule(graph_);
break;
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index 3efd26a..afdf6f1 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -253,14 +253,14 @@
public:
SchedulingGraph(const HScheduler* scheduler, ScopedArenaAllocator* allocator)
: scheduler_(scheduler),
- arena_(allocator),
+ allocator_(allocator),
contains_scheduling_barrier_(false),
- nodes_map_(arena_->Adapter(kArenaAllocScheduler)),
+ nodes_map_(allocator_->Adapter(kArenaAllocScheduler)),
heap_location_collector_(nullptr) {}
SchedulingNode* AddNode(HInstruction* instr, bool is_scheduling_barrier = false) {
std::unique_ptr<SchedulingNode> node(
- new (arena_) SchedulingNode(instr, arena_, is_scheduling_barrier));
+ new (allocator_) SchedulingNode(instr, allocator_, is_scheduling_barrier));
SchedulingNode* result = node.get();
nodes_map_.Insert(std::make_pair(instr, std::move(node)));
contains_scheduling_barrier_ |= is_scheduling_barrier;
@@ -323,7 +323,7 @@
const HScheduler* const scheduler_;
- ScopedArenaAllocator* const arena_;
+ ScopedArenaAllocator* const allocator_;
bool contains_scheduling_barrier_;
diff --git a/compiler/optimizing/select_generator.cc b/compiler/optimizing/select_generator.cc
index 0e46aec..77ec9a6 100644
--- a/compiler/optimizing/select_generator.cc
+++ b/compiler/optimizing/select_generator.cc
@@ -16,6 +16,8 @@
#include "select_generator.h"
+#include "reference_type_propagation.h"
+
namespace art {
static constexpr size_t kMaxInstructionsInBranch = 1u;
diff --git a/compiler/optimizing/select_generator.h b/compiler/optimizing/select_generator.h
index c060146..f8cf00e 100644
--- a/compiler/optimizing/select_generator.h
+++ b/compiler/optimizing/select_generator.h
@@ -58,7 +58,6 @@
#define ART_COMPILER_OPTIMIZING_SELECT_GENERATOR_H_
#include "optimization.h"
-#include "reference_type_propagation.h"
namespace art {
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index f4a8a17..e4edbfd 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -105,7 +105,7 @@
}
static void AddDependentInstructionsToWorklist(HInstruction* instruction,
- ArenaVector<HPhi*>* worklist) {
+ ScopedArenaVector<HPhi*>* worklist) {
// If `instruction` is a dead phi, type conflict was just identified. All its
// live phi users, and transitively users of those users, therefore need to be
// marked dead/conflicting too, so we add them to the worklist. Otherwise we
@@ -167,7 +167,7 @@
}
// Replace inputs of `phi` to match its type. Return false if conflict is identified.
-bool SsaBuilder::TypeInputsOfPhi(HPhi* phi, ArenaVector<HPhi*>* worklist) {
+bool SsaBuilder::TypeInputsOfPhi(HPhi* phi, ScopedArenaVector<HPhi*>* worklist) {
DataType::Type common_type = phi->GetType();
if (DataType::IsIntegralType(common_type)) {
// We do not need to retype ambiguous inputs because they are always constructed
@@ -213,7 +213,7 @@
// Attempt to set the primitive type of `phi` to match its inputs. Return whether
// it was changed by the algorithm or not.
-bool SsaBuilder::UpdatePrimitiveType(HPhi* phi, ArenaVector<HPhi*>* worklist) {
+bool SsaBuilder::UpdatePrimitiveType(HPhi* phi, ScopedArenaVector<HPhi*>* worklist) {
DCHECK(phi->IsLive());
DataType::Type original_type = phi->GetType();
@@ -233,7 +233,7 @@
}
void SsaBuilder::RunPrimitiveTypePropagation() {
- ArenaVector<HPhi*> worklist(graph_->GetAllocator()->Adapter(kArenaAllocGraphBuilder));
+ ScopedArenaVector<HPhi*> worklist(local_allocator_->Adapter(kArenaAllocGraphBuilder));
for (HBasicBlock* block : graph_->GetReversePostOrder()) {
if (block->IsLoopHeader()) {
@@ -262,7 +262,7 @@
EquivalentPhisCleanup();
}
-void SsaBuilder::ProcessPrimitiveTypePropagationWorklist(ArenaVector<HPhi*>* worklist) {
+void SsaBuilder::ProcessPrimitiveTypePropagationWorklist(ScopedArenaVector<HPhi*>* worklist) {
// Process worklist
while (!worklist->empty()) {
HPhi* phi = worklist->back();
@@ -319,7 +319,7 @@
// uses (because they are untyped) and environment uses (if --debuggable).
// After resolving all ambiguous ArrayGets, we will re-run primitive type
// propagation on the Phis which need to be updated.
- ArenaVector<HPhi*> worklist(graph_->GetAllocator()->Adapter(kArenaAllocGraphBuilder));
+ ScopedArenaVector<HPhi*> worklist(local_allocator_->Adapter(kArenaAllocGraphBuilder));
{
ScopedObjectAccess soa(Thread::Current());
@@ -623,8 +623,7 @@
|| (next->GetType() != type)) {
ArenaAllocator* allocator = graph_->GetAllocator();
HInputsRef inputs = phi->GetInputs();
- HPhi* new_phi =
- new (allocator) HPhi(allocator, phi->GetRegNumber(), inputs.size(), type);
+ HPhi* new_phi = new (allocator) HPhi(allocator, phi->GetRegNumber(), inputs.size(), type);
// Copy the inputs. Note that the graph may not be correctly typed
// by doing this copy, but the type propagation phase will fix it.
ArrayRef<HUserRecord<HInstruction*>> new_input_records = new_phi->GetInputRecords();
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 509cdc1..60831a9 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -17,7 +17,8 @@
#ifndef ART_COMPILER_OPTIMIZING_SSA_BUILDER_H_
#define ART_COMPILER_OPTIMIZING_SSA_BUILDER_H_
-#include "base/arena_containers.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "nodes.h"
#include "optimization.h"
@@ -50,15 +51,17 @@
SsaBuilder(HGraph* graph,
Handle<mirror::ClassLoader> class_loader,
Handle<mirror::DexCache> dex_cache,
- VariableSizedHandleScope* handles)
+ VariableSizedHandleScope* handles,
+ ScopedArenaAllocator* local_allocator)
: graph_(graph),
class_loader_(class_loader),
dex_cache_(dex_cache),
handles_(handles),
agets_fixed_(false),
- ambiguous_agets_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)),
- ambiguous_asets_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)),
- uninitialized_strings_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)) {
+ local_allocator_(local_allocator),
+ ambiguous_agets_(local_allocator->Adapter(kArenaAllocGraphBuilder)),
+ ambiguous_asets_(local_allocator->Adapter(kArenaAllocGraphBuilder)),
+ uninitialized_strings_(local_allocator->Adapter(kArenaAllocGraphBuilder)) {
graph_->InitializeInexactObjectRTI(handles);
}
@@ -105,9 +108,9 @@
// input. Returns false if the type of an array is unknown.
bool FixAmbiguousArrayOps();
- bool TypeInputsOfPhi(HPhi* phi, ArenaVector<HPhi*>* worklist);
- bool UpdatePrimitiveType(HPhi* phi, ArenaVector<HPhi*>* worklist);
- void ProcessPrimitiveTypePropagationWorklist(ArenaVector<HPhi*>* worklist);
+ bool TypeInputsOfPhi(HPhi* phi, ScopedArenaVector<HPhi*>* worklist);
+ bool UpdatePrimitiveType(HPhi* phi, ScopedArenaVector<HPhi*>* worklist);
+ void ProcessPrimitiveTypePropagationWorklist(ScopedArenaVector<HPhi*>* worklist);
HFloatConstant* GetFloatEquivalent(HIntConstant* constant);
HDoubleConstant* GetDoubleEquivalent(HLongConstant* constant);
@@ -116,7 +119,7 @@
void RemoveRedundantUninitializedStrings();
- HGraph* graph_;
+ HGraph* const graph_;
Handle<mirror::ClassLoader> class_loader_;
Handle<mirror::DexCache> dex_cache_;
VariableSizedHandleScope* const handles_;
@@ -124,9 +127,10 @@
// True if types of ambiguous ArrayGets have been resolved.
bool agets_fixed_;
- ArenaVector<HArrayGet*> ambiguous_agets_;
- ArenaVector<HArraySet*> ambiguous_asets_;
- ArenaVector<HNewInstance*> uninitialized_strings_;
+ ScopedArenaAllocator* const local_allocator_;
+ ScopedArenaVector<HArrayGet*> ambiguous_agets_;
+ ScopedArenaVector<HArraySet*> ambiguous_asets_;
+ ScopedArenaVector<HNewInstance*> uninitialized_strings_;
DISALLOW_COPY_AND_ASSIGN(SsaBuilder);
};
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 9800af7..f83bb52 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -1292,7 +1292,7 @@
// Use a local ScopedArenaAllocator for allocating memory.
// This allocator must remain alive while doing register allocation.
- ScopedArenaAllocator* allocator_;
+ ScopedArenaAllocator* const allocator_;
ScopedArenaVector<BlockInfo*> block_infos_;
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index 9b78e0e..b9bfbaa 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -95,8 +95,7 @@
graph_->GetDexFile(), dex::TypeIndex(3), 3, DataType::Type::kInt32);
HInstruction* extra_arg2 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(4), 4, DataType::Type::kReference);
- ArenaVector<HInstruction*> args({ array, index, value, extra_arg1, extra_arg2 },
- GetAllocator()->Adapter());
+ HInstruction* const args[] = { array, index, value, extra_arg1, extra_arg2 };
for (HInstruction* insn : args) {
entry_->AddInstruction(insn);
}
@@ -109,7 +108,7 @@
/* method */ nullptr,
/* dex_pc */ 0u,
null_check);
- null_check_env->CopyFrom(args);
+ null_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
null_check->SetRawEnvironment(null_check_env);
HInstruction* length = new (GetAllocator()) HArrayLength(array, 0);
block->AddInstruction(length);
@@ -120,7 +119,7 @@
/* method */ nullptr,
/* dex_pc */ 0u,
bounds_check);
- bounds_check_env->CopyFrom(args);
+ bounds_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
bounds_check->SetRawEnvironment(bounds_check_env);
HInstruction* array_set =
new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
@@ -144,7 +143,7 @@
// Environment uses keep the reference argument alive.
"ranges: { [10,19) }, uses: { }, { 15 19 } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0",
};
- ASSERT_EQ(arraysize(expected), args.size());
+ static_assert(arraysize(expected) == arraysize(args), "Array size check.");
size_t arg_index = 0u;
for (HInstruction* arg : args) {
std::ostringstream arg_dump;
@@ -165,8 +164,7 @@
graph_->GetDexFile(), dex::TypeIndex(3), 3, DataType::Type::kInt32);
HInstruction* extra_arg2 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(4), 4, DataType::Type::kReference);
- ArenaVector<HInstruction*> args({ array, index, value, extra_arg1, extra_arg2 },
- GetAllocator()->Adapter());
+ HInstruction* const args[] = { array, index, value, extra_arg1, extra_arg2 };
for (HInstruction* insn : args) {
entry_->AddInstruction(insn);
}
@@ -179,7 +177,7 @@
/* method */ nullptr,
/* dex_pc */ 0u,
null_check);
- null_check_env->CopyFrom(args);
+ null_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
null_check->SetRawEnvironment(null_check_env);
HInstruction* length = new (GetAllocator()) HArrayLength(array, 0);
block->AddInstruction(length);
@@ -194,7 +192,7 @@
/* method */ nullptr,
/* dex_pc */ 0u,
deoptimize);
- deoptimize_env->CopyFrom(args);
+ deoptimize_env->CopyFrom(ArrayRef<HInstruction* const>(args));
deoptimize->SetRawEnvironment(deoptimize_env);
HInstruction* array_set =
new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
@@ -217,7 +215,7 @@
// Environment uses keep the reference argument alive.
"ranges: { [10,21) }, uses: { }, { 15 21 } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0",
};
- ASSERT_EQ(arraysize(expected), args.size());
+ static_assert(arraysize(expected) == arraysize(args), "Array size check.");
size_t arg_index = 0u;
for (HInstruction* arg : args) {
std::ostringstream arg_dump;
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index 3b95b86..cb27ded 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -17,7 +17,8 @@
#include "ssa_phi_elimination.h"
#include "base/arena_bit_vector.h"
-#include "base/arena_containers.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "base/bit_vector-inl.h"
namespace art {
@@ -28,10 +29,17 @@
}
void SsaDeadPhiElimination::MarkDeadPhis() {
+ // Use local allocator for allocating memory used by this optimization.
+ ScopedArenaAllocator allocator(graph_->GetArenaStack());
+
+ static constexpr size_t kDefaultWorklistSize = 8;
+ ScopedArenaVector<HPhi*> worklist(allocator.Adapter(kArenaAllocSsaPhiElimination));
+ worklist.reserve(kDefaultWorklistSize);
+
// Phis are constructed live and should not be revived if previously marked
// dead. This algorithm temporarily breaks that invariant but we DCHECK that
// only phis which were initially live are revived.
- ArenaSet<HPhi*> initially_live(graph_->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination));
+ ScopedArenaSet<HPhi*> initially_live(allocator.Adapter(kArenaAllocSsaPhiElimination));
// Add to the worklist phis referenced by non-phi instructions.
for (HBasicBlock* block : graph_->GetReversePostOrder()) {
@@ -52,7 +60,7 @@
}
if (keep_alive) {
- worklist_.push_back(phi);
+ worklist.push_back(phi);
} else {
phi->SetDead();
if (kIsDebugBuild) {
@@ -63,9 +71,9 @@
}
// Process the worklist by propagating liveness to phi inputs.
- while (!worklist_.empty()) {
- HPhi* phi = worklist_.back();
- worklist_.pop_back();
+ while (!worklist.empty()) {
+ HPhi* phi = worklist.back();
+ worklist.pop_back();
for (HInstruction* raw_input : phi->GetInputs()) {
HPhi* input = raw_input->AsPhi();
if (input != nullptr && input->IsDead()) {
@@ -73,7 +81,7 @@
// that the phi was not dead initially (see definition of `initially_live`).
DCHECK(ContainsElement(initially_live, input));
input->SetLive();
- worklist_.push_back(input);
+ worklist.push_back(input);
}
}
}
@@ -115,23 +123,31 @@
}
void SsaRedundantPhiElimination::Run() {
+ // Use local allocator for allocating memory used by this optimization.
+ ScopedArenaAllocator allocator(graph_->GetArenaStack());
+
+ static constexpr size_t kDefaultWorklistSize = 8;
+ ScopedArenaVector<HPhi*> worklist(allocator.Adapter(kArenaAllocSsaPhiElimination));
+ worklist.reserve(kDefaultWorklistSize);
+
// Add all phis in the worklist. Order does not matter for correctness, and
// neither will necessarily converge faster.
for (HBasicBlock* block : graph_->GetReversePostOrder()) {
for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
- worklist_.push_back(inst_it.Current()->AsPhi());
+ worklist.push_back(inst_it.Current()->AsPhi());
}
}
- ArenaBitVector visited_phis_in_cycle(graph_->GetAllocator(),
+ ArenaBitVector visited_phis_in_cycle(&allocator,
graph_->GetCurrentInstructionId(),
/* expandable */ false,
kArenaAllocSsaPhiElimination);
- ArenaVector<HPhi*> cycle_worklist(graph_->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination));
+ visited_phis_in_cycle.ClearAllBits();
+ ScopedArenaVector<HPhi*> cycle_worklist(allocator.Adapter(kArenaAllocSsaPhiElimination));
- while (!worklist_.empty()) {
- HPhi* phi = worklist_.back();
- worklist_.pop_back();
+ while (!worklist.empty()) {
+ HPhi* phi = worklist.back();
+ worklist.pop_back();
// If the phi has already been processed, continue.
if (!phi->IsInBlock()) {
@@ -231,7 +247,7 @@
for (const HUseListNode<HInstruction*>& use : current->GetUses()) {
HInstruction* user = use.GetUser();
if (user->IsPhi() && !visited_phis_in_cycle.IsBitSet(user->GetId())) {
- worklist_.push_back(user->AsPhi());
+ worklist.push_back(user->AsPhi());
}
}
DCHECK(candidate->StrictlyDominates(current));
diff --git a/compiler/optimizing/ssa_phi_elimination.h b/compiler/optimizing/ssa_phi_elimination.h
index e0cde07..11d5837 100644
--- a/compiler/optimizing/ssa_phi_elimination.h
+++ b/compiler/optimizing/ssa_phi_elimination.h
@@ -17,7 +17,6 @@
#ifndef ART_COMPILER_OPTIMIZING_SSA_PHI_ELIMINATION_H_
#define ART_COMPILER_OPTIMIZING_SSA_PHI_ELIMINATION_H_
-#include "base/arena_containers.h"
#include "nodes.h"
#include "optimization.h"
@@ -30,10 +29,7 @@
class SsaDeadPhiElimination : public HOptimization {
public:
explicit SsaDeadPhiElimination(HGraph* graph)
- : HOptimization(graph, kSsaDeadPhiEliminationPassName),
- worklist_(graph->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination)) {
- worklist_.reserve(kDefaultWorklistSize);
- }
+ : HOptimization(graph, kSsaDeadPhiEliminationPassName) {}
void Run() OVERRIDE;
@@ -43,10 +39,6 @@
static constexpr const char* kSsaDeadPhiEliminationPassName = "dead_phi_elimination";
private:
- ArenaVector<HPhi*> worklist_;
-
- static constexpr size_t kDefaultWorklistSize = 8;
-
DISALLOW_COPY_AND_ASSIGN(SsaDeadPhiElimination);
};
@@ -59,20 +51,13 @@
class SsaRedundantPhiElimination : public HOptimization {
public:
explicit SsaRedundantPhiElimination(HGraph* graph)
- : HOptimization(graph, kSsaRedundantPhiEliminationPassName),
- worklist_(graph->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination)) {
- worklist_.reserve(kDefaultWorklistSize);
- }
+ : HOptimization(graph, kSsaRedundantPhiEliminationPassName) {}
void Run() OVERRIDE;
static constexpr const char* kSsaRedundantPhiEliminationPassName = "redundant_phi_elimination";
private:
- ArenaVector<HPhi*> worklist_;
-
- static constexpr size_t kDefaultWorklistSize = 8;
-
DISALLOW_COPY_AND_ASSIGN(SsaRedundantPhiElimination);
};
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index a574566..62ed7ee 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -223,7 +223,7 @@
size_t dex_register_locations_index) const;
void CheckCodeInfo(MemoryRegion region) const;
- ArenaAllocator* allocator_;
+ ArenaAllocator* const allocator_;
const InstructionSet instruction_set_;
ArenaVector<StackMapEntry> stack_maps_;
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index a842c6e..96ac368 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -47,10 +47,10 @@
TEST(StackMapTest, Test1) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- StackMapStream stream(&arena, kRuntimeISA);
+ ArenaAllocator allocator(&pool);
+ StackMapStream stream(&allocator, kRuntimeISA);
- ArenaBitVector sp_mask(&arena, 0, false);
+ ArenaBitVector sp_mask(&allocator, 0, false);
size_t number_of_dex_registers = 2;
stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
stream.AddDexRegisterEntry(Kind::kInStack, 0); // Short location.
@@ -58,7 +58,7 @@
stream.EndStackMapEntry();
size_t size = stream.PrepareForFillIn();
- void* memory = arena.Alloc(size, kArenaAllocMisc);
+ void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillInCodeInfo(region);
@@ -128,11 +128,11 @@
TEST(StackMapTest, Test2) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- StackMapStream stream(&arena, kRuntimeISA);
+ ArenaAllocator allocator(&pool);
+ StackMapStream stream(&allocator, kRuntimeISA);
ArtMethod art_method;
- ArenaBitVector sp_mask1(&arena, 0, true);
+ ArenaBitVector sp_mask1(&allocator, 0, true);
sp_mask1.SetBit(2);
sp_mask1.SetBit(4);
size_t number_of_dex_registers = 2;
@@ -146,7 +146,7 @@
stream.EndInlineInfoEntry();
stream.EndStackMapEntry();
- ArenaBitVector sp_mask2(&arena, 0, true);
+ ArenaBitVector sp_mask2(&allocator, 0, true);
sp_mask2.SetBit(3);
sp_mask2.SetBit(8);
stream.BeginStackMapEntry(1, 128, 0xFF, &sp_mask2, number_of_dex_registers, 0);
@@ -154,7 +154,7 @@
stream.AddDexRegisterEntry(Kind::kInFpuRegister, 3); // Short location.
stream.EndStackMapEntry();
- ArenaBitVector sp_mask3(&arena, 0, true);
+ ArenaBitVector sp_mask3(&allocator, 0, true);
sp_mask3.SetBit(1);
sp_mask3.SetBit(5);
stream.BeginStackMapEntry(2, 192, 0xAB, &sp_mask3, number_of_dex_registers, 0);
@@ -162,7 +162,7 @@
stream.AddDexRegisterEntry(Kind::kInRegisterHigh, 8); // Short location.
stream.EndStackMapEntry();
- ArenaBitVector sp_mask4(&arena, 0, true);
+ ArenaBitVector sp_mask4(&allocator, 0, true);
sp_mask4.SetBit(6);
sp_mask4.SetBit(7);
stream.BeginStackMapEntry(3, 256, 0xCD, &sp_mask4, number_of_dex_registers, 0);
@@ -171,7 +171,7 @@
stream.EndStackMapEntry();
size_t size = stream.PrepareForFillIn();
- void* memory = arena.Alloc(size, kArenaAllocMisc);
+ void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillInCodeInfo(region);
@@ -412,11 +412,11 @@
TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- StackMapStream stream(&arena, kRuntimeISA);
+ ArenaAllocator allocator(&pool);
+ StackMapStream stream(&allocator, kRuntimeISA);
ArtMethod art_method;
- ArenaBitVector sp_mask1(&arena, 0, true);
+ ArenaBitVector sp_mask1(&allocator, 0, true);
sp_mask1.SetBit(2);
sp_mask1.SetBit(4);
const size_t number_of_dex_registers = 2;
@@ -431,7 +431,7 @@
stream.EndStackMapEntry();
size_t size = stream.PrepareForFillIn();
- void* memory = arena.Alloc(size, kArenaAllocMisc);
+ void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillInCodeInfo(region);
@@ -506,10 +506,10 @@
TEST(StackMapTest, TestNonLiveDexRegisters) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- StackMapStream stream(&arena, kRuntimeISA);
+ ArenaAllocator allocator(&pool);
+ StackMapStream stream(&allocator, kRuntimeISA);
- ArenaBitVector sp_mask(&arena, 0, false);
+ ArenaBitVector sp_mask(&allocator, 0, false);
uint32_t number_of_dex_registers = 2;
stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
stream.AddDexRegisterEntry(Kind::kNone, 0); // No location.
@@ -517,7 +517,7 @@
stream.EndStackMapEntry();
size_t size = stream.PrepareForFillIn();
- void* memory = arena.Alloc(size, kArenaAllocMisc);
+ void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillInCodeInfo(region);
@@ -585,10 +585,10 @@
// not treat it as kNoDexRegisterMap.
TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- StackMapStream stream(&arena, kRuntimeISA);
+ ArenaAllocator allocator(&pool);
+ StackMapStream stream(&allocator, kRuntimeISA);
- ArenaBitVector sp_mask(&arena, 0, false);
+ ArenaBitVector sp_mask(&allocator, 0, false);
uint32_t number_of_dex_registers = 1024;
// Create the first stack map (and its Dex register map).
stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
@@ -609,7 +609,7 @@
stream.EndStackMapEntry();
size_t size = stream.PrepareForFillIn();
- void* memory = arena.Alloc(size, kArenaAllocMisc);
+ void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillInCodeInfo(region);
@@ -648,10 +648,10 @@
TEST(StackMapTest, TestShareDexRegisterMap) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- StackMapStream stream(&arena, kRuntimeISA);
+ ArenaAllocator allocator(&pool);
+ StackMapStream stream(&allocator, kRuntimeISA);
- ArenaBitVector sp_mask(&arena, 0, false);
+ ArenaBitVector sp_mask(&allocator, 0, false);
uint32_t number_of_dex_registers = 2;
// First stack map.
stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
@@ -670,7 +670,7 @@
stream.EndStackMapEntry();
size_t size = stream.PrepareForFillIn();
- void* memory = arena.Alloc(size, kArenaAllocMisc);
+ void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillInCodeInfo(region);
@@ -706,10 +706,10 @@
TEST(StackMapTest, TestNoDexRegisterMap) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- StackMapStream stream(&arena, kRuntimeISA);
+ ArenaAllocator allocator(&pool);
+ StackMapStream stream(&allocator, kRuntimeISA);
- ArenaBitVector sp_mask(&arena, 0, false);
+ ArenaBitVector sp_mask(&allocator, 0, false);
uint32_t number_of_dex_registers = 0;
stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
stream.EndStackMapEntry();
@@ -719,7 +719,7 @@
stream.EndStackMapEntry();
size_t size = stream.PrepareForFillIn();
- void* memory = arena.Alloc(size, kArenaAllocMisc);
+ void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillInCodeInfo(region);
@@ -755,11 +755,11 @@
TEST(StackMapTest, InlineTest) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- StackMapStream stream(&arena, kRuntimeISA);
+ ArenaAllocator allocator(&pool);
+ StackMapStream stream(&allocator, kRuntimeISA);
ArtMethod art_method;
- ArenaBitVector sp_mask1(&arena, 0, true);
+ ArenaBitVector sp_mask1(&allocator, 0, true);
sp_mask1.SetBit(2);
sp_mask1.SetBit(4);
@@ -821,7 +821,7 @@
stream.EndStackMapEntry();
size_t size = stream.PrepareForFillIn();
- void* memory = arena.Alloc(size, kArenaAllocMisc);
+ void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillInCodeInfo(region);
@@ -936,10 +936,10 @@
TEST(StackMapTest, TestDeduplicateStackMask) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- StackMapStream stream(&arena, kRuntimeISA);
+ ArenaAllocator allocator(&pool);
+ StackMapStream stream(&allocator, kRuntimeISA);
- ArenaBitVector sp_mask(&arena, 0, true);
+ ArenaBitVector sp_mask(&allocator, 0, true);
sp_mask.SetBit(1);
sp_mask.SetBit(4);
stream.BeginStackMapEntry(0, 4, 0x3, &sp_mask, 0, 0);
@@ -948,7 +948,7 @@
stream.EndStackMapEntry();
size_t size = stream.PrepareForFillIn();
- void* memory = arena.Alloc(size, kArenaAllocMisc);
+ void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillInCodeInfo(region);
@@ -964,10 +964,10 @@
TEST(StackMapTest, TestInvokeInfo) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- StackMapStream stream(&arena, kRuntimeISA);
+ ArenaAllocator allocator(&pool);
+ StackMapStream stream(&allocator, kRuntimeISA);
- ArenaBitVector sp_mask(&arena, 0, true);
+ ArenaBitVector sp_mask(&allocator, 0, true);
sp_mask.SetBit(1);
stream.BeginStackMapEntry(0, 4, 0x3, &sp_mask, 0, 0);
stream.AddInvoke(kSuper, 1);
@@ -980,11 +980,12 @@
stream.EndStackMapEntry();
const size_t code_info_size = stream.PrepareForFillIn();
- MemoryRegion code_info_region(arena.Alloc(code_info_size, kArenaAllocMisc), code_info_size);
+ MemoryRegion code_info_region(allocator.Alloc(code_info_size, kArenaAllocMisc), code_info_size);
stream.FillInCodeInfo(code_info_region);
const size_t method_info_size = stream.ComputeMethodInfoSize();
- MemoryRegion method_info_region(arena.Alloc(method_info_size, kArenaAllocMisc), method_info_size);
+ MemoryRegion method_info_region(allocator.Alloc(method_info_size, kArenaAllocMisc),
+ method_info_size);
stream.FillInMethodInfo(method_info_region);
CodeInfo code_info(code_info_region);
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 1e9a521..9527a60 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -244,19 +244,19 @@
EntryPointCallingConvention abi,
ThreadOffset64 offset) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
switch (isa) {
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
- return arm64::CreateTrampoline(&arena, abi, offset);
+ return arm64::CreateTrampoline(&allocator, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
- return mips64::CreateTrampoline(&arena, abi, offset);
+ return mips64::CreateTrampoline(&allocator, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
- return x86_64::CreateTrampoline(&arena, offset);
+ return x86_64::CreateTrampoline(&allocator, offset);
#endif
default:
UNUSED(abi);
@@ -270,21 +270,21 @@
EntryPointCallingConvention abi,
ThreadOffset32 offset) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
switch (isa) {
#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
case kThumb2:
- return arm::CreateTrampoline(&arena, abi, offset);
+ return arm::CreateTrampoline(&allocator, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
- return mips::CreateTrampoline(&arena, abi, offset);
+ return mips::CreateTrampoline(&allocator, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86:
UNUSED(abi);
- return x86::CreateTrampoline(&arena, offset);
+ return x86::CreateTrampoline(&allocator, offset);
#endif
default:
LOG(FATAL) << "Unexpected InstructionSet: " << isa;
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index dbd35ab..e0cef85 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -252,7 +252,7 @@
// for a single, fast space check per instruction.
static const int kMinimumGap = 32;
- ArenaAllocator* allocator_;
+ ArenaAllocator* const allocator_;
uint8_t* contents_;
uint8_t* cursor_;
uint8_t* limit_;
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 11a9b91..ae7636b 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -719,8 +719,8 @@
explicit AssemblerTest() {}
void SetUp() OVERRIDE {
- arena_.reset(new ArenaAllocator(&pool_));
- assembler_.reset(CreateAssembler(arena_.get()));
+ allocator_.reset(new ArenaAllocator(&pool_));
+ assembler_.reset(CreateAssembler(allocator_.get()));
test_helper_.reset(
new AssemblerTestInfrastructure(GetArchitectureString(),
GetAssemblerCmdName(),
@@ -737,7 +737,7 @@
void TearDown() OVERRIDE {
test_helper_.reset(); // Clean up the helper.
assembler_.reset();
- arena_.reset();
+ allocator_.reset();
}
// Override this to set up any architecture-specific things, e.g., CPU revision.
@@ -1589,7 +1589,7 @@
static constexpr size_t kWarnManyCombinationsThreshold = 500;
ArenaPool pool_;
- std::unique_ptr<ArenaAllocator> arena_;
+ std::unique_ptr<ArenaAllocator> allocator_;
std::unique_ptr<Ass> assembler_;
std::unique_ptr<AssemblerTestInfrastructure> test_helper_;
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 5622f89..5307d17 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -167,10 +167,10 @@
class ArmVIXLAssemblerTest : public ::testing::Test {
public:
- ArmVIXLAssemblerTest() : pool(), arena(&pool), assembler(&arena) { }
+ ArmVIXLAssemblerTest() : pool(), allocator(&pool), assembler(&allocator) { }
ArenaPool pool;
- ArenaAllocator arena;
+ ArenaAllocator allocator;
ArmVIXLJNIMacroAssembler assembler;
};
@@ -209,18 +209,16 @@
const bool is_critical_native = false;
const char* shorty = "IIFII";
- ArenaPool pool;
- ArenaAllocator arena(&pool);
-
std::unique_ptr<JniCallingConvention> jni_conv(
- JniCallingConvention::Create(&arena,
+ JniCallingConvention::Create(&allocator,
is_static,
is_synchronized,
is_critical_native,
shorty,
kThumb2));
std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
- ManagedRuntimeCallingConvention::Create(&arena, is_static, is_synchronized, shorty, kThumb2));
+ ManagedRuntimeCallingConvention::Create(
+ &allocator, is_static, is_synchronized, shorty, kThumb2));
const int frame_size(jni_conv->FrameSize());
ArrayRef<const ManagedRegister> callee_save_regs = jni_conv->CalleeSaveRegisters();
diff --git a/compiler/utils/jni_macro_assembler_test.h b/compiler/utils/jni_macro_assembler_test.h
index ba95e21..34ab4c3 100644
--- a/compiler/utils/jni_macro_assembler_test.h
+++ b/compiler/utils/jni_macro_assembler_test.h
@@ -58,8 +58,8 @@
explicit JNIMacroAssemblerTest() {}
void SetUp() OVERRIDE {
- arena_.reset(new ArenaAllocator(&pool_));
- assembler_.reset(CreateAssembler(arena_.get()));
+ allocator_.reset(new ArenaAllocator(&pool_));
+ assembler_.reset(CreateAssembler(allocator_.get()));
test_helper_.reset(
new AssemblerTestInfrastructure(GetArchitectureString(),
GetAssemblerCmdName(),
@@ -76,7 +76,7 @@
void TearDown() OVERRIDE {
test_helper_.reset(); // Clean up the helper.
assembler_.reset();
- arena_.reset();
+ allocator_.reset();
}
// Override this to set up any architecture-specific things, e.g., CPU revision.
@@ -140,7 +140,7 @@
}
ArenaPool pool_;
- std::unique_ptr<ArenaAllocator> arena_;
+ std::unique_ptr<ArenaAllocator> allocator_;
std::unique_ptr<Ass> assembler_;
std::unique_ptr<AssemblerTestInfrastructure> test_helper_;
diff --git a/compiler/utils/test_dex_file_builder.h b/compiler/utils/test_dex_file_builder.h
index 9ba3903..e6501e0 100644
--- a/compiler/utils/test_dex_file_builder.h
+++ b/compiler/utils/test_dex_file_builder.h
@@ -26,7 +26,8 @@
#include "base/bit_utils.h"
#include "base/logging.h"
-#include "dex_file.h"
+#include "dex_file_loader.h"
+#include "native_dex_file.h"
namespace art {
@@ -88,8 +89,8 @@
} header_data;
std::memset(header_data.data, 0, sizeof(header_data.data));
DexFile::Header* header = reinterpret_cast<DexFile::Header*>(&header_data.data);
- std::copy_n(DexFile::kDexMagic, 4u, header->magic_);
- std::copy_n(DexFile::kDexMagicVersions[0], 4u, header->magic_ + 4u);
+ std::copy_n(NativeDexFile::kDexMagic, 4u, header->magic_);
+ std::copy_n(NativeDexFile::kDexMagicVersions[0], 4u, header->magic_ + 4u);
header->header_size_ = sizeof(DexFile::Header);
header->endian_tag_ = DexFile::kDexEndianConstant;
header->link_size_ = 0u; // Unused.
@@ -231,7 +232,7 @@
static constexpr bool kVerify = false;
static constexpr bool kVerifyChecksum = false;
std::string error_msg;
- std::unique_ptr<const DexFile> dex_file(DexFile::Open(
+ std::unique_ptr<const DexFile> dex_file(DexFileLoader::Open(
&dex_file_data_[0],
dex_file_data_.size(),
dex_location,
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index cccde37..e232add 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -24,8 +24,8 @@
TEST(AssemblerX86, CreateBuffer) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- AssemblerBuffer buffer(&arena);
+ ArenaAllocator allocator(&pool);
+ AssemblerBuffer buffer(&allocator);
AssemblerBuffer::EnsureCapacity ensured(&buffer);
buffer.Emit<uint8_t>(0x42);
ASSERT_EQ(static_cast<size_t>(1), buffer.Size());
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index b08ba4a..0cb3ffd 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -30,8 +30,8 @@
TEST(AssemblerX86_64, CreateBuffer) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- AssemblerBuffer buffer(&arena);
+ ArenaAllocator allocator(&pool);
+ AssemblerBuffer buffer(&allocator);
AssemblerBuffer::EnsureCapacity ensured(&buffer);
buffer.Emit<uint8_t>(0x42);
ASSERT_EQ(static_cast<size_t>(1), buffer.Size());
diff --git a/dex2oat/dex2oat_image_test.cc b/dex2oat/dex2oat_image_test.cc
index f20e934..ae7ebe2 100644
--- a/dex2oat/dex2oat_image_test.cc
+++ b/dex2oat/dex2oat_image_test.cc
@@ -28,6 +28,7 @@
#include "base/macros.h"
#include "base/unix_file/fd_file.h"
#include "dex_file-inl.h"
+#include "dex_file_loader.h"
#include "jit/profile_compilation_info.h"
#include "method_reference.h"
#include "runtime.h"
@@ -62,7 +63,11 @@
for (const std::string& dex : GetLibCoreDexFileNames()) {
std::vector<std::unique_ptr<const DexFile>> dex_files;
std::string error_msg;
- CHECK(DexFile::Open(dex.c_str(), dex, /*verify_checksum*/ false, &error_msg, &dex_files))
+ CHECK(DexFileLoader::Open(dex.c_str(),
+ dex,
+ /*verify_checksum*/ false,
+ &error_msg,
+ &dex_files))
<< error_msg;
for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
for (size_t i = 0; i < dex_file->NumMethodIds(); ++i) {
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 5bf3513..1b731fc 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -33,6 +33,7 @@
#include "dex2oat_environment_test.h"
#include "dex2oat_return_codes.h"
#include "dex_file-inl.h"
+#include "dex_file_loader.h"
#include "jit/profile_compilation_info.h"
#include "oat.h"
#include "oat_file.h"
@@ -677,7 +678,7 @@
const char* location = dex_location.c_str();
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_TRUE(DexFile::Open(location, location, true, &error_msg, &dex_files));
+ ASSERT_TRUE(DexFileLoader::Open(location, location, true, &error_msg, &dex_files));
EXPECT_EQ(dex_files.size(), 1U);
std::unique_ptr<const DexFile>& dex_file = dex_files[0];
GenerateProfile(profile_location,
@@ -811,7 +812,7 @@
const char* location = dex_location.c_str();
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_TRUE(DexFile::Open(location, location, true, &error_msg, &dex_files));
+ ASSERT_TRUE(DexFileLoader::Open(location, location, true, &error_msg, &dex_files));
EXPECT_EQ(dex_files.size(), 1U);
std::unique_ptr<const DexFile>& old_dex_file = dex_files[0];
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index dfbe31a..05af442 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -34,6 +34,7 @@
#include "debug/method_debug_info.h"
#include "dex/verification_results.h"
#include "dex_file-inl.h"
+#include "dex_file_loader.h"
#include "dex_file_types.h"
#include "dexlayout.h"
#include "driver/compiler_driver-inl.h"
@@ -52,6 +53,7 @@
#include "mirror/class_loader.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
+#include "native_dex_file.h"
#include "oat_quick_method_header.h"
#include "os.h"
#include "safe_map.h"
@@ -415,7 +417,7 @@
if (fd.Fd() == -1) {
PLOG(ERROR) << "Failed to read magic number from dex file: '" << filename << "'";
return false;
- } else if (IsDexMagic(magic)) {
+ } else if (DexFileLoader::IsValidMagic(magic)) {
// The file is open for reading, not writing, so it's OK to let the File destructor
// close it without checking for explicit Close(), so pass checkUsage = false.
raw_dex_files_.emplace_back(new File(fd.Release(), location, /* checkUsage */ false));
@@ -447,13 +449,13 @@
return false;
}
for (size_t i = 0; ; ++i) {
- std::string entry_name = DexFile::GetMultiDexClassesDexName(i);
+ std::string entry_name = DexFileLoader::GetMultiDexClassesDexName(i);
std::unique_ptr<ZipEntry> entry(zip_archive->Find(entry_name.c_str(), &error_msg));
if (entry == nullptr) {
break;
}
zipped_dex_files_.push_back(std::move(entry));
- zipped_dex_file_locations_.push_back(DexFile::GetMultiDexLocation(i, location));
+ zipped_dex_file_locations_.push_back(DexFileLoader::GetMultiDexLocation(i, location));
const char* full_location = zipped_dex_file_locations_.back().c_str();
oat_dex_files_.emplace_back(full_location,
DexFileSource(zipped_dex_files_.back().get()),
@@ -478,12 +480,13 @@
LOG(ERROR) << "Unexpected number of dex files in vdex " << location;
return false;
}
- if (!DexFile::IsMagicValid(current_dex_data)) {
+
+ if (!DexFileLoader::IsValidMagic(current_dex_data)) {
LOG(ERROR) << "Invalid magic in vdex file created from " << location;
return false;
}
// We used `zipped_dex_file_locations_` to keep the strings in memory.
- zipped_dex_file_locations_.push_back(DexFile::GetMultiDexLocation(i, location));
+ zipped_dex_file_locations_.push_back(DexFileLoader::GetMultiDexLocation(i, location));
const char* full_location = zipped_dex_file_locations_.back().c_str();
oat_dex_files_.emplace_back(full_location,
DexFileSource(current_dex_data),
@@ -3107,11 +3110,12 @@
}
bool OatWriter::ValidateDexFileHeader(const uint8_t* raw_header, const char* location) {
- if (!DexFile::IsMagicValid(raw_header)) {
+ const bool valid_native_dex_magic = NativeDexFile::IsMagicValid(raw_header);
+ if (!valid_native_dex_magic) {
LOG(ERROR) << "Invalid magic number in dex file header. " << " File: " << location;
return false;
}
- if (!DexFile::IsVersionValid(raw_header)) {
+ if (!NativeDexFile::IsVersionValid(raw_header)) {
LOG(ERROR) << "Invalid version number in dex file header. " << " File: " << location;
return false;
}
@@ -3242,12 +3246,12 @@
LOG(ERROR) << "Failed to extract dex file to mem map for layout: " << error_msg;
return false;
}
- dex_file = DexFile::Open(location,
- zip_entry->GetCrc32(),
- std::move(mem_map),
- /* verify */ true,
- /* verify_checksum */ true,
- &error_msg);
+ dex_file = DexFileLoader::Open(location,
+ zip_entry->GetCrc32(),
+ std::move(mem_map),
+ /* verify */ true,
+ /* verify_checksum */ true,
+ &error_msg);
} else if (oat_dex_file->source_.IsRawFile()) {
File* raw_file = oat_dex_file->source_.GetRawFile();
int dup_fd = dup(raw_file->Fd());
@@ -3255,7 +3259,7 @@
PLOG(ERROR) << "Failed to dup dex file descriptor (" << raw_file->Fd() << ") at " << location;
return false;
}
- dex_file = DexFile::OpenDex(dup_fd, location, /* verify_checksum */ true, &error_msg);
+ dex_file = DexFileLoader::OpenDex(dup_fd, location, /* verify_checksum */ true, &error_msg);
} else {
// The source data is a vdex file.
CHECK(oat_dex_file->source_.IsRawData())
@@ -3267,14 +3271,14 @@
DCHECK(ValidateDexFileHeader(raw_dex_file, oat_dex_file->GetLocation()));
const UnalignedDexFileHeader* header = AsUnalignedDexFileHeader(raw_dex_file);
// Since the source may have had its layout changed, or may be quickened, don't verify it.
- dex_file = DexFile::Open(raw_dex_file,
- header->file_size_,
- location,
- oat_dex_file->dex_file_location_checksum_,
- nullptr,
- /* verify */ false,
- /* verify_checksum */ false,
- &error_msg);
+ dex_file = DexFileLoader::Open(raw_dex_file,
+ header->file_size_,
+ location,
+ oat_dex_file->dex_file_location_checksum_,
+ nullptr,
+ /* verify */ false,
+ /* verify_checksum */ false,
+ &error_msg);
}
if (dex_file == nullptr) {
LOG(ERROR) << "Failed to open dex file for layout: " << error_msg;
@@ -3532,14 +3536,14 @@
}
// Now, open the dex file.
- dex_files.emplace_back(DexFile::Open(raw_dex_file,
- oat_dex_file.dex_file_size_,
- oat_dex_file.GetLocation(),
- oat_dex_file.dex_file_location_checksum_,
- /* oat_dex_file */ nullptr,
- verify,
- verify,
- &error_msg));
+ dex_files.emplace_back(DexFileLoader::Open(raw_dex_file,
+ oat_dex_file.dex_file_size_,
+ oat_dex_file.GetLocation(),
+ oat_dex_file.dex_file_location_checksum_,
+ /* oat_dex_file */ nullptr,
+ verify,
+ verify,
+ &error_msg));
if (dex_files.back() == nullptr) {
LOG(ERROR) << "Failed to open dex file from oat file. File: " << oat_dex_file.GetLocation()
<< " Error: " << error_msg;
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index 50434ef..a19057a 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -26,6 +26,7 @@
#include "compiled_method-inl.h"
#include "compiler.h"
#include "debug/method_debug_info.h"
+#include "dex_file_loader.h"
#include "dex/quick_compiler_callbacks.h"
#include "dex/verification_results.h"
#include "driver/compiler_driver.h"
@@ -745,14 +746,14 @@
ASSERT_EQ(0, memcmp(&dex_file1_data->GetHeader(),
&opened_dex_file1->GetHeader(),
dex_file1_data->GetHeader().file_size_));
- ASSERT_EQ(DexFile::GetMultiDexLocation(0, zip_file.GetFilename().c_str()),
+ ASSERT_EQ(DexFileLoader::GetMultiDexLocation(0, zip_file.GetFilename().c_str()),
opened_dex_file1->GetLocation());
ASSERT_EQ(dex_file2_data->GetHeader().file_size_, opened_dex_file2->GetHeader().file_size_);
ASSERT_EQ(0, memcmp(&dex_file2_data->GetHeader(),
&opened_dex_file2->GetHeader(),
dex_file2_data->GetHeader().file_size_));
- ASSERT_EQ(DexFile::GetMultiDexLocation(1, zip_file.GetFilename().c_str()),
+ ASSERT_EQ(DexFileLoader::GetMultiDexLocation(1, zip_file.GetFilename().c_str()),
opened_dex_file2->GetLocation());
}
}
@@ -794,14 +795,14 @@
ASSERT_EQ(0, memcmp(&dex_file1_data->GetHeader(),
&opened_dex_file1->GetHeader(),
dex_file1_data->GetHeader().file_size_));
- ASSERT_EQ(DexFile::GetMultiDexLocation(0, zip_file.GetFilename().c_str()),
+ ASSERT_EQ(DexFileLoader::GetMultiDexLocation(0, zip_file.GetFilename().c_str()),
opened_dex_file1->GetLocation());
ASSERT_EQ(dex_file2_data->GetHeader().file_size_, opened_dex_file2->GetHeader().file_size_);
ASSERT_EQ(0, memcmp(&dex_file2_data->GetHeader(),
&opened_dex_file2->GetHeader(),
dex_file2_data->GetHeader().file_size_));
- ASSERT_EQ(DexFile::GetMultiDexLocation(1, zip_file.GetFilename().c_str()),
+ ASSERT_EQ(DexFileLoader::GetMultiDexLocation(1, zip_file.GetFilename().c_str()),
opened_dex_file2->GetLocation());
}
}
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index 7599d23..3648a3e 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -45,6 +45,7 @@
#include "android-base/stringprintf.h"
#include "dex_file-inl.h"
+#include "dex_file_loader.h"
#include "dex_file_types.h"
#include "dex_instruction-inl.h"
#include "dexdump_cfg.h"
@@ -1825,7 +1826,7 @@
fputs("Opened '", gOutFile);
fputs(fileName, gOutFile);
if (n > 1) {
- fprintf(gOutFile, ":%s", DexFile::GetMultiDexClassesDexName(i).c_str());
+ fprintf(gOutFile, ":%s", DexFileLoader::GetMultiDexClassesDexName(i).c_str());
}
fprintf(gOutFile, "', DEX version '%.3s'\n", pDexFile->GetHeader().magic_ + 4);
}
@@ -1882,7 +1883,7 @@
const bool kVerifyChecksum = !gOptions.ignoreBadChecksum;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- if (!DexFile::Open(fileName, fileName, kVerifyChecksum, &error_msg, &dex_files)) {
+ if (!DexFileLoader::Open(fileName, fileName, kVerifyChecksum, &error_msg, &dex_files)) {
// Display returned error message to user. Note that this error behavior
// differs from the error messages shown by the original Dalvik dexdump.
fputs(error_msg.c_str(), stderr);
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index 095c960..ade0072 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -35,6 +35,7 @@
#include "dex_file-inl.h"
#include "dex_file_layout.h"
+#include "dex_file_loader.h"
#include "dex_file_types.h"
#include "dex_file_verifier.h"
#include "dex_instruction-inl.h"
@@ -1929,14 +1930,14 @@
// Verify the output dex file's structure for debug builds.
if (kIsDebugBuild) {
std::string location = "memory mapped file for " + dex_file_location;
- std::unique_ptr<const DexFile> output_dex_file(DexFile::Open(mem_map_->Begin(),
- mem_map_->Size(),
- location,
- header_->Checksum(),
- /*oat_dex_file*/ nullptr,
- /*verify*/ true,
- /*verify_checksum*/ false,
- &error_msg));
+ std::unique_ptr<const DexFile> output_dex_file(DexFileLoader::Open(mem_map_->Begin(),
+ mem_map_->Size(),
+ location,
+ header_->Checksum(),
+ /*oat_dex_file*/ nullptr,
+ /*verify*/ true,
+ /*verify_checksum*/ false,
+ &error_msg));
DCHECK(output_dex_file != nullptr) << "Failed to re-open output file:" << error_msg;
}
// Do IR-level comparison between input and output. This check ignores potential differences
@@ -1998,7 +1999,7 @@
const bool verify_checksum = !options_.ignore_bad_checksum_;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- if (!DexFile::Open(file_name, file_name, verify_checksum, &error_msg, &dex_files)) {
+ if (!DexFileLoader::Open(file_name, file_name, verify_checksum, &error_msg, &dex_files)) {
// Display returned error message to user. Note that this error behavior
// differs from the error messages shown by the original Dalvik dexdump.
fputs(error_msg.c_str(), stderr);
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index 336eb5f..f8fa893 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -24,6 +24,7 @@
#include "base/unix_file/fd_file.h"
#include "common_runtime_test.h"
#include "dex_file-inl.h"
+#include "dex_file_loader.h"
#include "exec_utils.h"
#include "jit/profile_compilation_info.h"
#include "utils.h"
@@ -322,11 +323,11 @@
const std::string& dex_location) {
std::vector<std::unique_ptr<const DexFile>> dex_files;
std::string error_msg;
- bool result = DexFile::Open(input_dex.c_str(),
- input_dex,
- false,
- &error_msg,
- &dex_files);
+ bool result = DexFileLoader::Open(input_dex.c_str(),
+ input_dex,
+ false,
+ &error_msg,
+ &dex_files);
ASSERT_TRUE(result) << error_msg;
ASSERT_GE(dex_files.size(), 1u);
diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc
index 6a1e22a..e587052 100644
--- a/dexlist/dexlist.cc
+++ b/dexlist/dexlist.cc
@@ -27,6 +27,7 @@
#include <stdlib.h>
#include "dex_file-inl.h"
+#include "dex_file_loader.h"
#include "mem_map.h"
#include "runtime.h"
@@ -178,7 +179,7 @@
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- if (!DexFile::Open(fileName, fileName, kVerifyChecksum, &error_msg, &dex_files)) {
+ if (!DexFileLoader::Open(fileName, fileName, kVerifyChecksum, &error_msg, &dex_files)) {
fputs(error_msg.c_str(), stderr);
fputc('\n', stderr);
return -1;
diff --git a/dexoptanalyzer/dexoptanalyzer.cc b/dexoptanalyzer/dexoptanalyzer.cc
index 51a67ca..08d38d5 100644
--- a/dexoptanalyzer/dexoptanalyzer.cc
+++ b/dexoptanalyzer/dexoptanalyzer.cc
@@ -97,6 +97,10 @@
UsageError(" --android-data=<directory>: optional, the directory which should be used as");
UsageError(" android-data. By default ANDROID_DATA env variable is used.");
UsageError("");
+ UsageError(" --oat-fd=number: file descriptor of the oat file which should be analyzed");
+ UsageError("");
+ UsageError(" --vdex-fd=number: file descriptor of the vdex file corresponding to the oat file");
+ UsageError("");
UsageError(" --downgrade: optional, if the purpose of dexopt is to downgrade the dex file");
UsageError(" By default, dexopt considers upgrade case.");
UsageError("");
@@ -167,6 +171,10 @@
setenv("ANDROID_DATA", new_android_data.c_str(), 1);
} else if (option.starts_with("--downgrade")) {
downgrade_ = true;
+ } else if (option.starts_with("--oat-fd")) {
+ oat_fd_ = std::stoi(option.substr(strlen("--oat-fd=")).ToString(), nullptr, 0);
+ } else if (option.starts_with("--vdex-fd")) {
+ vdex_fd_ = std::stoi(option.substr(strlen("--vdex-fd=")).ToString(), nullptr, 0);
} else { Usage("Unknown argument '%s'", option.data()); }
}
@@ -181,6 +189,12 @@
Usage("--image unspecified and ANDROID_ROOT not set or image file does not exist.");
}
}
+ if (oat_fd_ > 0 && vdex_fd_ < 0) {
+ Usage("A valid --vdex-fd must also be provided with --oat-fd.");
+ }
+ if (oat_fd_ < 0 && vdex_fd_ > 0) {
+ Usage("A valid --oat-fd must also be provided with --vdex-fd.");
+ }
}
bool CreateRuntime() {
@@ -223,15 +237,26 @@
}
std::unique_ptr<Runtime> runtime(Runtime::Current());
- OatFileAssistant oat_file_assistant(dex_file_.c_str(), isa_, /*load_executable*/ false);
+ std::unique_ptr<OatFileAssistant> oat_file_assistant;
+ if (oat_fd_ != -1 && vdex_fd_ != -1) {
+ oat_file_assistant = std::make_unique<OatFileAssistant>(dex_file_.c_str(),
+ isa_,
+ false /*load_executable*/,
+ vdex_fd_,
+ oat_fd_);
+ } else {
+ oat_file_assistant = std::make_unique<OatFileAssistant>(dex_file_.c_str(),
+ isa_,
+ false /*load_executable*/);
+ }
// Always treat elements of the bootclasspath as up-to-date.
// TODO(calin): this check should be in OatFileAssistant.
- if (oat_file_assistant.IsInBootClassPath()) {
+ if (oat_file_assistant->IsInBootClassPath()) {
return kNoDexOptNeeded;
}
// TODO(calin): Pass the class loader context as an argument to dexoptanalyzer. b/62269291.
- int dexoptNeeded = oat_file_assistant.GetDexOptNeeded(
+ int dexoptNeeded = oat_file_assistant->GetDexOptNeeded(
compiler_filter_, assume_profile_changed_, downgrade_);
// Convert OatFileAssitant codes to dexoptanalyzer codes.
@@ -258,6 +283,8 @@
bool assume_profile_changed_;
bool downgrade_;
std::string image_;
+ int oat_fd_ = -1;
+ int vdex_fd_ = -1;
};
static int dexoptAnalyze(int argc, char** argv) {
diff --git a/openjdkjvmti/fixed_up_dex_file.cc b/openjdkjvmti/fixed_up_dex_file.cc
index 5bfa5ca..c498869 100644
--- a/openjdkjvmti/fixed_up_dex_file.cc
+++ b/openjdkjvmti/fixed_up_dex_file.cc
@@ -30,6 +30,7 @@
*/
#include "fixed_up_dex_file.h"
+#include "dex_file_loader.h"
#include "dex_file-inl.h"
// Runtime includes.
@@ -68,7 +69,7 @@
data.resize(original.Size());
memcpy(data.data(), original.Begin(), original.Size());
std::string error;
- std::unique_ptr<const art::DexFile> new_dex_file(art::DexFile::Open(
+ std::unique_ptr<const art::DexFile> new_dex_file(art::DexFileLoader::Open(
data.data(),
data.size(),
/*location*/"Unquickening_dexfile.dex",
diff --git a/openjdkjvmti/ti_class.cc b/openjdkjvmti/ti_class.cc
index daf4a8b..5f29416 100644
--- a/openjdkjvmti/ti_class.cc
+++ b/openjdkjvmti/ti_class.cc
@@ -43,6 +43,7 @@
#include "class_table-inl.h"
#include "common_throws.h"
#include "dex_file_annotations.h"
+#include "dex_file_loader.h"
#include "events-inl.h"
#include "fixed_up_dex_file.h"
#include "gc/heap-visit-objects-inl.h"
@@ -106,12 +107,12 @@
}
uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map->Begin())->checksum_;
std::string map_name = map->GetName();
- std::unique_ptr<const art::DexFile> dex_file(art::DexFile::Open(map_name,
- checksum,
- std::move(map),
- /*verify*/true,
- /*verify_checksum*/true,
- &error_msg));
+ std::unique_ptr<const art::DexFile> dex_file(art::DexFileLoader::Open(map_name,
+ checksum,
+ std::move(map),
+ /*verify*/true,
+ /*verify_checksum*/true,
+ &error_msg));
if (dex_file.get() == nullptr) {
LOG(WARNING) << "Unable to load modified dex file for " << descriptor << ": " << error_msg;
art::ThrowClassFormatError(nullptr,
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index 98fad80..53abfbc 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -44,6 +44,7 @@
#include "class_linker-inl.h"
#include "debugger.h"
#include "dex_file.h"
+#include "dex_file_loader.h"
#include "dex_file_types.h"
#include "events-inl.h"
#include "gc/allocation_listener.h"
@@ -425,12 +426,12 @@
return ERR(INVALID_CLASS_FORMAT);
}
uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map->Begin())->checksum_;
- std::unique_ptr<const art::DexFile> dex_file(art::DexFile::Open(map->GetName(),
- checksum,
- std::move(map),
- /*verify*/true,
- /*verify_checksum*/true,
- error_msg_));
+ std::unique_ptr<const art::DexFile> dex_file(art::DexFileLoader::Open(map->GetName(),
+ checksum,
+ std::move(map),
+ /*verify*/true,
+ /*verify_checksum*/true,
+ error_msg_));
if (dex_file.get() == nullptr) {
os << "Unable to load modified dex file for " << def.GetName() << ": " << *error_msg_;
*error_msg_ = os.str();
diff --git a/openjdkjvmti/ti_search.cc b/openjdkjvmti/ti_search.cc
index 25bc5d6..bafc855 100644
--- a/openjdkjvmti/ti_search.cc
+++ b/openjdkjvmti/ti_search.cc
@@ -39,6 +39,7 @@
#include "base/macros.h"
#include "class_linker.h"
#include "dex_file.h"
+#include "dex_file_loader.h"
#include "jni_internal.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
@@ -226,7 +227,7 @@
std::string error_msg;
std::vector<std::unique_ptr<const art::DexFile>> dex_files;
- if (!art::DexFile::Open(segment, segment, true, &error_msg, &dex_files)) {
+ if (!art::DexFileLoader::Open(segment, segment, true, &error_msg, &dex_files)) {
LOG(WARNING) << "Could not open " << segment << " for boot classpath extension: " << error_msg;
return ERR(ILLEGAL_ARGUMENT);
}
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index 73724b2..642d26e 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -35,7 +35,7 @@
class ProfileAssistantTest : public CommonRuntimeTest {
public:
void PostRuntimeCreate() OVERRIDE {
- arena_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
+ allocator_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
}
protected:
@@ -108,7 +108,7 @@
// Creates an inline cache which will be destructed at the end of the test.
ProfileCompilationInfo::InlineCacheMap* CreateInlineCacheMap() {
used_inline_caches.emplace_back(new ProfileCompilationInfo::InlineCacheMap(
- std::less<uint16_t>(), arena_->Adapter(kArenaAllocProfile)));
+ std::less<uint16_t>(), allocator_->Adapter(kArenaAllocProfile)));
return used_inline_caches.back().get();
}
@@ -122,13 +122,13 @@
// Monomorphic
for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.AddClass(0, dex::TypeIndex(0));
ic_map->Put(dex_pc, dex_pc_data);
}
// Polymorphic
for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.AddClass(0, dex::TypeIndex(0));
dex_pc_data.AddClass(1, dex::TypeIndex(1));
@@ -136,13 +136,13 @@
}
// Megamorphic
for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.SetIsMegamorphic();
ic_map->Put(dex_pc, dex_pc_data);
}
// Missing types
for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.SetIsMissingTypes();
ic_map->Put(dex_pc, dex_pc_data);
}
@@ -375,7 +375,7 @@
return ProcessProfiles(profile_fds, reference_profile_fd);
}
- std::unique_ptr<ArenaAllocator> arena_;
+ std::unique_ptr<ArenaAllocator> allocator_;
// Cache of inline caches generated during tests.
// This makes it easier to pass data between different utilities and ensure that
diff --git a/profman/profman.cc b/profman/profman.cc
index 9b4f579..8ccf7b4 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -39,6 +39,7 @@
#include "boot_image_profile.h"
#include "bytecode_utils.h"
#include "dex_file.h"
+#include "dex_file_loader.h"
#include "dex_file_types.h"
#include "jit/profile_compilation_info.h"
#include "profile_assistant.h"
@@ -328,21 +329,21 @@
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files_for_location;
if (use_apk_fd_list) {
- if (DexFile::OpenZip(apks_fd_[i],
- dex_locations_[i],
- kVerifyChecksum,
- &error_msg,
- &dex_files_for_location)) {
+ if (DexFileLoader::OpenZip(apks_fd_[i],
+ dex_locations_[i],
+ kVerifyChecksum,
+ &error_msg,
+ &dex_files_for_location)) {
} else {
LOG(WARNING) << "OpenZip failed for '" << dex_locations_[i] << "' " << error_msg;
continue;
}
} else {
- if (DexFile::Open(apk_files_[i].c_str(),
- dex_locations_[i],
- kVerifyChecksum,
- &error_msg,
- &dex_files_for_location)) {
+ if (DexFileLoader::Open(apk_files_[i].c_str(),
+ dex_locations_[i],
+ kVerifyChecksum,
+ &error_msg,
+ &dex_files_for_location)) {
} else {
LOG(WARNING) << "Open failed for '" << dex_locations_[i] << "' " << error_msg;
continue;
@@ -795,7 +796,7 @@
const DexFile* dex_file = class_ref.dex_file;
const auto& dex_resolved_classes = resolved_class_set.emplace(
dex_file->GetLocation(),
- dex_file->GetBaseLocation(),
+ DexFileLoader::GetBaseLocation(dex_file->GetLocation()),
dex_file->GetLocationChecksum(),
dex_file->NumMethodIds());
dex_resolved_classes.first->AddClass(class_ref.TypeIndex());
diff --git a/runtime/Android.bp b/runtime/Android.bp
index ea776e7..ed9906a 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -55,6 +55,7 @@
"compiler_filter.cc",
"debugger.cc",
"dex_file.cc",
+ "dex_file_loader.cc",
"dex_file_annotations.cc",
"dex_file_layout.cc",
"dex_file_tracking_registrar.cc",
@@ -152,6 +153,7 @@
"mirror/throwable.cc",
"monitor.cc",
"native_bridge_art_interface.cc",
+ "native_dex_file.cc",
"native_stack_dump.cc",
"native/dalvik_system_DexFile.cc",
"native/dalvik_system_VMDebug.cc",
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index c48e30f..2e35f8a 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -72,6 +72,7 @@
"InductionVar ",
"BCE ",
"DCE ",
+ "LSA ",
"LSE ",
"CFRE ",
"LICM ",
@@ -296,7 +297,7 @@
void ArenaPool::ReclaimMemory() {
while (free_arenas_ != nullptr) {
- auto* arena = free_arenas_;
+ Arena* arena = free_arenas_;
free_arenas_ = free_arenas_->next_;
delete arena;
}
@@ -330,7 +331,7 @@
ScopedTrace trace(__PRETTY_FUNCTION__);
// Doesn't work for malloc.
MutexLock lock(Thread::Current(), lock_);
- for (auto* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
+ for (Arena* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
arena->Release();
}
}
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 212edfb..a327cb0 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -79,6 +79,7 @@
kArenaAllocInductionVarAnalysis,
kArenaAllocBoundsCheckElimination,
kArenaAllocDCE,
+ kArenaAllocLSA,
kArenaAllocLSE,
kArenaAllocCFRE,
kArenaAllocLICM,
diff --git a/runtime/base/arena_allocator_test.cc b/runtime/base/arena_allocator_test.cc
index 6bf56c8..68e26af 100644
--- a/runtime/base/arena_allocator_test.cc
+++ b/runtime/base/arena_allocator_test.cc
@@ -34,8 +34,8 @@
TEST_F(ArenaAllocatorTest, Test) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
- ArenaBitVector bv(&arena, 10, true);
+ ArenaAllocator allocator(&pool);
+ ArenaBitVector bv(&allocator, 10, true);
bv.SetBit(5);
EXPECT_EQ(1U, bv.GetStorageSize());
bv.SetBit(35);
@@ -50,14 +50,14 @@
uint32_t* small_array;
{
// Allocate a small array from an arena and release it.
- ArenaAllocator arena(&pool);
- small_array = arena.AllocArray<uint32_t>(kSmallArraySize);
+ ArenaAllocator allocator(&pool);
+ small_array = allocator.AllocArray<uint32_t>(kSmallArraySize);
ASSERT_EQ(0u, small_array[kSmallArraySize - 1u]);
}
{
// Reuse the previous arena and allocate more than previous allocation including red zone.
- ArenaAllocator arena(&pool);
- uint32_t* large_array = arena.AllocArray<uint32_t>(kLargeArraySize);
+ ArenaAllocator allocator(&pool);
+ uint32_t* large_array = allocator.AllocArray<uint32_t>(kLargeArraySize);
ASSERT_EQ(0u, large_array[kLargeArraySize - 1u]);
// Verify that the allocation was made on the same arena.
ASSERT_EQ(small_array, large_array);
@@ -72,70 +72,72 @@
{
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
// Note: Leaving some space for memory tool red zones.
- void* alloc1 = arena.Alloc(arena_allocator::kArenaDefaultSize * 5 / 8);
- void* alloc2 = arena.Alloc(arena_allocator::kArenaDefaultSize * 2 / 8);
+ void* alloc1 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 5 / 8);
+ void* alloc2 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 2 / 8);
ASSERT_NE(alloc1, alloc2);
- ASSERT_EQ(1u, NumberOfArenas(&arena));
+ ASSERT_EQ(1u, NumberOfArenas(&allocator));
}
{
ArenaPool pool;
- ArenaAllocator arena(&pool);
- void* alloc1 = arena.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
- void* alloc2 = arena.Alloc(arena_allocator::kArenaDefaultSize * 11 / 16);
+ ArenaAllocator allocator(&pool);
+ void* alloc1 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
+ void* alloc2 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 11 / 16);
ASSERT_NE(alloc1, alloc2);
- ASSERT_EQ(2u, NumberOfArenas(&arena));
- void* alloc3 = arena.Alloc(arena_allocator::kArenaDefaultSize * 7 / 16);
+ ASSERT_EQ(2u, NumberOfArenas(&allocator));
+ void* alloc3 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 7 / 16);
ASSERT_NE(alloc1, alloc3);
ASSERT_NE(alloc2, alloc3);
- ASSERT_EQ(3u, NumberOfArenas(&arena));
+ ASSERT_EQ(3u, NumberOfArenas(&allocator));
}
{
ArenaPool pool;
- ArenaAllocator arena(&pool);
- void* alloc1 = arena.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
- void* alloc2 = arena.Alloc(arena_allocator::kArenaDefaultSize * 9 / 16);
+ ArenaAllocator allocator(&pool);
+ void* alloc1 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
+ void* alloc2 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 9 / 16);
ASSERT_NE(alloc1, alloc2);
- ASSERT_EQ(2u, NumberOfArenas(&arena));
+ ASSERT_EQ(2u, NumberOfArenas(&allocator));
// Note: Leaving some space for memory tool red zones.
- void* alloc3 = arena.Alloc(arena_allocator::kArenaDefaultSize * 5 / 16);
+ void* alloc3 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 5 / 16);
ASSERT_NE(alloc1, alloc3);
ASSERT_NE(alloc2, alloc3);
- ASSERT_EQ(2u, NumberOfArenas(&arena));
+ ASSERT_EQ(2u, NumberOfArenas(&allocator));
}
{
ArenaPool pool;
- ArenaAllocator arena(&pool);
- void* alloc1 = arena.Alloc(arena_allocator::kArenaDefaultSize * 9 / 16);
- void* alloc2 = arena.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
+ ArenaAllocator allocator(&pool);
+ void* alloc1 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 9 / 16);
+ void* alloc2 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
ASSERT_NE(alloc1, alloc2);
- ASSERT_EQ(2u, NumberOfArenas(&arena));
+ ASSERT_EQ(2u, NumberOfArenas(&allocator));
// Note: Leaving some space for memory tool red zones.
- void* alloc3 = arena.Alloc(arena_allocator::kArenaDefaultSize * 5 / 16);
+ void* alloc3 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 5 / 16);
ASSERT_NE(alloc1, alloc3);
ASSERT_NE(alloc2, alloc3);
- ASSERT_EQ(2u, NumberOfArenas(&arena));
+ ASSERT_EQ(2u, NumberOfArenas(&allocator));
}
{
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
// Note: Leaving some space for memory tool red zones.
for (size_t i = 0; i != 15; ++i) {
- arena.Alloc(arena_allocator::kArenaDefaultSize * 1 / 16); // Allocate 15 times from the same arena.
- ASSERT_EQ(i + 1u, NumberOfArenas(&arena));
- arena.Alloc(arena_allocator::kArenaDefaultSize * 17 / 16); // Allocate a separate arena.
- ASSERT_EQ(i + 2u, NumberOfArenas(&arena));
+ // Allocate 15 times from the same arena.
+ allocator.Alloc(arena_allocator::kArenaDefaultSize * 1 / 16);
+ ASSERT_EQ(i + 1u, NumberOfArenas(&allocator));
+ // Allocate a separate arena.
+ allocator.Alloc(arena_allocator::kArenaDefaultSize * 17 / 16);
+ ASSERT_EQ(i + 2u, NumberOfArenas(&allocator));
}
}
}
TEST_F(ArenaAllocatorTest, AllocAlignment) {
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
for (size_t iterations = 0; iterations <= 10; ++iterations) {
for (size_t size = 1; size <= ArenaAllocator::kAlignment + 1; ++size) {
- void* allocation = arena.Alloc(size);
+ void* allocation = allocator.Alloc(size);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(allocation))
<< reinterpret_cast<uintptr_t>(allocation);
}
@@ -152,52 +154,52 @@
{
// Case 1: small aligned allocation, aligned extend inside arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = ArenaAllocator::kAlignment * 2;
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
const size_t new_size = ArenaAllocator::kAlignment * 3;
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_EQ(original_allocation, realloc_allocation);
}
{
// Case 2: small aligned allocation, non-aligned extend inside arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = ArenaAllocator::kAlignment * 2;
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
const size_t new_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_EQ(original_allocation, realloc_allocation);
}
{
// Case 3: small non-aligned allocation, aligned extend inside arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
const size_t new_size = ArenaAllocator::kAlignment * 4;
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_EQ(original_allocation, realloc_allocation);
}
{
// Case 4: small non-aligned allocation, aligned non-extend inside arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
const size_t new_size = ArenaAllocator::kAlignment * 3;
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_EQ(original_allocation, realloc_allocation);
}
@@ -207,31 +209,31 @@
{
// Case 5: large allocation, aligned extend into next arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = arena_allocator::kArenaDefaultSize -
ArenaAllocator::kAlignment * 5;
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
const size_t new_size = arena_allocator::kArenaDefaultSize + ArenaAllocator::kAlignment * 2;
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_NE(original_allocation, realloc_allocation);
}
{
// Case 6: large allocation, non-aligned extend into next arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = arena_allocator::kArenaDefaultSize -
ArenaAllocator::kAlignment * 4 -
ArenaAllocator::kAlignment / 2;
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
const size_t new_size = arena_allocator::kArenaDefaultSize +
ArenaAllocator::kAlignment * 2 +
ArenaAllocator::kAlignment / 2;
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_NE(original_allocation, realloc_allocation);
}
}
@@ -240,68 +242,68 @@
{
// Case 1: small aligned allocation, aligned extend inside arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = ArenaAllocator::kAlignment * 2;
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
const size_t new_size = ArenaAllocator::kAlignment * 3;
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
- void* after_alloc = arena.Alloc(1);
+ void* after_alloc = allocator.Alloc(1);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
}
{
// Case 2: small aligned allocation, non-aligned extend inside arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = ArenaAllocator::kAlignment * 2;
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
const size_t new_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
- void* after_alloc = arena.Alloc(1);
+ void* after_alloc = allocator.Alloc(1);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
}
{
// Case 3: small non-aligned allocation, aligned extend inside arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
const size_t new_size = ArenaAllocator::kAlignment * 4;
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
- void* after_alloc = arena.Alloc(1);
+ void* after_alloc = allocator.Alloc(1);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
}
{
// Case 4: small non-aligned allocation, aligned non-extend inside arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
const size_t new_size = ArenaAllocator::kAlignment * 3;
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
- void* after_alloc = arena.Alloc(1);
+ void* after_alloc = allocator.Alloc(1);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
}
@@ -311,39 +313,39 @@
{
// Case 5: large allocation, aligned extend into next arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = arena_allocator::kArenaDefaultSize -
ArenaAllocator::kAlignment * 5;
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
const size_t new_size = arena_allocator::kArenaDefaultSize + ArenaAllocator::kAlignment * 2;
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
- void* after_alloc = arena.Alloc(1);
+ void* after_alloc = allocator.Alloc(1);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
}
{
// Case 6: large allocation, non-aligned extend into next arena.
ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator allocator(&pool);
const size_t original_size = arena_allocator::kArenaDefaultSize -
ArenaAllocator::kAlignment * 4 -
ArenaAllocator::kAlignment / 2;
- void* original_allocation = arena.Alloc(original_size);
+ void* original_allocation = allocator.Alloc(original_size);
ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
const size_t new_size = arena_allocator::kArenaDefaultSize +
ArenaAllocator::kAlignment * 2 +
ArenaAllocator::kAlignment / 2;
- void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ void* realloc_allocation = allocator.Realloc(original_allocation, original_size, new_size);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
- void* after_alloc = arena.Alloc(1);
+ void* after_alloc = allocator.Alloc(1);
EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
}
}
diff --git a/runtime/base/arena_bit_vector.cc b/runtime/base/arena_bit_vector.cc
index 5f8f5d2..1542e9d 100644
--- a/runtime/base/arena_bit_vector.cc
+++ b/runtime/base/arena_bit_vector.cc
@@ -52,9 +52,9 @@
template <typename ArenaAlloc>
class ArenaBitVectorAllocator FINAL : public Allocator, private ArenaBitVectorAllocatorKind {
public:
- static ArenaBitVectorAllocator* Create(ArenaAlloc* arena, ArenaAllocKind kind) {
- void* storage = arena->template Alloc<ArenaBitVectorAllocator>(kind);
- return new (storage) ArenaBitVectorAllocator(arena, kind);
+ static ArenaBitVectorAllocator* Create(ArenaAlloc* allocator, ArenaAllocKind kind) {
+ void* storage = allocator->template Alloc<ArenaBitVectorAllocator>(kind);
+ return new (storage) ArenaBitVectorAllocator(allocator, kind);
}
~ArenaBitVectorAllocator() {
@@ -63,36 +63,36 @@
}
virtual void* Alloc(size_t size) {
- return arena_->Alloc(size, this->Kind());
+ return allocator_->Alloc(size, this->Kind());
}
virtual void Free(void*) {} // Nop.
private:
- ArenaBitVectorAllocator(ArenaAlloc* arena, ArenaAllocKind kind)
- : ArenaBitVectorAllocatorKind(kind), arena_(arena) { }
+ ArenaBitVectorAllocator(ArenaAlloc* allocator, ArenaAllocKind kind)
+ : ArenaBitVectorAllocatorKind(kind), allocator_(allocator) { }
- ArenaAlloc* const arena_;
+ ArenaAlloc* const allocator_;
DISALLOW_COPY_AND_ASSIGN(ArenaBitVectorAllocator);
};
-ArenaBitVector::ArenaBitVector(ArenaAllocator* arena,
+ArenaBitVector::ArenaBitVector(ArenaAllocator* allocator,
unsigned int start_bits,
bool expandable,
ArenaAllocKind kind)
: BitVector(start_bits,
expandable,
- ArenaBitVectorAllocator<ArenaAllocator>::Create(arena, kind)) {
+ ArenaBitVectorAllocator<ArenaAllocator>::Create(allocator, kind)) {
}
-ArenaBitVector::ArenaBitVector(ScopedArenaAllocator* arena,
+ArenaBitVector::ArenaBitVector(ScopedArenaAllocator* allocator,
unsigned int start_bits,
bool expandable,
ArenaAllocKind kind)
: BitVector(start_bits,
expandable,
- ArenaBitVectorAllocator<ScopedArenaAllocator>::Create(arena, kind)) {
+ ArenaBitVectorAllocator<ScopedArenaAllocator>::Create(allocator, kind)) {
}
} // namespace art
diff --git a/runtime/base/arena_bit_vector.h b/runtime/base/arena_bit_vector.h
index d86d622..ca1d5b1 100644
--- a/runtime/base/arena_bit_vector.h
+++ b/runtime/base/arena_bit_vector.h
@@ -31,19 +31,19 @@
class ArenaBitVector : public BitVector, public ArenaObject<kArenaAllocGrowableBitMap> {
public:
template <typename Allocator>
- static ArenaBitVector* Create(Allocator* arena,
+ static ArenaBitVector* Create(Allocator* allocator,
uint32_t start_bits,
bool expandable,
ArenaAllocKind kind = kArenaAllocGrowableBitMap) {
- void* storage = arena->template Alloc<ArenaBitVector>(kind);
- return new (storage) ArenaBitVector(arena, start_bits, expandable, kind);
+ void* storage = allocator->template Alloc<ArenaBitVector>(kind);
+ return new (storage) ArenaBitVector(allocator, start_bits, expandable, kind);
}
- ArenaBitVector(ArenaAllocator* arena,
+ ArenaBitVector(ArenaAllocator* allocator,
uint32_t start_bits,
bool expandable,
ArenaAllocKind kind = kArenaAllocGrowableBitMap);
- ArenaBitVector(ScopedArenaAllocator* arena,
+ ArenaBitVector(ScopedArenaAllocator* allocator,
uint32_t start_bits,
bool expandable,
ArenaAllocKind kind = kArenaAllocGrowableBitMap);
diff --git a/runtime/base/file_magic.cc b/runtime/base/file_magic.cc
index 568a7ae..30b4f05 100644
--- a/runtime/base/file_magic.cc
+++ b/runtime/base/file_magic.cc
@@ -55,8 +55,4 @@
('K' == ((magic >> 8) & 0xff)));
}
-bool IsDexMagic(uint32_t magic) {
- return DexFile::IsMagicValid(reinterpret_cast<const uint8_t*>(&magic));
-}
-
} // namespace art
diff --git a/runtime/base/file_magic.h b/runtime/base/file_magic.h
index 4b5d2f5..1c9effd 100644
--- a/runtime/base/file_magic.h
+++ b/runtime/base/file_magic.h
@@ -29,7 +29,6 @@
// Check whether the given magic matches a known file type.
bool IsZipMagic(uint32_t magic);
-bool IsDexMagic(uint32_t magic);
} // namespace art
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 0cc2622..fe91272 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -51,6 +51,7 @@
#include "compiler_callbacks.h"
#include "debugger.h"
#include "dex_file-inl.h"
+#include "dex_file_loader.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "experimental_flags.h"
@@ -8714,10 +8715,11 @@
const DexFile& dex_file = klass->GetDexFile();
if (&dex_file != last_dex_file_) {
last_dex_file_ = &dex_file;
- DexCacheResolvedClasses resolved_classes(dex_file.GetLocation(),
- dex_file.GetBaseLocation(),
- dex_file.GetLocationChecksum(),
- dex_file.NumMethodIds());
+ DexCacheResolvedClasses resolved_classes(
+ dex_file.GetLocation(),
+ DexFileLoader::GetBaseLocation(dex_file.GetLocation()),
+ dex_file.GetLocationChecksum(),
+ dex_file.NumMethodIds());
last_resolved_classes_ = result_->find(resolved_classes);
if (last_resolved_classes_ == result_->end()) {
last_resolved_classes_ = result_->insert(resolved_classes).first;
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index f887b8e..3d9fd59 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -26,7 +26,7 @@
#include "base/enums.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
-#include "dex_file.h"
+#include "native_dex_file.h"
#include "dex_file_types.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "experimental_flags.h"
@@ -1462,11 +1462,11 @@
dex_cache->SetLocation(location.Get());
const DexFile* old_dex_file = dex_cache->GetDexFile();
- std::unique_ptr<DexFile> dex_file(new DexFile(old_dex_file->Begin(),
- old_dex_file->Size(),
- location->ToModifiedUtf8(),
- 0u,
- nullptr));
+ std::unique_ptr<DexFile> dex_file(new NativeDexFile(old_dex_file->Begin(),
+ old_dex_file->Size(),
+ location->ToModifiedUtf8(),
+ 0u,
+ nullptr));
{
WriterMutexLock mu(soa.Self(), *Locks::dex_lock_);
// Check that inserting with a UTF16 name works.
diff --git a/runtime/class_loader_context.cc b/runtime/class_loader_context.cc
index 2282da0..167533d 100644
--- a/runtime/class_loader_context.cc
+++ b/runtime/class_loader_context.cc
@@ -25,6 +25,7 @@
#include "class_linker.h"
#include "class_loader_utils.h"
#include "dex_file.h"
+#include "dex_file_loader.h"
#include "handle_scope-inl.h"
#include "jni_internal.h"
#include "oat_file_assistant.h"
@@ -227,11 +228,11 @@
std::string error_msg;
// When opening the dex files from the context we expect their checksum to match their
// contents. So pass true to verify_checksum.
- if (!DexFile::Open(location.c_str(),
- location.c_str(),
- /*verify_checksum*/ true,
- &error_msg,
- &info.opened_dex_files)) {
+ if (!DexFileLoader::Open(location.c_str(),
+ location.c_str(),
+ /*verify_checksum*/ true,
+ &error_msg,
+ &info.opened_dex_files)) {
// If we fail to open the dex file because it's been stripped, try to open the dex file
// from its corresponding oat file.
// This could happen when we need to recompile a pre-build whose dex code has been stripped.
@@ -282,7 +283,7 @@
std::set<std::string> canonical_locations;
for (const std::string& location : locations) {
- canonical_locations.insert(DexFile::GetDexCanonicalLocation(location.c_str()));
+ canonical_locations.insert(DexFileLoader::GetDexCanonicalLocation(location.c_str()));
}
bool removed_locations = false;
for (ClassLoaderInfo& info : class_loader_chain_) {
@@ -292,7 +293,7 @@
info.classpath.end(),
[canonical_locations](const std::string& location) {
return ContainsElement(canonical_locations,
- DexFile::GetDexCanonicalLocation(location.c_str()));
+ DexFileLoader::GetDexCanonicalLocation(location.c_str()));
});
info.classpath.erase(kept_it, info.classpath.end());
if (initial_size != info.classpath.size()) {
@@ -340,7 +341,8 @@
if (for_dex2oat) {
// dex2oat only needs the base location. It cannot accept multidex locations.
// So ensure we only add each file once.
- bool new_insert = seen_locations.insert(dex_file->GetBaseLocation()).second;
+ bool new_insert = seen_locations.insert(
+ DexFileLoader::GetBaseLocation(dex_file->GetLocation())).second;
if (!new_insert) {
continue;
}
diff --git a/runtime/class_loader_context_test.cc b/runtime/class_loader_context_test.cc
index ae3dcec..be6acde 100644
--- a/runtime/class_loader_context_test.cc
+++ b/runtime/class_loader_context_test.cc
@@ -100,12 +100,13 @@
info.opened_dex_files[cur_open_dex_index++];
std::unique_ptr<const DexFile>& expected_dex_file = (*all_dex_files)[k];
- std::string expected_location = expected_dex_file->GetBaseLocation();
+ std::string expected_location =
+ DexFileLoader::GetBaseLocation(expected_dex_file->GetLocation());
UniqueCPtr<const char[]> expected_real_location(
realpath(expected_location.c_str(), nullptr));
ASSERT_TRUE(expected_real_location != nullptr) << expected_location;
expected_location.assign(expected_real_location.get());
- expected_location += DexFile::GetMultiDexSuffix(expected_dex_file->GetLocation());
+ expected_location += DexFileLoader::GetMultiDexSuffix(expected_dex_file->GetLocation());
ASSERT_EQ(expected_location, opened_dex_file->GetLocation());
ASSERT_EQ(expected_dex_file->GetLocationChecksum(), opened_dex_file->GetLocationChecksum());
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 29b376a..0c2e490 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -34,6 +34,7 @@
#include "class_linker.h"
#include "compiler_callbacks.h"
#include "dex_file-inl.h"
+#include "dex_file_loader.h"
#include "gc/heap.h"
#include "gc_root-inl.h"
#include "gtest/gtest.h"
@@ -372,7 +373,7 @@
std::string error_msg;
MemMap::Init();
static constexpr bool kVerifyChecksum = true;
- if (!DexFile::Open(location, location, kVerifyChecksum, &error_msg, &dex_files)) {
+ if (!DexFileLoader::Open(location, location, kVerifyChecksum, &error_msg, &dex_files)) {
LOG(FATAL) << "Could not open .dex file '" << location << "': " << error_msg << "\n";
UNREACHABLE();
} else {
@@ -571,7 +572,7 @@
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- bool success = DexFile::Open(
+ bool success = DexFileLoader::Open(
filename.c_str(), filename.c_str(), kVerifyChecksum, &error_msg, &dex_files);
CHECK(success) << "Failed to open '" << filename << "': " << error_msg;
for (auto& dex_file : dex_files) {
diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h
index 93daa45..a9bb954 100644
--- a/runtime/dex2oat_environment_test.h
+++ b/runtime/dex2oat_environment_test.h
@@ -26,6 +26,7 @@
#include "base/stl_util.h"
#include "common_runtime_test.h"
#include "compiler_callbacks.h"
+#include "dex_file_loader.h"
#include "exec_utils.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
@@ -71,7 +72,8 @@
<< "Expected dex file to be at: " << GetDexSrc1();
ASSERT_TRUE(OS::FileExists(GetStrippedDexSrc1().c_str()))
<< "Expected stripped dex file to be at: " << GetStrippedDexSrc1();
- ASSERT_FALSE(DexFile::GetMultiDexChecksums(GetStrippedDexSrc1().c_str(), &checksums, &error_msg))
+ ASSERT_FALSE(
+ DexFileLoader::GetMultiDexChecksums(GetStrippedDexSrc1().c_str(), &checksums, &error_msg))
<< "Expected stripped dex file to be stripped: " << GetStrippedDexSrc1();
ASSERT_TRUE(OS::FileExists(GetDexSrc2().c_str()))
<< "Expected dex file to be at: " << GetDexSrc2();
@@ -80,13 +82,19 @@
// GetMultiDexSrc1, but a different secondary dex checksum.
static constexpr bool kVerifyChecksum = true;
std::vector<std::unique_ptr<const DexFile>> multi1;
- ASSERT_TRUE(DexFile::Open(GetMultiDexSrc1().c_str(),
- GetMultiDexSrc1().c_str(), kVerifyChecksum, &error_msg, &multi1)) << error_msg;
+ ASSERT_TRUE(DexFileLoader::Open(GetMultiDexSrc1().c_str(),
+ GetMultiDexSrc1().c_str(),
+ kVerifyChecksum,
+ &error_msg,
+ &multi1)) << error_msg;
ASSERT_GT(multi1.size(), 1u);
std::vector<std::unique_ptr<const DexFile>> multi2;
- ASSERT_TRUE(DexFile::Open(GetMultiDexSrc2().c_str(),
- GetMultiDexSrc2().c_str(), kVerifyChecksum, &error_msg, &multi2)) << error_msg;
+ ASSERT_TRUE(DexFileLoader::Open(GetMultiDexSrc2().c_str(),
+ GetMultiDexSrc2().c_str(),
+ kVerifyChecksum,
+ &error_msg,
+ &multi2)) << error_msg;
ASSERT_GT(multi2.size(), 1u);
ASSERT_EQ(multi1[0]->GetLocationChecksum(), multi2[0]->GetLocationChecksum());
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 2e776b0..f6b3428 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -23,7 +23,6 @@
#include <string.h>
#include <sys/file.h>
#include <sys/mman.h> // For the PROT_* and MAP_* constants.
-#include <sys/stat.h>
#include <zlib.h>
#include <memory>
@@ -33,19 +32,17 @@
#include "android-base/stringprintf.h"
#include "base/enums.h"
-#include "base/file_magic.h"
#include "base/logging.h"
#include "base/stl_util.h"
-#include "base/systrace.h"
-#include "base/unix_file/fd_file.h"
#include "dex_file-inl.h"
-#include "dex_file_verifier.h"
+#include "dex_file_loader.h"
#include "jvalue.h"
#include "leb128.h"
+#include "mem_map.h"
+#include "native_dex_file.h"
#include "os.h"
#include "utf-inl.h"
#include "utils.h"
-#include "zip_archive.h"
namespace art {
@@ -56,22 +53,6 @@
static_assert(sizeof(dex::TypeIndex) == sizeof(uint16_t), "TypeIndex size is wrong");
static_assert(std::is_trivially_copyable<dex::TypeIndex>::value, "TypeIndex not trivial");
-static constexpr OatDexFile* kNoOatDexFile = nullptr;
-
-const char* DexFile::kClassesDex = "classes.dex";
-
-const uint8_t DexFile::kDexMagic[] = { 'd', 'e', 'x', '\n' };
-const uint8_t DexFile::kDexMagicVersions[DexFile::kNumDexVersions][DexFile::kDexVersionLen] = {
- {'0', '3', '5', '\0'},
- // Dex version 036 skipped because of an old dalvik bug on some versions of android where dex
- // files with that version number would erroneously be accepted and run.
- {'0', '3', '7', '\0'},
- // Dex version 038: Android "O".
- {'0', '3', '8', '\0'},
- // Dex verion 039: Beyond Android "O".
- {'0', '3', '9', '\0'},
-};
-
uint32_t DexFile::CalculateChecksum() const {
const uint32_t non_sum = OFFSETOF_MEMBER(DexFile::Header, signature_);
const uint8_t* non_sum_ptr = Begin() + non_sum;
@@ -83,55 +64,6 @@
uint8_t type_;
};
-bool DexFile::GetMultiDexChecksums(const char* filename,
- std::vector<uint32_t>* checksums,
- std::string* error_msg) {
- CHECK(checksums != nullptr);
- uint32_t magic;
-
- File fd = OpenAndReadMagic(filename, &magic, error_msg);
- if (fd.Fd() == -1) {
- DCHECK(!error_msg->empty());
- return false;
- }
- if (IsZipMagic(magic)) {
- std::unique_ptr<ZipArchive> zip_archive(
- ZipArchive::OpenFromFd(fd.Release(), filename, error_msg));
- if (zip_archive.get() == nullptr) {
- *error_msg = StringPrintf("Failed to open zip archive '%s' (error msg: %s)", filename,
- error_msg->c_str());
- return false;
- }
-
- uint32_t i = 0;
- std::string zip_entry_name = GetMultiDexClassesDexName(i++);
- std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(zip_entry_name.c_str(), error_msg));
- if (zip_entry.get() == nullptr) {
- *error_msg = StringPrintf("Zip archive '%s' doesn't contain %s (error msg: %s)", filename,
- zip_entry_name.c_str(), error_msg->c_str());
- return false;
- }
-
- do {
- checksums->push_back(zip_entry->GetCrc32());
- zip_entry_name = DexFile::GetMultiDexClassesDexName(i++);
- zip_entry.reset(zip_archive->Find(zip_entry_name.c_str(), error_msg));
- } while (zip_entry.get() != nullptr);
- return true;
- }
- if (IsDexMagic(magic)) {
- std::unique_ptr<const DexFile> dex_file(
- DexFile::OpenFile(fd.Release(), filename, false, false, error_msg));
- if (dex_file.get() == nullptr) {
- return false;
- }
- checksums->push_back(dex_file->GetHeader().checksum_);
- return true;
- }
- *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename);
- return false;
-}
-
int DexFile::GetPermissions() const {
if (mem_map_.get() == nullptr) {
return 0;
@@ -162,367 +94,6 @@
}
}
-
-std::unique_ptr<const DexFile> DexFile::Open(const uint8_t* base,
- size_t size,
- const std::string& location,
- uint32_t location_checksum,
- const OatDexFile* oat_dex_file,
- bool verify,
- bool verify_checksum,
- std::string* error_msg) {
- ScopedTrace trace(std::string("Open dex file from RAM ") + location);
- return OpenCommon(base,
- size,
- location,
- location_checksum,
- oat_dex_file,
- verify,
- verify_checksum,
- error_msg);
-}
-
-std::unique_ptr<const DexFile> DexFile::Open(const std::string& location,
- uint32_t location_checksum,
- std::unique_ptr<MemMap> map,
- bool verify,
- bool verify_checksum,
- std::string* error_msg) {
- ScopedTrace trace(std::string("Open dex file from mapped-memory ") + location);
- CHECK(map.get() != nullptr);
-
- if (map->Size() < sizeof(DexFile::Header)) {
- *error_msg = StringPrintf(
- "DexFile: failed to open dex file '%s' that is too short to have a header",
- location.c_str());
- return nullptr;
- }
-
- std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
- map->Size(),
- location,
- location_checksum,
- kNoOatDexFile,
- verify,
- verify_checksum,
- error_msg);
- if (dex_file != nullptr) {
- dex_file->mem_map_ = std::move(map);
- }
- return dex_file;
-}
-
-bool DexFile::Open(const char* filename,
- const std::string& location,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- ScopedTrace trace(std::string("Open dex file ") + std::string(location));
- DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is nullptr";
- uint32_t magic;
- File fd = OpenAndReadMagic(filename, &magic, error_msg);
- if (fd.Fd() == -1) {
- DCHECK(!error_msg->empty());
- return false;
- }
- if (IsZipMagic(magic)) {
- return DexFile::OpenZip(fd.Release(), location, verify_checksum, error_msg, dex_files);
- }
- if (IsDexMagic(magic)) {
- std::unique_ptr<const DexFile> dex_file(DexFile::OpenFile(fd.Release(),
- location,
- /* verify */ true,
- verify_checksum,
- error_msg));
- if (dex_file.get() != nullptr) {
- dex_files->push_back(std::move(dex_file));
- return true;
- } else {
- return false;
- }
- }
- *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename);
- return false;
-}
-
-std::unique_ptr<const DexFile> DexFile::OpenDex(int fd,
- const std::string& location,
- bool verify_checksum,
- std::string* error_msg) {
- ScopedTrace trace("Open dex file " + std::string(location));
- return OpenFile(fd, location, true /* verify */, verify_checksum, error_msg);
-}
-
-bool DexFile::OpenZip(int fd,
- const std::string& location,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- ScopedTrace trace("Dex file open Zip " + std::string(location));
- DCHECK(dex_files != nullptr) << "DexFile::OpenZip: out-param is nullptr";
- std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, location.c_str(), error_msg));
- if (zip_archive.get() == nullptr) {
- DCHECK(!error_msg->empty());
- return false;
- }
- return DexFile::OpenAllDexFilesFromZip(*zip_archive,
- location,
- verify_checksum,
- error_msg,
- dex_files);
-}
-
-std::unique_ptr<const DexFile> DexFile::OpenFile(int fd,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg) {
- ScopedTrace trace(std::string("Open dex file ") + std::string(location));
- CHECK(!location.empty());
- std::unique_ptr<MemMap> map;
- {
- File delayed_close(fd, /* check_usage */ false);
- struct stat sbuf;
- memset(&sbuf, 0, sizeof(sbuf));
- if (fstat(fd, &sbuf) == -1) {
- *error_msg = StringPrintf("DexFile: fstat '%s' failed: %s", location.c_str(),
- strerror(errno));
- return nullptr;
- }
- if (S_ISDIR(sbuf.st_mode)) {
- *error_msg = StringPrintf("Attempt to mmap directory '%s'", location.c_str());
- return nullptr;
- }
- size_t length = sbuf.st_size;
- map.reset(MemMap::MapFile(length,
- PROT_READ,
- MAP_PRIVATE,
- fd,
- 0,
- /*low_4gb*/false,
- location.c_str(),
- error_msg));
- if (map == nullptr) {
- DCHECK(!error_msg->empty());
- return nullptr;
- }
- }
-
- if (map->Size() < sizeof(DexFile::Header)) {
- *error_msg = StringPrintf(
- "DexFile: failed to open dex file '%s' that is too short to have a header",
- location.c_str());
- return nullptr;
- }
-
- const Header* dex_header = reinterpret_cast<const Header*>(map->Begin());
-
- std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
- map->Size(),
- location,
- dex_header->checksum_,
- kNoOatDexFile,
- verify,
- verify_checksum,
- error_msg);
- if (dex_file != nullptr) {
- dex_file->mem_map_ = std::move(map);
- }
-
- return dex_file;
-}
-
-std::unique_ptr<const DexFile> DexFile::OpenOneDexFileFromZip(const ZipArchive& zip_archive,
- const char* entry_name,
- const std::string& location,
- bool verify_checksum,
- std::string* error_msg,
- ZipOpenErrorCode* error_code) {
- ScopedTrace trace("Dex file open from Zip Archive " + std::string(location));
- CHECK(!location.empty());
- std::unique_ptr<ZipEntry> zip_entry(zip_archive.Find(entry_name, error_msg));
- if (zip_entry == nullptr) {
- *error_code = ZipOpenErrorCode::kEntryNotFound;
- return nullptr;
- }
- if (zip_entry->GetUncompressedLength() == 0) {
- *error_msg = StringPrintf("Dex file '%s' has zero length", location.c_str());
- *error_code = ZipOpenErrorCode::kDexFileError;
- return nullptr;
- }
-
- std::unique_ptr<MemMap> map;
- if (zip_entry->IsUncompressed()) {
- if (!zip_entry->IsAlignedTo(alignof(Header))) {
- // Do not mmap unaligned ZIP entries because
- // doing so would fail dex verification which requires 4 byte alignment.
- LOG(WARNING) << "Can't mmap dex file " << location << "!" << entry_name << " directly; "
- << "please zipalign to " << alignof(Header) << " bytes. "
- << "Falling back to extracting file.";
- } else {
- // Map uncompressed files within zip as file-backed to avoid a dirty copy.
- map.reset(zip_entry->MapDirectlyFromFile(location.c_str(), /*out*/error_msg));
- if (map == nullptr) {
- LOG(WARNING) << "Can't mmap dex file " << location << "!" << entry_name << " directly; "
- << "is your ZIP file corrupted? Falling back to extraction.";
- // Try again with Extraction which still has a chance of recovery.
- }
- }
- }
-
- if (map == nullptr) {
- // Default path for compressed ZIP entries,
- // and fallback for stored ZIP entries.
- map.reset(zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg));
- }
-
- if (map == nullptr) {
- *error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", entry_name, location.c_str(),
- error_msg->c_str());
- *error_code = ZipOpenErrorCode::kExtractToMemoryError;
- return nullptr;
- }
- VerifyResult verify_result;
- std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
- map->Size(),
- location,
- zip_entry->GetCrc32(),
- kNoOatDexFile,
- /* verify */ true,
- verify_checksum,
- error_msg,
- &verify_result);
- if (dex_file == nullptr) {
- if (verify_result == VerifyResult::kVerifyNotAttempted) {
- *error_code = ZipOpenErrorCode::kDexFileError;
- } else {
- *error_code = ZipOpenErrorCode::kVerifyError;
- }
- return nullptr;
- }
- dex_file->mem_map_ = std::move(map);
- if (!dex_file->DisableWrite()) {
- *error_msg = StringPrintf("Failed to make dex file '%s' read only", location.c_str());
- *error_code = ZipOpenErrorCode::kMakeReadOnlyError;
- return nullptr;
- }
- CHECK(dex_file->IsReadOnly()) << location;
- if (verify_result != VerifyResult::kVerifySucceeded) {
- *error_code = ZipOpenErrorCode::kVerifyError;
- return nullptr;
- }
- *error_code = ZipOpenErrorCode::kNoError;
- return dex_file;
-}
-
-// Technically we do not have a limitation with respect to the number of dex files that can be in a
-// multidex APK. However, it's bad practice, as each dex file requires its own tables for symbols
-// (types, classes, methods, ...) and dex caches. So warn the user that we open a zip with what
-// seems an excessive number.
-static constexpr size_t kWarnOnManyDexFilesThreshold = 100;
-
-bool DexFile::OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
- const std::string& location,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- ScopedTrace trace("Dex file open from Zip " + std::string(location));
- DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is nullptr";
- ZipOpenErrorCode error_code;
- std::unique_ptr<const DexFile> dex_file(OpenOneDexFileFromZip(zip_archive,
- kClassesDex,
- location,
- verify_checksum,
- error_msg,
- &error_code));
- if (dex_file.get() == nullptr) {
- return false;
- } else {
- // Had at least classes.dex.
- dex_files->push_back(std::move(dex_file));
-
- // Now try some more.
-
- // We could try to avoid std::string allocations by working on a char array directly. As we
- // do not expect a lot of iterations, this seems too involved and brittle.
-
- for (size_t i = 1; ; ++i) {
- std::string name = GetMultiDexClassesDexName(i);
- std::string fake_location = GetMultiDexLocation(i, location.c_str());
- std::unique_ptr<const DexFile> next_dex_file(OpenOneDexFileFromZip(zip_archive,
- name.c_str(),
- fake_location,
- verify_checksum,
- error_msg,
- &error_code));
- if (next_dex_file.get() == nullptr) {
- if (error_code != ZipOpenErrorCode::kEntryNotFound) {
- LOG(WARNING) << "Zip open failed: " << *error_msg;
- }
- break;
- } else {
- dex_files->push_back(std::move(next_dex_file));
- }
-
- if (i == kWarnOnManyDexFilesThreshold) {
- LOG(WARNING) << location << " has in excess of " << kWarnOnManyDexFilesThreshold
- << " dex files. Please consider coalescing and shrinking the number to "
- " avoid runtime overhead.";
- }
-
- if (i == std::numeric_limits<size_t>::max()) {
- LOG(ERROR) << "Overflow in number of dex files!";
- break;
- }
- }
-
- return true;
- }
-}
-
-std::unique_ptr<DexFile> DexFile::OpenCommon(const uint8_t* base,
- size_t size,
- const std::string& location,
- uint32_t location_checksum,
- const OatDexFile* oat_dex_file,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- VerifyResult* verify_result) {
- if (verify_result != nullptr) {
- *verify_result = VerifyResult::kVerifyNotAttempted;
- }
- std::unique_ptr<DexFile> dex_file(new DexFile(base,
- size,
- location,
- location_checksum,
- oat_dex_file));
- if (dex_file == nullptr) {
- *error_msg = StringPrintf("Failed to open dex file '%s' from memory: %s", location.c_str(),
- error_msg->c_str());
- return nullptr;
- }
- if (!dex_file->Init(error_msg)) {
- dex_file.reset();
- return nullptr;
- }
- if (verify && !DexFileVerifier::Verify(dex_file.get(),
- dex_file->Begin(),
- dex_file->Size(),
- location.c_str(),
- verify_checksum,
- error_msg)) {
- if (verify_result != nullptr) {
- *verify_result = VerifyResult::kVerifyFailed;
- }
- return nullptr;
- }
- if (verify_result != nullptr) {
- *verify_result = VerifyResult::kVerifySucceeded;
- }
- return dex_file;
-}
-
DexFile::DexFile(const uint8_t* base,
size_t size,
const std::string& location,
@@ -569,7 +140,7 @@
}
bool DexFile::CheckMagicAndVersion(std::string* error_msg) const {
- if (!IsMagicValid(header_->magic_)) {
+ if (!IsMagicValid()) {
std::ostringstream oss;
oss << "Unrecognized magic number in " << GetLocation() << ":"
<< " " << header_->magic_[0]
@@ -579,7 +150,7 @@
*error_msg = oss.str();
return false;
}
- if (!IsVersionValid(header_->magic_)) {
+ if (!IsVersionValid()) {
std::ostringstream oss;
oss << "Unrecognized version number in " << GetLocation() << ":"
<< " " << header_->magic_[4]
@@ -619,22 +190,8 @@
}
}
-bool DexFile::IsMagicValid(const uint8_t* magic) {
- return (memcmp(magic, kDexMagic, sizeof(kDexMagic)) == 0);
-}
-
-bool DexFile::IsVersionValid(const uint8_t* magic) {
- const uint8_t* version = &magic[sizeof(kDexMagic)];
- for (uint32_t i = 0; i < kNumDexVersions; i++) {
- if (memcmp(version, kDexMagicVersions[i], kDexVersionLen) == 0) {
- return true;
- }
- }
- return false;
-}
-
uint32_t DexFile::Header::GetVersion() const {
- const char* version = reinterpret_cast<const char*>(&magic_[sizeof(kDexMagic)]);
+ const char* version = reinterpret_cast<const char*>(&magic_[kDexMagicSize]);
return atoi(version);
}
@@ -1218,41 +775,6 @@
}
}
-bool DexFile::IsMultiDexLocation(const char* location) {
- return strrchr(location, kMultiDexSeparator) != nullptr;
-}
-
-std::string DexFile::GetMultiDexClassesDexName(size_t index) {
- if (index == 0) {
- return "classes.dex";
- } else {
- return StringPrintf("classes%zu.dex", index + 1);
- }
-}
-
-std::string DexFile::GetMultiDexLocation(size_t index, const char* dex_location) {
- if (index == 0) {
- return dex_location;
- } else {
- return StringPrintf("%s" kMultiDexSeparatorString "classes%zu.dex", dex_location, index + 1);
- }
-}
-
-std::string DexFile::GetDexCanonicalLocation(const char* dex_location) {
- CHECK_NE(dex_location, static_cast<const char*>(nullptr));
- std::string base_location = GetBaseLocation(dex_location);
- const char* suffix = dex_location + base_location.size();
- DCHECK(suffix[0] == 0 || suffix[0] == kMultiDexSeparator);
- UniqueCPtr<const char[]> path(realpath(base_location.c_str(), nullptr));
- if (path != nullptr && path.get() != base_location) {
- return std::string(path.get()) + suffix;
- } else if (suffix[0] == 0) {
- return base_location;
- } else {
- return dex_location;
- }
-}
-
// Read a signed integer. "zwidth" is the zero-based byte count.
int32_t DexFile::ReadSignedInt(const uint8_t* ptr, int zwidth) {
int32_t val = 0;
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 516b379..5759684 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -39,34 +39,28 @@
class StringPiece;
class ZipArchive;
+// Dex file is the API that exposes native dex files (ordinary dex files) and CompactDex.
+// Originally, the dex file format used by ART was mostly the same as APKs. The only change was
+// quickened opcodes and layout optimizations.
+// Since ART needs to support both native dex files and CompactDex files, the DexFile interface
+// provides an abstraction to facilitate this.
class DexFile {
public:
+ // Number of bytes in the dex file magic.
+ static constexpr size_t kDexMagicSize = 4;
+ static constexpr size_t kDexVersionLen = 4;
+
// First Dex format version supporting default methods.
static const uint32_t kDefaultMethodsVersion = 37;
// First Dex format version enforcing class definition ordering rules.
static const uint32_t kClassDefinitionOrderEnforcedVersion = 37;
- static const uint8_t kDexMagic[];
- static constexpr size_t kNumDexVersions = 4;
- static constexpr size_t kDexVersionLen = 4;
- static const uint8_t kDexMagicVersions[kNumDexVersions][kDexVersionLen];
-
static constexpr size_t kSha1DigestSize = 20;
static constexpr uint32_t kDexEndianConstant = 0x12345678;
- // name of the DexFile entry within a zip archive
- static const char* kClassesDex;
-
// The value of an invalid index.
static const uint16_t kDexNoIndex16 = 0xFFFF;
- // The separator character in MultiDex locations.
- static constexpr char kMultiDexSeparator = '!';
-
- // A string version of the previous. This is a define so that we can merge string literals in the
- // preprocessor.
- #define kMultiDexSeparatorString "!"
-
// Raw header_item.
struct Header {
uint8_t magic_[8];
@@ -433,57 +427,6 @@
struct AnnotationValue;
- // Returns the checksums of a file for comparison with GetLocationChecksum().
- // For .dex files, this is the single header checksum.
- // For zip files, this is the zip entry CRC32 checksum for classes.dex and
- // each additional multidex entry classes2.dex, classes3.dex, etc.
- // Return true if the checksums could be found, false otherwise.
- static bool GetMultiDexChecksums(const char* filename,
- std::vector<uint32_t>* checksums,
- std::string* error_msg);
-
- // Check whether a location denotes a multidex dex file. This is a very simple check: returns
- // whether the string contains the separator character.
- static bool IsMultiDexLocation(const char* location);
-
- // Opens .dex file, backed by existing memory
- static std::unique_ptr<const DexFile> Open(const uint8_t* base,
- size_t size,
- const std::string& location,
- uint32_t location_checksum,
- const OatDexFile* oat_dex_file,
- bool verify,
- bool verify_checksum,
- std::string* error_msg);
-
- // Opens .dex file that has been memory-mapped by the caller.
- static std::unique_ptr<const DexFile> Open(const std::string& location,
- uint32_t location_checkum,
- std::unique_ptr<MemMap> mem_map,
- bool verify,
- bool verify_checksum,
- std::string* error_msg);
-
- // Opens all .dex files found in the file, guessing the container format based on file extension.
- static bool Open(const char* filename,
- const std::string& location,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files);
-
- // Open a single dex file from an fd. This function closes the fd.
- static std::unique_ptr<const DexFile> OpenDex(int fd,
- const std::string& location,
- bool verify_checksum,
- std::string* error_msg);
-
- // Opens dex files from within a .jar, .zip, or .apk file
- static bool OpenZip(int fd,
- const std::string& location,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files);
-
// Closes a .dex file.
virtual ~DexFile();
@@ -491,30 +434,6 @@
return location_;
}
- // For normal dex files, location and base location coincide. If a dex file is part of a multidex
- // archive, the base location is the name of the originating jar/apk, stripped of any internal
- // classes*.dex path.
- static std::string GetBaseLocation(const char* location) {
- const char* pos = strrchr(location, kMultiDexSeparator);
- return (pos == nullptr) ? location : std::string(location, pos - location);
- }
-
- static std::string GetBaseLocation(const std::string& location) {
- return GetBaseLocation(location.c_str());
- }
-
- // Returns the '!classes*.dex' part of the dex location. Returns an empty
- // string if there is no multidex suffix for the given location.
- // The kMultiDexSeparator is included in the returned suffix.
- static std::string GetMultiDexSuffix(const std::string& location) {
- size_t pos = location.rfind(kMultiDexSeparator);
- return (pos == std::string::npos) ? std::string() : location.substr(pos);
- }
-
- std::string GetBaseLocation() const {
- return GetBaseLocation(location_);
- }
-
// For DexFiles directly from .dex files, this is the checksum from the DexFile::Header.
// For DexFiles opened from a zip files, this will be the ZipEntry CRC32 of classes.dex.
uint32_t GetLocationChecksum() const {
@@ -532,10 +451,10 @@
}
// Returns true if the byte string points to the magic value.
- static bool IsMagicValid(const uint8_t* magic);
+ virtual bool IsMagicValid() const = 0;
// Returns true if the byte string after the magic is the correct value.
- static bool IsVersionValid(const uint8_t* magic);
+ virtual bool IsVersionValid() const = 0;
// Returns the number of string identifiers in the .dex file.
size_t NumStringIds() const {
@@ -1011,29 +930,6 @@
return size_;
}
- // Return the name of the index-th classes.dex in a multidex zip file. This is classes.dex for
- // index == 0, and classes{index + 1}.dex else.
- static std::string GetMultiDexClassesDexName(size_t index);
-
- // Return the (possibly synthetic) dex location for a multidex entry. This is dex_location for
- // index == 0, and dex_location + multi-dex-separator + GetMultiDexClassesDexName(index) else.
- static std::string GetMultiDexLocation(size_t index, const char* dex_location);
-
- // Returns the canonical form of the given dex location.
- //
- // There are different flavors of "dex locations" as follows:
- // the file name of a dex file:
- // The actual file path that the dex file has on disk.
- // dex_location:
- // This acts as a key for the class linker to know which dex file to load.
- // It may correspond to either an old odex file or a particular dex file
- // inside an oat file. In the first case it will also match the file name
- // of the dex file. In the second case (oat) it will include the file name
- // and possibly some multidex annotation to uniquely identify it.
- // canonical_dex_location:
- // the dex_location where it's file name part has been made canonical.
- static std::string GetDexCanonicalLocation(const char* dex_location);
-
const OatDexFile* GetOatDexFile() const {
return oat_dex_file_;
}
@@ -1059,64 +955,7 @@
// Returns a human-readable form of the type at an index.
std::string PrettyType(dex::TypeIndex type_idx) const;
- private:
- static std::unique_ptr<const DexFile> OpenFile(int fd,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg);
-
- enum class ZipOpenErrorCode { // private
- kNoError,
- kEntryNotFound,
- kExtractToMemoryError,
- kDexFileError,
- kMakeReadOnlyError,
- kVerifyError
- };
-
- // Open all classesXXX.dex files from a zip archive.
- static bool OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
- const std::string& location,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files);
-
- // Opens .dex file from the entry_name in a zip archive. error_code is undefined when non-null
- // return.
- static std::unique_ptr<const DexFile> OpenOneDexFileFromZip(const ZipArchive& zip_archive,
- const char* entry_name,
- const std::string& location,
- bool verify_checksum,
- std::string* error_msg,
- ZipOpenErrorCode* error_code);
-
- enum class VerifyResult { // private
- kVerifyNotAttempted,
- kVerifySucceeded,
- kVerifyFailed
- };
-
- static std::unique_ptr<DexFile> OpenCommon(const uint8_t* base,
- size_t size,
- const std::string& location,
- uint32_t location_checksum,
- const OatDexFile* oat_dex_file,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- VerifyResult* verify_result = nullptr);
-
-
- // Opens a .dex file at the given address, optionally backed by a MemMap
- static std::unique_ptr<const DexFile> OpenMemory(const uint8_t* dex_file,
- size_t size,
- const std::string& location,
- uint32_t location_checksum,
- std::unique_ptr<MemMap> mem_map,
- const OatDexFile* oat_dex_file,
- std::string* error_msg);
-
+ protected:
DexFile(const uint8_t* base,
size_t size,
const std::string& location,
@@ -1187,9 +1026,9 @@
// null.
mutable const OatDexFile* oat_dex_file_;
+ friend class DexFileLoader;
friend class DexFileVerifierTest;
friend class OatWriter;
- ART_FRIEND_TEST(ClassLinkerTest, RegisterDexFileName); // for constructor
};
std::ostream& operator<<(std::ostream& os, const DexFile& dex_file);
diff --git a/runtime/dex_file_loader.cc b/runtime/dex_file_loader.cc
new file mode 100644
index 0000000..3ccb755
--- /dev/null
+++ b/runtime/dex_file_loader.cc
@@ -0,0 +1,484 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex_file_loader.h"
+
+#include <sys/mman.h> // For the PROT_* and MAP_* constants.
+#include <sys/stat.h>
+
+#include "android-base/stringprintf.h"
+
+#include "base/file_magic.h"
+#include "base/stl_util.h"
+#include "base/systrace.h"
+#include "base/unix_file/fd_file.h"
+#include "dex_file.h"
+#include "dex_file_verifier.h"
+#include "native_dex_file.h"
+#include "zip_archive.h"
+
+namespace art {
+
+using android::base::StringPrintf;
+
+static constexpr OatDexFile* kNoOatDexFile = nullptr;
+
+
+bool DexFileLoader::IsValidMagic(uint32_t magic) {
+ return IsValidMagic(reinterpret_cast<uint8_t*>(&magic));
+}
+
+bool DexFileLoader::IsValidMagic(const uint8_t* magic) {
+ return NativeDexFile::IsMagicValid(magic);
+}
+
+bool DexFileLoader::GetMultiDexChecksums(const char* filename,
+ std::vector<uint32_t>* checksums,
+ std::string* error_msg) {
+ CHECK(checksums != nullptr);
+ uint32_t magic;
+
+ File fd = OpenAndReadMagic(filename, &magic, error_msg);
+ if (fd.Fd() == -1) {
+ DCHECK(!error_msg->empty());
+ return false;
+ }
+ if (IsZipMagic(magic)) {
+ std::unique_ptr<ZipArchive> zip_archive(
+ ZipArchive::OpenFromFd(fd.Release(), filename, error_msg));
+ if (zip_archive.get() == nullptr) {
+ *error_msg = StringPrintf("Failed to open zip archive '%s' (error msg: %s)", filename,
+ error_msg->c_str());
+ return false;
+ }
+
+ uint32_t i = 0;
+ std::string zip_entry_name = GetMultiDexClassesDexName(i++);
+ std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(zip_entry_name.c_str(), error_msg));
+ if (zip_entry.get() == nullptr) {
+ *error_msg = StringPrintf("Zip archive '%s' doesn't contain %s (error msg: %s)", filename,
+ zip_entry_name.c_str(), error_msg->c_str());
+ return false;
+ }
+
+ do {
+ checksums->push_back(zip_entry->GetCrc32());
+ zip_entry_name = GetMultiDexClassesDexName(i++);
+ zip_entry.reset(zip_archive->Find(zip_entry_name.c_str(), error_msg));
+ } while (zip_entry.get() != nullptr);
+ return true;
+ }
+ if (IsValidMagic(magic)) {
+ std::unique_ptr<const DexFile> dex_file(
+ OpenFile(fd.Release(), filename, false, false, error_msg));
+ if (dex_file == nullptr) {
+ return false;
+ }
+ checksums->push_back(dex_file->GetHeader().checksum_);
+ return true;
+ }
+ *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename);
+ return false;
+}
+
+bool DexFileLoader::IsMultiDexLocation(const char* location) {
+ return strrchr(location, kMultiDexSeparator) != nullptr;
+}
+
+std::string DexFileLoader::GetMultiDexClassesDexName(size_t index) {
+ return (index == 0) ? "classes.dex" : StringPrintf("classes%zu.dex", index + 1);
+}
+
+std::string DexFileLoader::GetMultiDexLocation(size_t index, const char* dex_location) {
+ return (index == 0)
+ ? dex_location
+ : StringPrintf("%s%cclasses%zu.dex", dex_location, kMultiDexSeparator, index + 1);
+}
+
+std::string DexFileLoader::GetDexCanonicalLocation(const char* dex_location) {
+ CHECK_NE(dex_location, static_cast<const char*>(nullptr));
+ std::string base_location = GetBaseLocation(dex_location);
+ const char* suffix = dex_location + base_location.size();
+ DCHECK(suffix[0] == 0 || suffix[0] == kMultiDexSeparator);
+ UniqueCPtr<const char[]> path(realpath(base_location.c_str(), nullptr));
+ if (path != nullptr && path.get() != base_location) {
+ return std::string(path.get()) + suffix;
+ } else if (suffix[0] == 0) {
+ return base_location;
+ } else {
+ return dex_location;
+ }
+}
+
+std::unique_ptr<const DexFile> DexFileLoader::Open(const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg) {
+ ScopedTrace trace(std::string("Open dex file from RAM ") + location);
+ return OpenCommon(base,
+ size,
+ location,
+ location_checksum,
+ oat_dex_file,
+ verify,
+ verify_checksum,
+ error_msg);
+}
+
+std::unique_ptr<const DexFile> DexFileLoader::Open(const std::string& location,
+ uint32_t location_checksum,
+ std::unique_ptr<MemMap> map,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg) {
+ ScopedTrace trace(std::string("Open dex file from mapped-memory ") + location);
+ CHECK(map.get() != nullptr);
+
+ if (map->Size() < sizeof(DexFile::Header)) {
+ *error_msg = StringPrintf(
+ "DexFile: failed to open dex file '%s' that is too short to have a header",
+ location.c_str());
+ return nullptr;
+ }
+
+ std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
+ map->Size(),
+ location,
+ location_checksum,
+ kNoOatDexFile,
+ verify,
+ verify_checksum,
+ error_msg);
+ if (dex_file != nullptr) {
+ dex_file->mem_map_ = std::move(map);
+ }
+ return dex_file;
+}
+
+bool DexFileLoader::Open(const char* filename,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) {
+ ScopedTrace trace(std::string("Open dex file ") + std::string(location));
+ DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is nullptr";
+ uint32_t magic;
+ File fd = OpenAndReadMagic(filename, &magic, error_msg);
+ if (fd.Fd() == -1) {
+ DCHECK(!error_msg->empty());
+ return false;
+ }
+ if (IsZipMagic(magic)) {
+ return OpenZip(fd.Release(), location, verify_checksum, error_msg, dex_files);
+ }
+ if (IsValidMagic(magic)) {
+ std::unique_ptr<const DexFile> dex_file(OpenFile(fd.Release(),
+ location,
+ /* verify */ true,
+ verify_checksum,
+ error_msg));
+ if (dex_file.get() != nullptr) {
+ dex_files->push_back(std::move(dex_file));
+ return true;
+ } else {
+ return false;
+ }
+ }
+ *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename);
+ return false;
+}
+
+std::unique_ptr<const DexFile> DexFileLoader::OpenDex(int fd,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg) {
+ ScopedTrace trace("Open dex file " + std::string(location));
+ return OpenFile(fd, location, true /* verify */, verify_checksum, error_msg);
+}
+
+bool DexFileLoader::OpenZip(int fd,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) {
+ ScopedTrace trace("Dex file open Zip " + std::string(location));
+ DCHECK(dex_files != nullptr) << "DexFile::OpenZip: out-param is nullptr";
+ std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, location.c_str(), error_msg));
+ if (zip_archive.get() == nullptr) {
+ DCHECK(!error_msg->empty());
+ return false;
+ }
+ return OpenAllDexFilesFromZip(*zip_archive, location, verify_checksum, error_msg, dex_files);
+}
+
+std::unique_ptr<const DexFile> DexFileLoader::OpenFile(int fd,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg) {
+ ScopedTrace trace(std::string("Open dex file ") + std::string(location));
+ CHECK(!location.empty());
+ std::unique_ptr<MemMap> map;
+ {
+ File delayed_close(fd, /* check_usage */ false);
+ struct stat sbuf;
+ memset(&sbuf, 0, sizeof(sbuf));
+ if (fstat(fd, &sbuf) == -1) {
+ *error_msg = StringPrintf("DexFile: fstat '%s' failed: %s", location.c_str(),
+ strerror(errno));
+ return nullptr;
+ }
+ if (S_ISDIR(sbuf.st_mode)) {
+ *error_msg = StringPrintf("Attempt to mmap directory '%s'", location.c_str());
+ return nullptr;
+ }
+ size_t length = sbuf.st_size;
+ map.reset(MemMap::MapFile(length,
+ PROT_READ,
+ MAP_PRIVATE,
+ fd,
+ 0,
+ /*low_4gb*/false,
+ location.c_str(),
+ error_msg));
+ if (map == nullptr) {
+ DCHECK(!error_msg->empty());
+ return nullptr;
+ }
+ }
+
+ if (map->Size() < sizeof(DexFile::Header)) {
+ *error_msg = StringPrintf(
+ "DexFile: failed to open dex file '%s' that is too short to have a header",
+ location.c_str());
+ return nullptr;
+ }
+
+ const DexFile::Header* dex_header = reinterpret_cast<const DexFile::Header*>(map->Begin());
+
+ std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
+ map->Size(),
+ location,
+ dex_header->checksum_,
+ kNoOatDexFile,
+ verify,
+ verify_checksum,
+ error_msg);
+ if (dex_file != nullptr) {
+ dex_file->mem_map_ = std::move(map);
+ }
+
+ return dex_file;
+}
+
+std::unique_ptr<const DexFile> DexFileLoader::OpenOneDexFileFromZip(
+ const ZipArchive& zip_archive,
+ const char* entry_name,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg,
+ ZipOpenErrorCode* error_code) {
+ ScopedTrace trace("Dex file open from Zip Archive " + std::string(location));
+ CHECK(!location.empty());
+ std::unique_ptr<ZipEntry> zip_entry(zip_archive.Find(entry_name, error_msg));
+ if (zip_entry == nullptr) {
+ *error_code = ZipOpenErrorCode::kEntryNotFound;
+ return nullptr;
+ }
+ if (zip_entry->GetUncompressedLength() == 0) {
+ *error_msg = StringPrintf("Dex file '%s' has zero length", location.c_str());
+ *error_code = ZipOpenErrorCode::kDexFileError;
+ return nullptr;
+ }
+
+ std::unique_ptr<MemMap> map;
+ if (zip_entry->IsUncompressed()) {
+ if (!zip_entry->IsAlignedTo(alignof(DexFile::Header))) {
+ // Do not mmap unaligned ZIP entries because
+ // doing so would fail dex verification which requires 4 byte alignment.
+ LOG(WARNING) << "Can't mmap dex file " << location << "!" << entry_name << " directly; "
+ << "please zipalign to " << alignof(DexFile::Header) << " bytes. "
+ << "Falling back to extracting file.";
+ } else {
+ // Map uncompressed files within zip as file-backed to avoid a dirty copy.
+ map.reset(zip_entry->MapDirectlyFromFile(location.c_str(), /*out*/error_msg));
+ if (map == nullptr) {
+ LOG(WARNING) << "Can't mmap dex file " << location << "!" << entry_name << " directly; "
+ << "is your ZIP file corrupted? Falling back to extraction.";
+ // Try again with Extraction which still has a chance of recovery.
+ }
+ }
+ }
+
+ if (map == nullptr) {
+ // Default path for compressed ZIP entries,
+ // and fallback for stored ZIP entries.
+ map.reset(zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg));
+ }
+
+ if (map == nullptr) {
+ *error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", entry_name, location.c_str(),
+ error_msg->c_str());
+ *error_code = ZipOpenErrorCode::kExtractToMemoryError;
+ return nullptr;
+ }
+ VerifyResult verify_result;
+ std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
+ map->Size(),
+ location,
+ zip_entry->GetCrc32(),
+ kNoOatDexFile,
+ /* verify */ true,
+ verify_checksum,
+ error_msg,
+ &verify_result);
+ if (dex_file == nullptr) {
+ if (verify_result == VerifyResult::kVerifyNotAttempted) {
+ *error_code = ZipOpenErrorCode::kDexFileError;
+ } else {
+ *error_code = ZipOpenErrorCode::kVerifyError;
+ }
+ return nullptr;
+ }
+ dex_file->mem_map_ = std::move(map);
+ if (!dex_file->DisableWrite()) {
+ *error_msg = StringPrintf("Failed to make dex file '%s' read only", location.c_str());
+ *error_code = ZipOpenErrorCode::kMakeReadOnlyError;
+ return nullptr;
+ }
+ CHECK(dex_file->IsReadOnly()) << location;
+ if (verify_result != VerifyResult::kVerifySucceeded) {
+ *error_code = ZipOpenErrorCode::kVerifyError;
+ return nullptr;
+ }
+ *error_code = ZipOpenErrorCode::kNoError;
+ return dex_file;
+}
+
+// Technically we do not have a limitation with respect to the number of dex files that can be in a
+// multidex APK. However, it's bad practice, as each dex file requires its own tables for symbols
+// (types, classes, methods, ...) and dex caches. So warn the user that we open a zip with what
+// seems an excessive number.
+static constexpr size_t kWarnOnManyDexFilesThreshold = 100;
+
+bool DexFileLoader::OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) {
+ ScopedTrace trace("Dex file open from Zip " + std::string(location));
+ DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is nullptr";
+ ZipOpenErrorCode error_code;
+ std::unique_ptr<const DexFile> dex_file(OpenOneDexFileFromZip(zip_archive,
+ kClassesDex,
+ location,
+ verify_checksum,
+ error_msg,
+ &error_code));
+ if (dex_file.get() == nullptr) {
+ return false;
+ } else {
+ // Had at least classes.dex.
+ dex_files->push_back(std::move(dex_file));
+
+ // Now try some more.
+
+ // We could try to avoid std::string allocations by working on a char array directly. As we
+ // do not expect a lot of iterations, this seems too involved and brittle.
+
+ for (size_t i = 1; ; ++i) {
+ std::string name = GetMultiDexClassesDexName(i);
+ std::string fake_location = GetMultiDexLocation(i, location.c_str());
+ std::unique_ptr<const DexFile> next_dex_file(OpenOneDexFileFromZip(zip_archive,
+ name.c_str(),
+ fake_location,
+ verify_checksum,
+ error_msg,
+ &error_code));
+ if (next_dex_file.get() == nullptr) {
+ if (error_code != ZipOpenErrorCode::kEntryNotFound) {
+ LOG(WARNING) << "Zip open failed: " << *error_msg;
+ }
+ break;
+ } else {
+ dex_files->push_back(std::move(next_dex_file));
+ }
+
+ if (i == kWarnOnManyDexFilesThreshold) {
+ LOG(WARNING) << location << " has in excess of " << kWarnOnManyDexFilesThreshold
+ << " dex files. Please consider coalescing and shrinking the number to "
+ "avoid runtime overhead.";
+ }
+
+ if (i == std::numeric_limits<size_t>::max()) {
+ LOG(ERROR) << "Overflow in number of dex files!";
+ break;
+ }
+ }
+
+ return true;
+ }
+}
+
+std::unique_ptr<DexFile> DexFileLoader::OpenCommon(const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ VerifyResult* verify_result) {
+ if (verify_result != nullptr) {
+ *verify_result = VerifyResult::kVerifyNotAttempted;
+ }
+ std::unique_ptr<DexFile> dex_file;
+ if (NativeDexFile::IsMagicValid(base)) {
+ dex_file.reset(new NativeDexFile(base, size, location, location_checksum, oat_dex_file));
+ } else {
+ return nullptr;
+ }
+ if (dex_file == nullptr) {
+ *error_msg = StringPrintf("Failed to open dex file '%s' from memory: %s", location.c_str(),
+ error_msg->c_str());
+ return nullptr;
+ }
+ if (!dex_file->Init(error_msg)) {
+ dex_file.reset();
+ return nullptr;
+ }
+ if (verify && !DexFileVerifier::Verify(dex_file.get(),
+ dex_file->Begin(),
+ dex_file->Size(),
+ location.c_str(),
+ verify_checksum,
+ error_msg)) {
+ if (verify_result != nullptr) {
+ *verify_result = VerifyResult::kVerifyFailed;
+ }
+ return nullptr;
+ }
+ if (verify_result != nullptr) {
+ *verify_result = VerifyResult::kVerifySucceeded;
+ }
+ return dex_file;
+}
+
+} // namespace art
diff --git a/runtime/dex_file_loader.h b/runtime/dex_file_loader.h
new file mode 100644
index 0000000..61b5c71
--- /dev/null
+++ b/runtime/dex_file_loader.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_DEX_FILE_LOADER_H_
+#define ART_RUNTIME_DEX_FILE_LOADER_H_
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace art {
+
+class DexFile;
+class MemMap;
+class OatDexFile;
+class ZipArchive;
+
+// Class that is used to open dex files and deal with corresponding multidex and location logic.
+class DexFileLoader {
+ public:
+ // Name of the DexFile entry within a zip archive.
+ static constexpr const char* kClassesDex = "classes.dex";
+
+ // The separator character in MultiDex locations.
+ static constexpr char kMultiDexSeparator = '!';
+
+ // Return true if the magic is valid for dex or cdex.
+ static bool IsValidMagic(uint32_t magic);
+ static bool IsValidMagic(const uint8_t* magic);
+
+ // Returns the checksums of a file for comparison with GetLocationChecksum().
+ // For .dex files, this is the single header checksum.
+ // For zip files, this is the zip entry CRC32 checksum for classes.dex and
+ // each additional multidex entry classes2.dex, classes3.dex, etc.
+ // Return true if the checksums could be found, false otherwise.
+ static bool GetMultiDexChecksums(const char* filename,
+ std::vector<uint32_t>* checksums,
+ std::string* error_msg);
+
+ // Check whether a location denotes a multidex dex file. This is a very simple check: returns
+ // whether the string contains the separator character.
+ static bool IsMultiDexLocation(const char* location);
+
+ // Opens .dex file, backed by existing memory
+ static std::unique_ptr<const DexFile> Open(const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg);
+
+ // Opens .dex file that has been memory-mapped by the caller.
+ static std::unique_ptr<const DexFile> Open(const std::string& location,
+ uint32_t location_checksum,
+ std::unique_ptr<MemMap> mem_map,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg);
+
+ // Opens all .dex files found in the file, guessing the container format based on file extension.
+ static bool Open(const char* filename,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files);
+
+ // Open a single dex file from an fd. This function closes the fd.
+ static std::unique_ptr<const DexFile> OpenDex(int fd,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg);
+
+ // Opens dex files from within a .jar, .zip, or .apk file
+ static bool OpenZip(int fd,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files);
+
+ // Return the name of the index-th classes.dex in a multidex zip file. This is classes.dex for
+ // index == 0, and classes{index + 1}.dex else.
+ static std::string GetMultiDexClassesDexName(size_t index);
+
+ // Return the (possibly synthetic) dex location for a multidex entry. This is dex_location for
+ // index == 0, and dex_location + multi-dex-separator + GetMultiDexClassesDexName(index) else.
+ static std::string GetMultiDexLocation(size_t index, const char* dex_location);
+
+ // Returns the canonical form of the given dex location.
+ //
+ // There are different flavors of "dex locations" as follows:
+ // the file name of a dex file:
+ // The actual file path that the dex file has on disk.
+ // dex_location:
+ // This acts as a key for the class linker to know which dex file to load.
+ // It may correspond to either an old odex file or a particular dex file
+ // inside an oat file. In the first case it will also match the file name
+ // of the dex file. In the second case (oat) it will include the file name
+ // and possibly some multidex annotation to uniquely identify it.
+ // canonical_dex_location:
+ // the dex_location where its file name part has been made canonical.
+ static std::string GetDexCanonicalLocation(const char* dex_location);
+
+ // For normal dex files, location and base location coincide. If a dex file is part of a multidex
+ // archive, the base location is the name of the originating jar/apk, stripped of any internal
+ // classes*.dex path.
+ static std::string GetBaseLocation(const char* location) {
+ const char* pos = strrchr(location, kMultiDexSeparator);
+ return (pos == nullptr) ? location : std::string(location, pos - location);
+ }
+
+ static std::string GetBaseLocation(const std::string& location) {
+ return GetBaseLocation(location.c_str());
+ }
+
+ // Returns the '!classes*.dex' part of the dex location. Returns an empty
+ // string if there is no multidex suffix for the given location.
+ // The kMultiDexSeparator is included in the returned suffix.
+ static std::string GetMultiDexSuffix(const std::string& location) {
+ size_t pos = location.rfind(kMultiDexSeparator);
+ return (pos == std::string::npos) ? std::string() : location.substr(pos);
+ }
+
+ private:
+ static std::unique_ptr<const DexFile> OpenFile(int fd,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg);
+
+ enum class ZipOpenErrorCode {
+ kNoError,
+ kEntryNotFound,
+ kExtractToMemoryError,
+ kDexFileError,
+ kMakeReadOnlyError,
+ kVerifyError
+ };
+
+ // Open all classesXXX.dex files from a zip archive.
+ static bool OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files);
+
+ // Opens .dex file from the entry_name in a zip archive. error_code is undefined when non-null
+ // return.
+ static std::unique_ptr<const DexFile> OpenOneDexFileFromZip(const ZipArchive& zip_archive,
+ const char* entry_name,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg,
+ ZipOpenErrorCode* error_code);
+
+ enum class VerifyResult {
+ kVerifyNotAttempted,
+ kVerifySucceeded,
+ kVerifyFailed
+ };
+
+ static std::unique_ptr<DexFile> OpenCommon(const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ VerifyResult* verify_result = nullptr);
+
+
+ // Opens a .dex file at the given address, optionally backed by a MemMap
+ static std::unique_ptr<const DexFile> OpenMemory(const uint8_t* dex_file,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ std::unique_ptr<MemMap> mem_map,
+ const OatDexFile* oat_dex_file,
+ std::string* error_msg);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_DEX_FILE_LOADER_H_
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 67cd428..b301137 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -24,6 +24,7 @@
#include "base/unix_file/fd_file.h"
#include "common_runtime_test.h"
#include "dex_file-inl.h"
+#include "dex_file_loader.h"
#include "mem_map.h"
#include "os.h"
#include "scoped_thread_state_change-inl.h"
@@ -235,7 +236,7 @@
ScopedObjectAccess soa(Thread::Current());
static constexpr bool kVerifyChecksum = true;
std::vector<std::unique_ptr<const DexFile>> tmp;
- bool success = DexFile::Open(location, location, kVerifyChecksum, error_msg, &tmp);
+ bool success = DexFileLoader::Open(location, location, kVerifyChecksum, error_msg, &tmp);
if (success) {
for (std::unique_ptr<const DexFile>& dex_file : tmp) {
EXPECT_EQ(PROT_READ, dex_file->GetPermissions());
@@ -274,12 +275,12 @@
/* reuse */ false,
&error_message));
memcpy(region->Begin(), dex_bytes.data(), dex_bytes.size());
- std::unique_ptr<const DexFile> dex_file(DexFile::Open(location,
- location_checksum,
- std::move(region),
- /* verify */ true,
- /* verify_checksum */ true,
- &error_message));
+ std::unique_ptr<const DexFile> dex_file(DexFileLoader::Open(location,
+ location_checksum,
+ std::move(region),
+ /* verify */ true,
+ /* verify_checksum */ true,
+ &error_message));
if (expect_success) {
CHECK(dex_file != nullptr) << error_message;
} else {
@@ -365,7 +366,7 @@
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_FALSE(DexFile::Open(location, location, kVerifyChecksum, &error_msg, &dex_files));
+ ASSERT_FALSE(DexFileLoader::Open(location, location, kVerifyChecksum, &error_msg, &dex_files));
}
TEST_F(DexFileTest, Version41Rejected) {
@@ -377,7 +378,7 @@
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_FALSE(DexFile::Open(location, location, kVerifyChecksum, &error_msg, &dex_files));
+ ASSERT_FALSE(DexFileLoader::Open(location, location, kVerifyChecksum, &error_msg, &dex_files));
}
TEST_F(DexFileTest, ZeroLengthDexRejected) {
@@ -389,7 +390,7 @@
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_FALSE(DexFile::Open(location, location, kVerifyChecksum, &error_msg, &dex_files));
+ ASSERT_FALSE(DexFileLoader::Open(location, location, kVerifyChecksum, &error_msg, &dex_files));
}
TEST_F(DexFileTest, GetLocationChecksum) {
@@ -402,7 +403,9 @@
std::vector<uint32_t> checksums;
ScopedObjectAccess soa(Thread::Current());
std::string error_msg;
- EXPECT_TRUE(DexFile::GetMultiDexChecksums(GetLibCoreDexFileNames()[0].c_str(), &checksums, &error_msg))
+ EXPECT_TRUE(DexFileLoader::GetMultiDexChecksums(GetLibCoreDexFileNames()[0].c_str(),
+ &checksums,
+ &error_msg))
<< error_msg;
ASSERT_EQ(1U, checksums.size());
EXPECT_EQ(java_lang_dex_file_->GetLocationChecksum(), checksums[0]);
@@ -412,18 +415,18 @@
std::string error_msg;
std::vector<uint32_t> checksums;
std::string multidex_file = GetTestDexFileName("MultiDex");
- EXPECT_TRUE(DexFile::GetMultiDexChecksums(multidex_file.c_str(),
- &checksums,
- &error_msg)) << error_msg;
+ EXPECT_TRUE(DexFileLoader::GetMultiDexChecksums(multidex_file.c_str(),
+ &checksums,
+ &error_msg)) << error_msg;
std::vector<std::unique_ptr<const DexFile>> dexes = OpenTestDexFiles("MultiDex");
ASSERT_EQ(2U, dexes.size());
ASSERT_EQ(2U, checksums.size());
- EXPECT_EQ(dexes[0]->GetLocation(), DexFile::GetMultiDexLocation(0, multidex_file.c_str()));
+ EXPECT_EQ(dexes[0]->GetLocation(), DexFileLoader::GetMultiDexLocation(0, multidex_file.c_str()));
EXPECT_EQ(dexes[0]->GetLocationChecksum(), checksums[0]);
- EXPECT_EQ(dexes[1]->GetLocation(), DexFile::GetMultiDexLocation(1, multidex_file.c_str()));
+ EXPECT_EQ(dexes[1]->GetLocation(), DexFileLoader::GetMultiDexLocation(1, multidex_file.c_str()));
EXPECT_EQ(dexes[1]->GetLocationChecksum(), checksums[1]);
}
@@ -625,20 +628,20 @@
}
TEST_F(DexFileTest, GetMultiDexClassesDexName) {
- ASSERT_EQ("classes.dex", DexFile::GetMultiDexClassesDexName(0));
- ASSERT_EQ("classes2.dex", DexFile::GetMultiDexClassesDexName(1));
- ASSERT_EQ("classes3.dex", DexFile::GetMultiDexClassesDexName(2));
- ASSERT_EQ("classes100.dex", DexFile::GetMultiDexClassesDexName(99));
+ ASSERT_EQ("classes.dex", DexFileLoader::GetMultiDexClassesDexName(0));
+ ASSERT_EQ("classes2.dex", DexFileLoader::GetMultiDexClassesDexName(1));
+ ASSERT_EQ("classes3.dex", DexFileLoader::GetMultiDexClassesDexName(2));
+ ASSERT_EQ("classes100.dex", DexFileLoader::GetMultiDexClassesDexName(99));
}
TEST_F(DexFileTest, GetMultiDexLocation) {
std::string dex_location_str = "/system/app/framework.jar";
const char* dex_location = dex_location_str.c_str();
- ASSERT_EQ("/system/app/framework.jar", DexFile::GetMultiDexLocation(0, dex_location));
+ ASSERT_EQ("/system/app/framework.jar", DexFileLoader::GetMultiDexLocation(0, dex_location));
ASSERT_EQ("/system/app/framework.jar!classes2.dex",
- DexFile::GetMultiDexLocation(1, dex_location));
+ DexFileLoader::GetMultiDexLocation(1, dex_location));
ASSERT_EQ("/system/app/framework.jar!classes101.dex",
- DexFile::GetMultiDexLocation(100, dex_location));
+ DexFileLoader::GetMultiDexLocation(100, dex_location));
}
TEST_F(DexFileTest, GetDexCanonicalLocation) {
@@ -646,28 +649,30 @@
UniqueCPtr<const char[]> dex_location_real(realpath(file.GetFilename().c_str(), nullptr));
std::string dex_location(dex_location_real.get());
- ASSERT_EQ(dex_location, DexFile::GetDexCanonicalLocation(dex_location.c_str()));
- std::string multidex_location = DexFile::GetMultiDexLocation(1, dex_location.c_str());
- ASSERT_EQ(multidex_location, DexFile::GetDexCanonicalLocation(multidex_location.c_str()));
+ ASSERT_EQ(dex_location, DexFileLoader::GetDexCanonicalLocation(dex_location.c_str()));
+ std::string multidex_location = DexFileLoader::GetMultiDexLocation(1, dex_location.c_str());
+ ASSERT_EQ(multidex_location, DexFileLoader::GetDexCanonicalLocation(multidex_location.c_str()));
std::string dex_location_sym = dex_location + "symlink";
ASSERT_EQ(0, symlink(dex_location.c_str(), dex_location_sym.c_str()));
- ASSERT_EQ(dex_location, DexFile::GetDexCanonicalLocation(dex_location_sym.c_str()));
+ ASSERT_EQ(dex_location, DexFileLoader::GetDexCanonicalLocation(dex_location_sym.c_str()));
- std::string multidex_location_sym = DexFile::GetMultiDexLocation(1, dex_location_sym.c_str());
- ASSERT_EQ(multidex_location, DexFile::GetDexCanonicalLocation(multidex_location_sym.c_str()));
+ std::string multidex_location_sym = DexFileLoader::GetMultiDexLocation(
+ 1, dex_location_sym.c_str());
+ ASSERT_EQ(multidex_location,
+ DexFileLoader::GetDexCanonicalLocation(multidex_location_sym.c_str()));
ASSERT_EQ(0, unlink(dex_location_sym.c_str()));
}
TEST(DexFileUtilsTest, GetBaseLocationAndMultiDexSuffix) {
- EXPECT_EQ("/foo/bar/baz.jar", DexFile::GetBaseLocation("/foo/bar/baz.jar"));
- EXPECT_EQ("/foo/bar/baz.jar", DexFile::GetBaseLocation("/foo/bar/baz.jar!classes2.dex"));
- EXPECT_EQ("/foo/bar/baz.jar", DexFile::GetBaseLocation("/foo/bar/baz.jar!classes8.dex"));
- EXPECT_EQ("", DexFile::GetMultiDexSuffix("/foo/bar/baz.jar"));
- EXPECT_EQ("!classes2.dex", DexFile::GetMultiDexSuffix("/foo/bar/baz.jar!classes2.dex"));
- EXPECT_EQ("!classes8.dex", DexFile::GetMultiDexSuffix("/foo/bar/baz.jar!classes8.dex"));
+ EXPECT_EQ("/foo/bar/baz.jar", DexFileLoader::GetBaseLocation("/foo/bar/baz.jar"));
+ EXPECT_EQ("/foo/bar/baz.jar", DexFileLoader::GetBaseLocation("/foo/bar/baz.jar!classes2.dex"));
+ EXPECT_EQ("/foo/bar/baz.jar", DexFileLoader::GetBaseLocation("/foo/bar/baz.jar!classes8.dex"));
+ EXPECT_EQ("", DexFileLoader::GetMultiDexSuffix("/foo/bar/baz.jar"));
+ EXPECT_EQ("!classes2.dex", DexFileLoader::GetMultiDexSuffix("/foo/bar/baz.jar!classes2.dex"));
+ EXPECT_EQ("!classes8.dex", DexFileLoader::GetMultiDexSuffix("/foo/bar/baz.jar!classes8.dex"));
}
TEST_F(DexFileTest, ZipOpenClassesPresent) {
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index 21de059..af2d535 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -27,8 +27,10 @@
#include "base/unix_file/fd_file.h"
#include "common_runtime_test.h"
#include "dex_file-inl.h"
+#include "dex_file_loader.h"
#include "dex_file_types.h"
#include "leb128.h"
+#include "native_dex_file.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
#include "utils.h"
@@ -55,7 +57,7 @@
class DexFileVerifierTest : public CommonRuntimeTest {
protected:
DexFile* GetDexFile(const uint8_t* dex_bytes, size_t length) {
- return new DexFile(dex_bytes, length, "tmp", 0, nullptr);
+ return new NativeDexFile(dex_bytes, length, "tmp", 0, nullptr);
}
void VerifyModification(const char* dex_file_base64_content,
@@ -112,7 +114,7 @@
// read dex file
ScopedObjectAccess soa(Thread::Current());
std::vector<std::unique_ptr<const DexFile>> tmp;
- bool success = DexFile::Open(location, location, true, error_msg, &tmp);
+ bool success = DexFileLoader::Open(location, location, true, error_msg, &tmp);
CHECK(success) << *error_msg;
EXPECT_EQ(1U, tmp.size());
std::unique_ptr<const DexFile> dex_file = std::move(tmp[0]);
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 732c707..f0eada3 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -35,6 +35,7 @@
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
+#include "dex_file_loader.h"
#include "exec_utils.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "image-inl.h"
@@ -1829,12 +1830,12 @@
// Skip multidex locations - These will be checked when we visit their
// corresponding primary non-multidex location.
- if (DexFile::IsMultiDexLocation(dex_file_location.c_str())) {
+ if (DexFileLoader::IsMultiDexLocation(dex_file_location.c_str())) {
continue;
}
std::vector<uint32_t> checksums;
- if (!DexFile::GetMultiDexChecksums(dex_file_location.c_str(), &checksums, error_msg)) {
+ if (!DexFileLoader::GetMultiDexChecksums(dex_file_location.c_str(), &checksums, error_msg)) {
*error_msg = StringPrintf("ValidateOatFile failed to get checksums of dex file '%s' "
"referenced by oat file %s: %s",
dex_file_location.c_str(),
@@ -1855,7 +1856,9 @@
// Verify checksums for any related multidex entries.
for (size_t i = 1; i < checksums.size(); i++) {
- std::string multi_dex_location = DexFile::GetMultiDexLocation(i, dex_file_location.c_str());
+ std::string multi_dex_location = DexFileLoader::GetMultiDexLocation(
+ i,
+ dex_file_location.c_str());
const OatFile::OatDexFile* multi_dex = oat_file.GetOatDexFile(multi_dex_location.c_str(),
nullptr,
error_msg);
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 2dd4db3..2c8ec47 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -511,7 +511,7 @@
return true;
}
-size_t IndirectReferenceTable::FreeCapacity() {
+size_t IndirectReferenceTable::FreeCapacity() const {
return max_entries_ - segment_state_.top_index;
}
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 7daf01c..6675099 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -293,7 +293,7 @@
REQUIRES_SHARED(Locks::mutator_lock_);
// See implementation of EnsureFreeCapacity. We'll only state here how much is trivially free,
// without recovering holes. Thus this is a conservative estimate.
- size_t FreeCapacity() REQUIRES_SHARED(Locks::mutator_lock_);
+ size_t FreeCapacity() const;
// Note IrtIterator does not have a read barrier as it's used to visit roots.
IrtIterator begin() {
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 5a16053..73746e1 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -28,6 +28,8 @@
#include "check_jni.h"
#include "dex_file-inl.h"
#include "fault_handler.h"
+#include "gc/allocation_record.h"
+#include "gc/heap.h"
#include "gc_root-inl.h"
#include "indirect_reference_table-inl.h"
#include "jni_internal.h"
@@ -468,7 +470,11 @@
weak_globals_add_condition_("weak globals add condition",
(CHECK(Locks::jni_weak_globals_lock_ != nullptr),
*Locks::jni_weak_globals_lock_)),
- env_hooks_() {
+ env_hooks_(),
+ enable_allocation_tracking_delta_(
+ runtime_options.GetOrDefault(RuntimeArgumentMap::GlobalRefAllocStackTraceLimit)),
+ allocation_tracking_enabled_(false),
+ old_allocation_tracking_state_(false) {
functions = unchecked_functions_;
SetCheckJniEnabled(runtime_options.Exists(RuntimeArgumentMap::CheckJni));
}
@@ -583,18 +589,55 @@
return true;
}
+void JavaVMExt::CheckGlobalRefAllocationTracking() {
+ if (LIKELY(enable_allocation_tracking_delta_ == 0)) {
+ return;
+ }
+ size_t simple_free_capacity = globals_.FreeCapacity();
+ if (UNLIKELY(simple_free_capacity <= enable_allocation_tracking_delta_)) {
+ if (!allocation_tracking_enabled_) {
+ LOG(WARNING) << "Global reference storage appears close to exhaustion, program termination "
+ << "may be imminent. Enabling allocation tracking to improve abort diagnostics. "
+ << "This will result in program slow-down.";
+
+ old_allocation_tracking_state_ = runtime_->GetHeap()->IsAllocTrackingEnabled();
+ if (!old_allocation_tracking_state_) {
+ // Need to be guaranteed suspended.
+ ScopedObjectAccess soa(Thread::Current());
+ ScopedThreadSuspension sts(soa.Self(), ThreadState::kNative);
+ gc::AllocRecordObjectMap::SetAllocTrackingEnabled(true);
+ }
+ allocation_tracking_enabled_ = true;
+ }
+ } else {
+ if (UNLIKELY(allocation_tracking_enabled_)) {
+ if (!old_allocation_tracking_state_) {
+ // Need to be guaranteed suspended.
+ ScopedObjectAccess soa(Thread::Current());
+ ScopedThreadSuspension sts(soa.Self(), ThreadState::kNative);
+ gc::AllocRecordObjectMap::SetAllocTrackingEnabled(false);
+ }
+ allocation_tracking_enabled_ = false;
+ }
+ }
+}
+
jobject JavaVMExt::AddGlobalRef(Thread* self, ObjPtr<mirror::Object> obj) {
// Check for null after decoding the object to handle cleared weak globals.
if (obj == nullptr) {
return nullptr;
}
- WriterMutexLock mu(self, *Locks::jni_globals_lock_);
+ IndirectRef ref;
std::string error_msg;
- IndirectRef ref = globals_.Add(kIRTFirstSegment, obj, &error_msg);
+ {
+ WriterMutexLock mu(self, *Locks::jni_globals_lock_);
+ ref = globals_.Add(kIRTFirstSegment, obj, &error_msg);
+ }
if (UNLIKELY(ref == nullptr)) {
LOG(FATAL) << error_msg;
UNREACHABLE();
}
+ CheckGlobalRefAllocationTracking();
return reinterpret_cast<jobject>(ref);
}
@@ -625,11 +668,14 @@
if (obj == nullptr) {
return;
}
- WriterMutexLock mu(self, *Locks::jni_globals_lock_);
- if (!globals_.Remove(kIRTFirstSegment, obj)) {
- LOG(WARNING) << "JNI WARNING: DeleteGlobalRef(" << obj << ") "
- << "failed to find entry";
+ {
+ WriterMutexLock mu(self, *Locks::jni_globals_lock_);
+ if (!globals_.Remove(kIRTFirstSegment, obj)) {
+ LOG(WARNING) << "JNI WARNING: DeleteGlobalRef(" << obj << ") "
+ << "failed to find entry";
+ }
}
+ CheckGlobalRefAllocationTracking();
}
void JavaVMExt::DeleteWeakGlobalRef(Thread* self, jweak obj) {
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index b767b19..0510d6a 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -211,6 +211,8 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::jni_weak_globals_lock_);
+ void CheckGlobalRefAllocationTracking();
+
Runtime* const runtime_;
// Used for testing. By default, we'll LOG(FATAL) the reason.
@@ -247,6 +249,10 @@
// TODO Maybe move this to Runtime.
std::vector<GetEnvHook> env_hooks_;
+ size_t enable_allocation_tracking_delta_;
+ std::atomic<bool> allocation_tracking_enabled_;
+ std::atomic<bool> old_allocation_tracking_state_;
+
DISALLOW_COPY_AND_ASSIGN(JavaVMExt);
};
diff --git a/runtime/java_vm_ext_test.cc b/runtime/java_vm_ext_test.cc
index 2cbfa81..a15ec56 100644
--- a/runtime/java_vm_ext_test.cc
+++ b/runtime/java_vm_ext_test.cc
@@ -19,6 +19,7 @@
#include <pthread.h>
#include "common_runtime_test.h"
+#include "gc/heap.h"
#include "java_vm_ext.h"
#include "runtime.h"
@@ -134,4 +135,49 @@
EXPECT_EQ(JNI_ERR, err);
}
+class JavaVmExtStackTraceTest : public JavaVmExtTest {
+ protected:
+ void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ options->emplace_back("-XX:GlobalRefAllocStackTraceLimit=50000", nullptr);
+ }
+};
+
+TEST_F(JavaVmExtStackTraceTest, TestEnableDisable) {
+ ASSERT_FALSE(Runtime::Current()->GetHeap()->IsAllocTrackingEnabled());
+
+ JNIEnv* env;
+ jint ok = vm_->AttachCurrentThread(&env, nullptr);
+ ASSERT_EQ(JNI_OK, ok);
+
+ std::vector<jobject> global_refs_;
+ jobject local_ref = env->NewStringUTF("Dummy");
+ for (size_t i = 0; i < 2000; ++i) {
+ global_refs_.push_back(env->NewGlobalRef(local_ref));
+ }
+
+ EXPECT_TRUE(Runtime::Current()->GetHeap()->IsAllocTrackingEnabled());
+
+ for (jobject global_ref : global_refs_) {
+ env->DeleteGlobalRef(global_ref);
+ }
+
+ EXPECT_FALSE(Runtime::Current()->GetHeap()->IsAllocTrackingEnabled());
+
+ global_refs_.clear();
+ for (size_t i = 0; i < 2000; ++i) {
+ global_refs_.push_back(env->NewGlobalRef(local_ref));
+ }
+
+ EXPECT_TRUE(Runtime::Current()->GetHeap()->IsAllocTrackingEnabled());
+
+ for (jobject global_ref : global_refs_) {
+ env->DeleteGlobalRef(global_ref);
+ }
+
+ EXPECT_FALSE(Runtime::Current()->GetHeap()->IsAllocTrackingEnabled());
+
+ ok = vm_->DetachCurrentThread();
+ EXPECT_EQ(JNI_OK, ok);
+}
+
} // namespace art
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index e122c6d..47615f5 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -26,6 +26,7 @@
#include "base/time_utils.h"
#include "cha.h"
#include "debugger_interface.h"
+#include "dex_file_loader.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "gc/scoped_gc_critical_section.h"
@@ -1350,7 +1351,8 @@
for (const ProfilingInfo* info : profiling_infos_) {
ArtMethod* method = info->GetMethod();
const DexFile* dex_file = method->GetDexFile();
- if (!ContainsElement(dex_base_locations, dex_file->GetBaseLocation())) {
+ const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation());
+ if (!ContainsElement(dex_base_locations, base_location)) {
// Skip dex files which are not profiled.
continue;
}
@@ -1404,7 +1406,8 @@
is_missing_types = true;
continue;
}
- if (ContainsElement(dex_base_locations, class_dex_file->GetBaseLocation())) {
+ if (ContainsElement(dex_base_locations,
+ DexFileLoader::GetBaseLocation(class_dex_file->GetLocation()))) {
// Only consider classes from the same apk (including multidex).
profile_classes.emplace_back(/*ProfileMethodInfo::ProfileClassReference*/
class_dex_file, type_index);
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index f9603a7..19501de 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -40,6 +40,7 @@
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/unix_file/fd_file.h"
+#include "dex_file_loader.h"
#include "jit/profiling_info.h"
#include "os.h"
#include "safe_map.h"
@@ -1537,7 +1538,7 @@
os << dex_data->profile_key;
} else {
// Replace the (empty) multidex suffix of the first key with a substitute for easier reading.
- std::string multidex_suffix = DexFile::GetMultiDexSuffix(dex_data->profile_key);
+ std::string multidex_suffix = DexFileLoader::GetMultiDexSuffix(dex_data->profile_key);
os << (multidex_suffix.empty() ? kFirstDexFileKeySubstitute : multidex_suffix);
}
os << " [index=" << static_cast<uint32_t>(dex_data->profile_index) << "]";
@@ -1696,7 +1697,7 @@
const uint16_t kFavorSplit = 2;
for (uint16_t i = 0; i < number_of_dex_files; i++) {
- std::string dex_location = DexFile::GetMultiDexLocation(i, base_dex_location.c_str());
+ std::string dex_location = DexFileLoader::GetMultiDexLocation(i, base_dex_location.c_str());
std::string profile_key = GetProfileDexFileKey(dex_location);
for (uint16_t m = 0; m < number_of_methods; m++) {
@@ -1828,7 +1829,7 @@
ProfileCompilationInfo::DexFileData::FindOrAddMethod(uint16_t method_index) {
return &(method_map.FindOrAdd(
method_index,
- InlineCacheMap(std::less<uint16_t>(), arena_->Adapter(kArenaAllocProfile)))->second);
+ InlineCacheMap(std::less<uint16_t>(), allocator_->Adapter(kArenaAllocProfile)))->second);
}
// Mark a method as executed at least once.
@@ -1847,7 +1848,7 @@
if ((flags & MethodHotness::kFlagHot) != 0) {
method_map.FindOrAdd(
index,
- InlineCacheMap(std::less<uint16_t>(), arena_->Adapter(kArenaAllocProfile)));
+ InlineCacheMap(std::less<uint16_t>(), allocator_->Adapter(kArenaAllocProfile)));
}
return true;
}
diff --git a/runtime/jit/profile_compilation_info.h b/runtime/jit/profile_compilation_info.h
index 8889b34..8dbb43f 100644
--- a/runtime/jit/profile_compilation_info.h
+++ b/runtime/jit/profile_compilation_info.h
@@ -434,7 +434,7 @@
uint32_t location_checksum,
uint16_t index,
uint32_t num_methods)
- : arena_(allocator),
+ : allocator_(allocator),
profile_key(key),
profile_index(index),
checksum(location_checksum),
@@ -466,8 +466,8 @@
MethodHotness GetHotnessInfo(uint32_t dex_method_index) const;
- // The arena used to allocate new inline cache maps.
- ArenaAllocator* arena_;
+ // The allocator used to allocate new inline cache maps.
+ ArenaAllocator* const allocator_;
// The profile key this data belongs to.
std::string profile_key;
// The profile index of this dex file (matches ClassReference#dex_profile_index).
diff --git a/runtime/jit/profile_compilation_info_test.cc b/runtime/jit/profile_compilation_info_test.cc
index 2cb8294..f155d7e 100644
--- a/runtime/jit/profile_compilation_info_test.cc
+++ b/runtime/jit/profile_compilation_info_test.cc
@@ -39,7 +39,7 @@
class ProfileCompilationInfoTest : public CommonRuntimeTest {
public:
void PostRuntimeCreate() OVERRIDE {
- arena_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
+ allocator_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
}
protected:
@@ -176,7 +176,7 @@
// Creates an inline cache which will be destructed at the end of the test.
ProfileCompilationInfo::InlineCacheMap* CreateInlineCacheMap() {
used_inline_caches.emplace_back(new ProfileCompilationInfo::InlineCacheMap(
- std::less<uint16_t>(), arena_->Adapter(kArenaAllocProfile)));
+ std::less<uint16_t>(), allocator_->Adapter(kArenaAllocProfile)));
return used_inline_caches.back().get();
}
@@ -188,7 +188,7 @@
for (const auto& inline_cache : pmi.inline_caches) {
ProfileCompilationInfo::DexPcData& dex_pc_data =
ic_map->FindOrAdd(
- inline_cache.dex_pc, ProfileCompilationInfo::DexPcData(arena_.get()))->second;
+ inline_cache.dex_pc, ProfileCompilationInfo::DexPcData(allocator_.get()))->second;
if (inline_cache.is_missing_types) {
dex_pc_data.SetIsMissingTypes();
}
@@ -215,13 +215,13 @@
// Monomorphic
for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.AddClass(0, dex::TypeIndex(0));
ic_map->Put(dex_pc, dex_pc_data);
}
// Polymorphic
for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.AddClass(0, dex::TypeIndex(0));
dex_pc_data.AddClass(1, dex::TypeIndex(1));
dex_pc_data.AddClass(2, dex::TypeIndex(2));
@@ -230,13 +230,13 @@
}
// Megamorphic
for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.SetIsMegamorphic();
ic_map->Put(dex_pc, dex_pc_data);
}
// Missing types
for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.SetIsMissingTypes();
ic_map->Put(dex_pc, dex_pc_data);
}
@@ -273,7 +273,7 @@
static constexpr int kProfileMagicSize = 4;
static constexpr int kProfileVersionSize = 4;
- std::unique_ptr<ArenaAllocator> arena_;
+ std::unique_ptr<ArenaAllocator> allocator_;
// Cache of inline caches generated during tests.
// This makes it easier to pass data between different utilities and ensure that
@@ -730,7 +730,7 @@
pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
pmi.dex_references.emplace_back("dex_location2", /* checksum */ 2, kMaxMethodIds);
for (uint16_t dex_pc = 1; dex_pc < 5; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.AddClass(0, dex::TypeIndex(0));
dex_pc_data.AddClass(1, dex::TypeIndex(1));
ic_map->Put(dex_pc, dex_pc_data);
@@ -741,7 +741,7 @@
pmi_reindexed.dex_references.emplace_back("dex_location2", /* checksum */ 2, kMaxMethodIds);
pmi_reindexed.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
for (uint16_t dex_pc = 1; dex_pc < 5; dex_pc++) {
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.AddClass(1, dex::TypeIndex(0));
dex_pc_data.AddClass(0, dex::TypeIndex(1));
ic_map_reindexed->Put(dex_pc, dex_pc_data);
@@ -795,7 +795,7 @@
ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.SetIsMegamorphic();
ic_map->Put(/*dex_pc*/ 0, dex_pc_data);
@@ -825,7 +825,7 @@
ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
- ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.SetIsMissingTypes();
ic_map->Put(/*dex_pc*/ 0, dex_pc_data);
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 2bf8d8b..01853de 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -31,6 +31,7 @@
#include "base/time_utils.h"
#include "class_table-inl.h"
#include "compiler_filter.h"
+#include "dex_file_loader.h"
#include "dex_reference_collection.h"
#include "gc/collector_type.h"
#include "gc/gc_cause.h"
@@ -414,7 +415,8 @@
const std::set<std::string>& locations = it.second;
for (const auto& pair : hot_methods.GetMap()) {
const DexFile* const dex_file = pair.first;
- if (locations.find(dex_file->GetBaseLocation()) != locations.end()) {
+ const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation());
+ if (locations.find(base_location) != locations.end()) {
const MethodReferenceCollection::IndexVector& indices = pair.second;
uint8_t flags = Hotness::kFlagHot;
flags |= startup ? Hotness::kFlagStartup : Hotness::kFlagPostStartup;
@@ -427,7 +429,8 @@
}
for (const auto& pair : sampled_methods.GetMap()) {
const DexFile* const dex_file = pair.first;
- if (locations.find(dex_file->GetBaseLocation()) != locations.end()) {
+ const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation());
+ if (locations.find(base_location) != locations.end()) {
const MethodReferenceCollection::IndexVector& indices = pair.second;
cached_info->AddMethodsForDex(startup ? Hotness::kFlagStartup : Hotness::kFlagPostStartup,
dex_file,
@@ -437,14 +440,15 @@
}
for (const auto& pair : resolved_classes.GetMap()) {
const DexFile* const dex_file = pair.first;
- if (locations.find(dex_file->GetBaseLocation()) != locations.end()) {
+ const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation());
+ if (locations.find(base_location) != locations.end()) {
const TypeReferenceCollection::IndexVector& classes = pair.second;
VLOG(profiler) << "Added " << classes.size() << " classes for location "
- << dex_file->GetBaseLocation()
+ << base_location
<< " (" << dex_file->GetLocation() << ")";
cached_info->AddClassesForDex(dex_file, classes.begin(), classes.end());
} else {
- VLOG(profiler) << "Location not found " << dex_file->GetBaseLocation()
+ VLOG(profiler) << "Location not found " << base_location
<< " (" << dex_file->GetLocation() << ")";
}
}
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index d40e6d9..e75d097 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -26,6 +26,7 @@
#include "common_throws.h"
#include "compiler_filter.h"
#include "dex_file-inl.h"
+#include "dex_file_loader.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
@@ -185,12 +186,12 @@
dex_mem_map->Begin(),
dex_mem_map->End());
std::string error_message;
- std::unique_ptr<const DexFile> dex_file(DexFile::Open(location,
- 0,
- std::move(dex_mem_map),
- /* verify */ true,
- /* verify_location */ true,
- &error_message));
+ std::unique_ptr<const DexFile> dex_file(DexFileLoader::Open(location,
+ 0,
+ std::move(dex_mem_map),
+ /* verify */ true,
+ /* verify_checksum */ true,
+ &error_message));
if (dex_file == nullptr) {
ScopedObjectAccess soa(env);
ThrowWrappedIOException("%s", error_message.c_str());
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index 4034e8c..413149c 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -17,6 +17,7 @@
#include "java_lang_VMClassLoader.h"
#include "class_linker.h"
+#include "dex_file_loader.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
@@ -135,7 +136,7 @@
const DexFile* dex_file = path[i];
// For multidex locations, e.g., x.jar!classes2.dex, we want to look into x.jar.
- const std::string& location(dex_file->GetBaseLocation());
+ const std::string location(DexFileLoader::GetBaseLocation(dex_file->GetLocation()));
ScopedLocalRef<jstring> javaPath(env, env->NewStringUTF(location.c_str()));
if (javaPath.get() == nullptr) {
diff --git a/runtime/native_dex_file.cc b/runtime/native_dex_file.cc
new file mode 100644
index 0000000..9a93696
--- /dev/null
+++ b/runtime/native_dex_file.cc
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "native_dex_file.h"
+
+namespace art {
+
+const uint8_t NativeDexFile::kDexMagic[] = { 'd', 'e', 'x', '\n' };
+const uint8_t NativeDexFile::kDexMagicVersions[NativeDexFile::kNumDexVersions]
+ [NativeDexFile::kDexVersionLen] = {
+ {'0', '3', '5', '\0'},
+ // Dex version 036 skipped because of an old dalvik bug on some versions of android where dex
+ // files with that version number would erroneously be accepted and run.
+ {'0', '3', '7', '\0'},
+ // Dex version 038: Android "O" and beyond.
+ {'0', '3', '8', '\0'},
+ // Dex version 039: Beyond Android "O".
+ {'0', '3', '9', '\0'},
+};
+
+bool NativeDexFile::IsMagicValid(const uint8_t* magic) {
+ return (memcmp(magic, kDexMagic, sizeof(kDexMagic)) == 0);
+}
+
+bool NativeDexFile::IsVersionValid(const uint8_t* magic) {
+ const uint8_t* version = &magic[sizeof(kDexMagic)];
+ for (uint32_t i = 0; i < kNumDexVersions; i++) {
+ if (memcmp(version, kDexMagicVersions[i], kDexVersionLen) == 0) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool NativeDexFile::IsMagicValid() const {
+ return IsMagicValid(header_->magic_);
+}
+
+bool NativeDexFile::IsVersionValid() const {
+ return IsVersionValid(header_->magic_);
+}
+
+} // namespace art
diff --git a/runtime/native_dex_file.h b/runtime/native_dex_file.h
new file mode 100644
index 0000000..8f09e6d
--- /dev/null
+++ b/runtime/native_dex_file.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_DEX_FILE_H_
+#define ART_RUNTIME_NATIVE_DEX_FILE_H_
+
+#include <iosfwd>
+
+#include "dex_file.h"
+
+namespace art {
+
+class OatDexFile;
+
+// Native (ordinary) dex file. This is the format that is packaged in APKs and produced by tools.
+class NativeDexFile : public DexFile {
+ public:
+ static const uint8_t kDexMagic[kDexMagicSize];
+ static constexpr size_t kNumDexVersions = 4;
+ static const uint8_t kDexMagicVersions[kNumDexVersions][kDexVersionLen];
+
+ // Returns true if the byte string points to the magic value.
+ static bool IsMagicValid(const uint8_t* magic);
+ virtual bool IsMagicValid() const OVERRIDE;
+
+ // Returns true if the byte string after the magic is the correct value.
+ static bool IsVersionValid(const uint8_t* magic);
+ virtual bool IsVersionValid() const OVERRIDE;
+
+ private:
+ NativeDexFile(const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file)
+ : DexFile(base, size, location, location_checksum, oat_dex_file) {}
+
+ friend class DexFileLoader;
+ friend class DexFileVerifierTest;
+
+ ART_FRIEND_TEST(ClassLinkerTest, RegisterDexFileName); // for constructor
+
+ DISALLOW_COPY_AND_ASSIGN(NativeDexFile);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_DEX_FILE_H_
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 3f4cb94..ab820fb 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -26,6 +26,7 @@
#include <cstring>
#include <sstream>
#include <type_traits>
+#include <sys/stat.h>
// dlopen_ext support from bionic.
#ifdef ART_TARGET_ANDROID
@@ -41,6 +42,7 @@
#include "base/systrace.h"
#include "base/unix_file/fd_file.h"
#include "dex_file_types.h"
+#include "dex_file_loader.h"
#include "elf_file.h"
#include "elf_utils.h"
#include "gc_root.h"
@@ -48,6 +50,7 @@
#include "mem_map.h"
#include "mirror/class.h"
#include "mirror/object-inl.h"
+#include "native_dex_file.h"
#include "oat.h"
#include "oat_file-inl.h"
#include "oat_file_manager.h"
@@ -105,6 +108,19 @@
const char* abs_dex_location,
std::string* error_msg);
+ template <typename kOatFileBaseSubType>
+ static OatFileBase* OpenOatFile(int vdex_fd,
+ int oat_fd,
+ const std::string& vdex_filename,
+ const std::string& oat_filename,
+ uint8_t* requested_base,
+ uint8_t* oat_file_begin,
+ bool writable,
+ bool executable,
+ bool low_4gb,
+ const char* abs_dex_location,
+ std::string* error_msg);
+
protected:
OatFileBase(const std::string& filename, bool executable) : OatFile(filename, executable) {}
@@ -118,6 +134,12 @@
bool low_4gb,
std::string* error_msg);
+ bool LoadVdex(int vdex_fd,
+ const std::string& vdex_filename,
+ bool writable,
+ bool low_4gb,
+ std::string* error_msg);
+
virtual bool Load(const std::string& elf_filename,
uint8_t* oat_file_begin,
bool writable,
@@ -125,6 +147,13 @@
bool low_4gb,
std::string* error_msg) = 0;
+ virtual bool Load(int oat_fd,
+ uint8_t* oat_file_begin,
+ bool writable,
+ bool executable,
+ bool low_4gb,
+ std::string* error_msg) = 0;
+
bool ComputeFields(uint8_t* requested_base,
const std::string& file_path,
std::string* error_msg);
@@ -192,6 +221,46 @@
return ret.release();
}
+template <typename kOatFileBaseSubType>
+OatFileBase* OatFileBase::OpenOatFile(int vdex_fd,
+ int oat_fd,
+ const std::string& vdex_location,
+ const std::string& oat_location,
+ uint8_t* requested_base,
+ uint8_t* oat_file_begin,
+ bool writable,
+ bool executable,
+ bool low_4gb,
+ const char* abs_dex_location,
+ std::string* error_msg) {
+ std::unique_ptr<OatFileBase> ret(new kOatFileBaseSubType(oat_location, executable));
+
+ if (kIsVdexEnabled && !ret->LoadVdex(vdex_fd, vdex_location, writable, low_4gb, error_msg)) {
+ return nullptr;
+ }
+
+ if (!ret->Load(oat_fd,
+ oat_file_begin,
+ writable,
+ executable,
+ low_4gb,
+ error_msg)) {
+ return nullptr;
+ }
+
+ if (!ret->ComputeFields(requested_base, oat_location, error_msg)) {
+ return nullptr;
+ }
+
+ ret->PreSetup(oat_location);
+
+ if (!ret->Setup(abs_dex_location, error_msg)) {
+ return nullptr;
+ }
+
+ return ret.release();
+}
+
bool OatFileBase::LoadVdex(const std::string& vdex_filename,
bool writable,
bool low_4gb,
@@ -206,6 +275,33 @@
return true;
}
+bool OatFileBase::LoadVdex(int vdex_fd,
+ const std::string& vdex_filename,
+ bool writable,
+ bool low_4gb,
+ std::string* error_msg) {
+ if (vdex_fd != -1) {
+ struct stat s;
+ int rc = TEMP_FAILURE_RETRY(fstat(vdex_fd, &s));
+ if (rc == -1) {
+ PLOG(WARNING) << "Failed getting length of vdex file";
+ } else {
+ vdex_ = VdexFile::Open(vdex_fd,
+ s.st_size,
+ vdex_filename,
+ writable,
+ low_4gb,
+ false /* unquicken */,
+ error_msg);
+ if (vdex_.get() == nullptr) {
+ *error_msg = "Failed opening vdex file.";
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
bool OatFileBase::ComputeFields(uint8_t* requested_base,
const std::string& file_path,
std::string* error_msg) {
@@ -458,7 +554,9 @@
}
const uint8_t* dex_file_pointer = DexBegin() + dex_file_offset;
- if (UNLIKELY(!DexFile::IsMagicValid(dex_file_pointer))) {
+
+ const bool valid_magic = NativeDexFile::IsMagicValid(dex_file_pointer);
+ if (UNLIKELY(!valid_magic)) {
*error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with invalid "
"dex file magic '%s'",
GetLocation().c_str(),
@@ -467,7 +565,7 @@
dex_file_pointer);
return false;
}
- if (UNLIKELY(!DexFile::IsVersionValid(dex_file_pointer))) {
+ if (UNLIKELY(!NativeDexFile::IsVersionValid(dex_file_pointer))) {
*error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with invalid "
"dex file version '%s'",
GetLocation().c_str(),
@@ -611,7 +709,8 @@
reinterpret_cast<const DexFile::Header*>(dex_file_pointer)->method_ids_size_);
}
- std::string canonical_location = DexFile::GetDexCanonicalLocation(dex_file_location.c_str());
+ std::string canonical_location =
+ DexFileLoader::GetDexCanonicalLocation(dex_file_location.c_str());
// Create the OatDexFile and add it to the owning container.
OatDexFile* oat_dex_file = new OatDexFile(this,
@@ -712,6 +811,10 @@
bool low_4gb,
std::string* error_msg) OVERRIDE;
+ bool Load(int, uint8_t*, bool, bool, bool, std::string*) {
+ return false;
+ }
+
// Ask the linker where it mmaped the file and notify our mmap wrapper of the regions.
void PreSetup(const std::string& elf_filename) OVERRIDE;
@@ -973,6 +1076,13 @@
bool low_4gb,
std::string* error_msg) OVERRIDE;
+ bool Load(int oat_fd,
+ uint8_t* oat_file_begin, // Override where the file is loaded to if not null
+ bool writable,
+ bool executable,
+ bool low_4gb,
+ std::string* error_msg) OVERRIDE;
+
void PreSetup(const std::string& elf_filename ATTRIBUTE_UNUSED) OVERRIDE {
}
@@ -1065,6 +1175,31 @@
error_msg);
}
+bool ElfOatFile::Load(int oat_fd,
+ uint8_t* oat_file_begin, // Override where the file is loaded to if not null
+ bool writable,
+ bool executable,
+ bool low_4gb,
+ std::string* error_msg) {
+ ScopedTrace trace(__PRETTY_FUNCTION__);
+ if (oat_fd != -1) {
+ std::unique_ptr<File> file = std::make_unique<File>(oat_fd, false);
+ file->DisableAutoClose();
+ if (file == nullptr) {
+ *error_msg = StringPrintf("Failed to open oat filename for reading: %s",
+ strerror(errno));
+ return false;
+ }
+ return ElfOatFile::ElfFileOpen(file.get(),
+ oat_file_begin,
+ writable,
+ executable,
+ low_4gb,
+ error_msg);
+ }
+ return false;
+}
+
bool ElfOatFile::ElfFileOpen(File* file,
uint8_t* oat_file_begin,
bool writable,
@@ -1096,8 +1231,8 @@
const char* abs_dex_location, const std::string& rel_dex_location) {
if (abs_dex_location != nullptr && rel_dex_location[0] != '/') {
// Strip :classes<N>.dex used for secondary multidex files.
- std::string base = DexFile::GetBaseLocation(rel_dex_location);
- std::string multidex_suffix = DexFile::GetMultiDexSuffix(rel_dex_location);
+ std::string base = DexFileLoader::GetBaseLocation(rel_dex_location);
+ std::string multidex_suffix = DexFileLoader::GetMultiDexSuffix(rel_dex_location);
// Check if the base is a suffix of the provided abs_dex_location.
std::string target_suffix = "/" + base;
@@ -1194,6 +1329,33 @@
return with_internal;
}
+OatFile* OatFile::Open(int vdex_fd,
+ int oat_fd,
+ const std::string& oat_location,
+ uint8_t* requested_base,
+ uint8_t* oat_file_begin,
+ bool executable,
+ bool low_4gb,
+ const char* abs_dex_location,
+ std::string* error_msg) {
+ CHECK(!oat_location.empty()) << oat_location;
+
+ std::string vdex_location = GetVdexFilename(oat_location);
+
+ OatFile* with_internal = OatFileBase::OpenOatFile<ElfOatFile>(vdex_fd,
+ oat_fd,
+ vdex_location,
+ oat_location,
+ requested_base,
+ oat_file_begin,
+ false /* writable */,
+ executable,
+ low_4gb,
+ abs_dex_location,
+ error_msg);
+ return with_internal;
+}
+
OatFile* OatFile::OpenWritable(File* file,
const std::string& location,
const char* abs_dex_location,
@@ -1324,7 +1486,7 @@
oat_dex_file = secondary_lb->second; // May be null.
} else {
// We haven't seen this dex_location before, we must check the canonical location.
- std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location);
+ std::string dex_canonical_location = DexFileLoader::GetDexCanonicalLocation(dex_location);
if (dex_canonical_location != dex_location) {
StringPiece canonical_key(dex_canonical_location);
auto canonical_it = oat_dex_files_.find(canonical_key);
@@ -1342,7 +1504,7 @@
if (oat_dex_file == nullptr) {
if (error_msg != nullptr) {
- std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location);
+ std::string dex_canonical_location = DexFileLoader::GetDexCanonicalLocation(dex_location);
*error_msg = "Failed to find OatDexFile for DexFile " + std::string(dex_location)
+ " (canonical path " + dex_canonical_location + ") in OatFile " + GetLocation();
}
@@ -1352,7 +1514,7 @@
if (dex_location_checksum != nullptr &&
oat_dex_file->GetDexFileLocationChecksum() != *dex_location_checksum) {
if (error_msg != nullptr) {
- std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location);
+ std::string dex_canonical_location = DexFileLoader::GetDexCanonicalLocation(dex_location);
std::string checksum = StringPrintf("0x%08x", oat_dex_file->GetDexFileLocationChecksum());
std::string required_checksum = StringPrintf("0x%08x", *dex_location_checksum);
*error_msg = "OatDexFile for DexFile " + std::string(dex_location)
@@ -1408,14 +1570,14 @@
ScopedTrace trace(__PRETTY_FUNCTION__);
static constexpr bool kVerify = false;
static constexpr bool kVerifyChecksum = false;
- return DexFile::Open(dex_file_pointer_,
- FileSize(),
- dex_file_location_,
- dex_file_location_checksum_,
- this,
- kVerify,
- kVerifyChecksum,
- error_msg);
+ return DexFileLoader::Open(dex_file_pointer_,
+ FileSize(),
+ dex_file_location_,
+ dex_file_location_checksum_,
+ this,
+ kVerify,
+ kVerifyChecksum,
+ error_msg);
}
uint32_t OatFile::OatDexFile::GetOatClassOffset(uint16_t class_def_index) const {
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 04cb3a0..7d4e6df 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -89,6 +89,18 @@
const char* abs_dex_location,
std::string* error_msg);
+ // Similar to OatFile::Open(const std::string...), but accepts input vdex and
+ // odex files as file descriptors.
+ static OatFile* Open(int vdex_fd,
+ int oat_fd,
+ const std::string& oat_location,
+ uint8_t* requested_base,
+ uint8_t* oat_file_begin,
+ bool executable,
+ bool low_4gb,
+ const char* abs_dex_location,
+ std::string* error_msg);
+
// Open an oat file from an already opened File.
// Does not use dlopen underneath so cannot be used for runtime use
// where relocations may be required. Currently used from
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index f3a0725..a7fe9b1 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -27,6 +27,7 @@
#include "base/stl_util.h"
#include "class_linker.h"
#include "compiler_filter.h"
+#include "dex_file_loader.h"
#include "exec_utils.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
@@ -69,7 +70,9 @@
OatFileAssistant::OatFileAssistant(const char* dex_location,
const InstructionSet isa,
- bool load_executable)
+ bool load_executable,
+ int vdex_fd,
+ int oat_fd)
: isa_(isa),
load_executable_(load_executable),
odex_(this, /*is_oat_location*/ false),
@@ -109,7 +112,7 @@
std::string error_msg;
std::string odex_file_name;
if (DexLocationToOdexFilename(dex_location_, isa_, &odex_file_name, &error_msg)) {
- odex_.Reset(odex_file_name);
+ odex_.Reset(odex_file_name, vdex_fd, oat_fd);
} else {
LOG(WARNING) << "Failed to determine odex file name: " << error_msg;
}
@@ -132,7 +135,7 @@
LOG(WARNING) << "Failed to determine dex file parent directory: " << dex_location_;
} else {
std::string parent = dex_location_.substr(0, pos);
- if (access(parent.c_str(), W_OK) == 0) {
+ if (access(parent.c_str(), W_OK) == 0 || oat_fd > 0) {
dex_parent_writable_ = true;
} else {
VLOG(oat) << "Dex parent of " << dex_location_ << " is not writable: " << strerror(errno);
@@ -349,7 +352,7 @@
// Load the rest of the multidex entries
for (size_t i = 1;; i++) {
- std::string multidex_dex_location = DexFile::GetMultiDexLocation(i, dex_location.c_str());
+ std::string multidex_dex_location = DexFileLoader::GetMultiDexLocation(i, dex_location.c_str());
oat_dex_file = oat_file.GetOatDexFile(multidex_dex_location.c_str(), nullptr);
if (oat_dex_file == nullptr) {
// There are no more multidex entries to load.
@@ -401,7 +404,7 @@
uint32_t expected_checksum = (*required_dex_checksums)[i];
uint32_t actual_checksum = file.GetLocationChecksum(i);
if (expected_checksum != actual_checksum) {
- std::string dex = DexFile::GetMultiDexLocation(i, dex_location_.c_str());
+ std::string dex = DexFileLoader::GetMultiDexLocation(i, dex_location_.c_str());
*error_msg = StringPrintf("Dex checksum does not match for dex: %s."
"Expected: %u, actual: %u",
dex.c_str(),
@@ -430,7 +433,7 @@
}
for (uint32_t i = 0; i < number_of_dex_files; i++) {
- std::string dex = DexFile::GetMultiDexLocation(i, dex_location_.c_str());
+ std::string dex = DexFileLoader::GetMultiDexLocation(i, dex_location_.c_str());
uint32_t expected_checksum = (*required_dex_checksums)[i];
const OatFile::OatDexFile* oat_dex_file = file.GetOatDexFile(dex.c_str(), nullptr);
if (oat_dex_file == nullptr) {
@@ -863,9 +866,9 @@
required_dex_checksums_found_ = false;
cached_required_dex_checksums_.clear();
std::string error_msg;
- if (DexFile::GetMultiDexChecksums(dex_location_.c_str(),
- &cached_required_dex_checksums_,
- &error_msg)) {
+ if (DexFileLoader::GetMultiDexChecksums(dex_location_.c_str(),
+ &cached_required_dex_checksums_,
+ &error_msg)) {
required_dex_checksums_found_ = true;
has_original_dex_files_ = true;
} else {
@@ -879,7 +882,7 @@
if (odex_file != nullptr) {
required_dex_checksums_found_ = true;
for (size_t i = 0; i < odex_file->GetOatHeader().GetDexFileCount(); i++) {
- std::string dex = DexFile::GetMultiDexLocation(i, dex_location_.c_str());
+ std::string dex = DexFileLoader::GetMultiDexLocation(i, dex_location_.c_str());
const OatFile::OatDexFile* odex_dex_file = odex_file->GetOatDexFile(dex.c_str(), nullptr);
if (odex_dex_file == nullptr) {
required_dex_checksums_found_ = false;
@@ -1016,11 +1019,28 @@
// Check to see if there is a vdex file we can make use of.
std::string error_msg;
std::string vdex_filename = GetVdexFilename(filename_);
- std::unique_ptr<VdexFile> vdex = VdexFile::Open(vdex_filename,
- /*writeable*/false,
- /*low_4gb*/false,
- /*unquicken*/false,
- &error_msg);
+ std::unique_ptr<VdexFile> vdex;
+ if (vdex_fd_ == -1) {
+ vdex = VdexFile::Open(vdex_filename,
+ false /*writeable*/,
+ false /*low_4gb*/,
+ false /*unquicken*/,
+ &error_msg);
+ } else {
+ struct stat s;
+ int rc = TEMP_FAILURE_RETRY(fstat(vdex_fd_, &s));
+ if (rc == -1) {
+ PLOG(WARNING) << "Failed getting length of vdex file";
+ } else {
+ vdex = VdexFile::Open(vdex_fd_,
+ s.st_size,
+ vdex_filename,
+ false /*writable*/,
+ false /*low_4gb*/,
+ false /* unquicken */,
+ &error_msg);
+ }
+ }
if (vdex == nullptr) {
status_ = kOatCannotOpen;
VLOG(oat) << "unable to open vdex file " << vdex_filename << ": " << error_msg;
@@ -1095,14 +1115,26 @@
load_attempted_ = true;
if (filename_provided_) {
std::string error_msg;
- file_.reset(OatFile::Open(filename_.c_str(),
- filename_.c_str(),
- nullptr,
- nullptr,
- oat_file_assistant_->load_executable_,
- /*low_4gb*/false,
- oat_file_assistant_->dex_location_.c_str(),
- &error_msg));
+ if (oat_fd_ != -1 && vdex_fd_ != -1) {
+ file_.reset(OatFile::Open(vdex_fd_,
+ oat_fd_,
+ filename_.c_str(),
+ nullptr,
+ nullptr,
+ oat_file_assistant_->load_executable_,
+ false /* low_4gb */,
+ oat_file_assistant_->dex_location_.c_str(),
+ &error_msg));
+ } else {
+ file_.reset(OatFile::Open(filename_.c_str(),
+ filename_.c_str(),
+ nullptr,
+ nullptr,
+ oat_file_assistant_->load_executable_,
+ false /* low_4gb */,
+ oat_file_assistant_->dex_location_.c_str(),
+ &error_msg));
+ }
if (file_.get() == nullptr) {
VLOG(oat) << "OatFileAssistant test for existing oat file "
<< filename_ << ": " << error_msg;
@@ -1169,9 +1201,12 @@
status_attempted_ = false;
}
-void OatFileAssistant::OatFileInfo::Reset(const std::string& filename) {
+void OatFileAssistant::OatFileInfo::Reset(const std::string& filename, int vdex_fd,
+ int oat_fd) {
filename_provided_ = true;
filename_ = filename;
+ vdex_fd_ = vdex_fd;
+ oat_fd_ = oat_fd;
Reset();
}
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 6dc3c19..0f74ca4 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -121,7 +121,9 @@
// executable code for this dex location.
OatFileAssistant(const char* dex_location,
const InstructionSet isa,
- bool load_executable);
+ bool load_executable,
+ int vdex_fd = -1,
+ int oat_fd = -1);
~OatFileAssistant();
@@ -349,7 +351,7 @@
// Clear any cached information and switch to getting info about the oat
// file with the given filename.
- void Reset(const std::string& filename);
+ void Reset(const std::string& filename, int vdex_fd = -1, int oat_fd = -1);
// Release the loaded oat file for runtime use.
// Returns null if the oat file hasn't been loaded or is out of date.
@@ -386,6 +388,9 @@
bool filename_provided_ = false;
std::string filename_;
+ int oat_fd_ = -1;
+ int vdex_fd_ = -1;
+
bool load_attempted_ = false;
std::unique_ptr<OatFile> file_;
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 3ecd1b5..d99036d 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -20,6 +20,7 @@
#include <string>
#include <vector>
+#include <fcntl.h>
#include <gtest/gtest.h>
@@ -222,6 +223,125 @@
EXPECT_TRUE(oat_file_assistant.HasOriginalDexFiles());
}
+// Case: Passing valid file descriptors of updated odex/vdex files along with
+// the dex file.
+// Expect: The status is kNoDexOptNeeded.
+TEST_F(OatFileAssistantTest, GetDexOptNeededWithFd) {
+ std::string dex_location = GetScratchDir() + "/OatUpToDate.jar";
+ std::string odex_location = GetScratchDir() + "/OatUpToDate.odex";
+ std::string vdex_location = GetScratchDir() + "/OatUpToDate.vdex";
+
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str(),
+ odex_location.c_str(),
+ CompilerFilter::kSpeed,
+ true,
+ false,
+ false);
+
+ android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY));
+ android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY));
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(),
+ kRuntimeISA,
+ false,
+ vdex_fd.get(),
+ odex_fd.get());
+ EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+ EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kQuicken));
+ EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kExtract));
+ EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kEverything));
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
+ EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
+ EXPECT_TRUE(oat_file_assistant.HasOriginalDexFiles());
+}
+
+// Case: Passing valid odex fd, however, invalid fd for vdex with
+// the dex file.
+// Expect: The status is kDex2OatFromScratch.
+TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidVdexFd) {
+ std::string dex_location = GetScratchDir() + "/OatUpToDate.jar";
+ std::string odex_location = GetScratchDir() + "/OatUpToDate.odex";
+
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str(),
+ odex_location.c_str(),
+ CompilerFilter::kSpeed,
+ true,
+ false,
+ false);
+
+ android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY));
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(),
+ kRuntimeISA,
+ false,
+ -1,
+ odex_fd.get());
+ EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+ EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
+ EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
+}
+
+// Case: Passing valid vdex fd, however, invalid fd for odex with
+// the dex file.
+// Expect: The status is kDex2OatFromScratch.
+TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidOdexFd) {
+ std::string dex_location = GetScratchDir() + "/OatUpToDate.jar";
+ std::string odex_location = GetScratchDir() + "/OatUpToDate.odex";
+ std::string vdex_location = GetScratchDir() + "/OatUpToDate.vdex";
+
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str(),
+ odex_location.c_str(),
+ CompilerFilter::kSpeed,
+ true,
+ false,
+ false);
+
+ android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY));
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(),
+ kRuntimeISA,
+ false,
+ vdex_fd.get(),
+ -1);
+ // Even though the vdex file is up to date, because we don't have the oat
+ // file, we can't know that the vdex depends on the boot image and is up to
+ // date with respect to the boot image. Instead we must assume the vdex file
+ // depends on the boot image and is out of date with respect to the boot
+ // image.
+ EXPECT_EQ(-OatFileAssistant::kDex2OatForBootImage,
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+ EXPECT_EQ(OatFileAssistant::kOatBootImageOutOfDate, oat_file_assistant.OdexFileStatus());
+ EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
+}
+
+// Case: Passing invalid vdex and odex fd with the dex file.
+// Expect: The status is kDex2OatFromScratch.
+TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidOdexVdexFd) {
+ std::string dex_location = GetScratchDir() + "/OatUpToDate.jar";
+
+ Copy(GetDexSrc1(), dex_location);
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(),
+ kRuntimeISA,
+ false,
+ -1,
+ -1);
+ EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+ EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
+ EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
+}
+
// Case: We have a DEX file and up-to-date OAT file for it. We load the dex file
// via a symlink.
// Expect: The status is kNoDexOptNeeded.
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 66b24a9..1e7cf72 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -31,6 +31,7 @@
#include "class_linker.h"
#include "class_loader_context.h"
#include "dex_file-inl.h"
+#include "dex_file_loader.h"
#include "dex_file_tracking_registrar.h"
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/image_space.h"
@@ -94,7 +95,7 @@
for (const std::unique_ptr<const OatFile>& oat_file : oat_files_) {
const std::vector<const OatDexFile*>& oat_dex_files = oat_file->GetOatDexFiles();
for (const OatDexFile* oat_dex_file : oat_dex_files) {
- if (DexFile::GetBaseLocation(oat_dex_file->GetDexFileLocation()) == dex_base_location) {
+ if (DexFileLoader::GetBaseLocation(oat_dex_file->GetDexFileLocation()) == dex_base_location) {
return oat_file.get();
}
}
@@ -596,7 +597,7 @@
if (oat_file_assistant.HasOriginalDexFiles()) {
if (Runtime::Current()->IsDexFileFallbackEnabled()) {
static constexpr bool kVerifyChecksum = true;
- if (!DexFile::Open(
+ if (!DexFileLoader::Open(
dex_location, dex_location, kVerifyChecksum, /*out*/ &error_msg, &dex_files)) {
LOG(WARNING) << error_msg;
error_msgs->push_back("Failed to open dex files from " + std::string(dex_location)
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 1d524fd..9888186 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -310,6 +310,9 @@
.Define("-XX:ThreadSuspendTimeout=_") // in ms
.WithType<MillisecondsToNanoseconds>() // store as ns
.IntoKey(M::ThreadSuspendTimeout)
+ .Define("-XX:GlobalRefAllocStackTraceLimit=_") // Number of free slots to enable tracing.
+ .WithType<unsigned int>()
+ .IntoKey(M::GlobalRefAllocStackTraceLimit)
.Define("-XX:SlowDebug=_")
.WithType<bool>()
.WithValueMap({{"false", false}, {"true", true}})
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 7c05cb6..a4ed21e 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -67,6 +67,7 @@
#include "class_linker-inl.h"
#include "compiler_callbacks.h"
#include "debugger.h"
+#include "dex_file_loader.h"
#include "elf_file.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "experimental_flags.h"
@@ -1020,7 +1021,7 @@
LOG(WARNING) << "Skipping non-existent dex file '" << dex_filename << "'";
continue;
}
- if (!DexFile::Open(dex_filename, dex_location, kVerifyChecksum, &error_msg, dex_files)) {
+ if (!DexFileLoader::Open(dex_filename, dex_location, kVerifyChecksum, &error_msg, dex_files)) {
LOG(WARNING) << "Failed to open .dex from file '" << dex_filename << "': " << error_msg;
++failure_count;
}
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 78a60fa..cafae22 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -145,4 +145,6 @@
RUNTIME_OPTIONS_KEY (bool, SlowDebug, false)
+RUNTIME_OPTIONS_KEY (unsigned int, GlobalRefAllocStackTraceLimit, 0) // 0 = off
+
#undef RUNTIME_OPTIONS_KEY
diff --git a/runtime/utils.cc b/runtime/utils.cc
index b72dec6..1f6bd74 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -48,6 +48,7 @@
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "dex_file-inl.h"
+#include "dex_file_loader.h"
#include "dex_instruction.h"
#include "oat_quick_method_header.h"
#include "os.h"
@@ -858,7 +859,7 @@
!android::base::EndsWith(location, ".art") &&
!android::base::EndsWith(location, ".oat")) {
cache_file += "/";
- cache_file += DexFile::kClassesDex;
+ cache_file += DexFileLoader::kClassesDex;
}
std::replace(cache_file.begin(), cache_file.end(), '/', '@');
*filename = StringPrintf("%s/%s", cache_location, cache_file.c_str());
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index b955220..55bc9ec 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -25,6 +25,7 @@
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "dex_file.h"
+#include "dex_file_loader.h"
#include "dex_to_dex_decompiler.h"
namespace art {
@@ -151,15 +152,15 @@
size_t size = reinterpret_cast<const DexFile::Header*>(dex_file_start)->file_size_;
// TODO: Supply the location information for a vdex file.
static constexpr char kVdexLocation[] = "";
- std::string location = DexFile::GetMultiDexLocation(i, kVdexLocation);
- std::unique_ptr<const DexFile> dex(DexFile::Open(dex_file_start,
- size,
- location,
- GetLocationChecksum(i),
- nullptr /*oat_dex_file*/,
- false /*verify*/,
- false /*verify_checksum*/,
- error_msg));
+ std::string location = DexFileLoader::GetMultiDexLocation(i, kVdexLocation);
+ std::unique_ptr<const DexFile> dex(DexFileLoader::Open(dex_file_start,
+ size,
+ location,
+ GetLocationChecksum(i),
+ nullptr /*oat_dex_file*/,
+ false /*verify*/,
+ false /*verify_checksum*/,
+ error_msg));
if (dex == nullptr) {
return false;
}
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 7246bae..0033167 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -71,8 +71,8 @@
// sure we only print this once.
static bool gPrintedDxMonitorText = false;
-PcToRegisterLineTable::PcToRegisterLineTable(ScopedArenaAllocator& arena)
- : register_lines_(arena.Adapter(kArenaAllocVerifier)) {}
+PcToRegisterLineTable::PcToRegisterLineTable(ScopedArenaAllocator& allocator)
+ : register_lines_(allocator.Adapter(kArenaAllocVerifier)) {}
void PcToRegisterLineTable::Init(RegisterTrackingMode mode, InstructionFlags* flags,
uint32_t insns_size, uint16_t registers_size,
@@ -552,9 +552,9 @@
bool allow_thread_suspension)
: self_(self),
arena_stack_(Runtime::Current()->GetArenaPool()),
- arena_(&arena_stack_),
- reg_types_(can_load_classes, arena_),
- reg_table_(arena_),
+ allocator_(&arena_stack_),
+ reg_types_(can_load_classes, allocator_),
+ reg_table_(allocator_),
work_insn_idx_(dex::kDexNoIndex),
dex_method_idx_(dex_method_idx),
mirror_method_(method),
@@ -868,7 +868,7 @@
}
// Allocate and initialize an array to hold instruction data.
- insn_flags_.reset(arena_.AllocArray<InstructionFlags>(code_item_->insns_size_in_code_units_));
+ insn_flags_.reset(allocator_.AllocArray<InstructionFlags>(code_item_->insns_size_in_code_units_));
DCHECK(insn_flags_ != nullptr);
std::uninitialized_fill_n(insn_flags_.get(),
code_item_->insns_size_in_code_units_,
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 57ab56c..1f1d7c1 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -67,7 +67,7 @@
// execution of that instruction.
class PcToRegisterLineTable {
public:
- explicit PcToRegisterLineTable(ScopedArenaAllocator& arena);
+ explicit PcToRegisterLineTable(ScopedArenaAllocator& allocator);
~PcToRegisterLineTable();
// Initialize the RegisterTable. Every instruction address can have a different set of information
@@ -222,7 +222,7 @@
}
ScopedArenaAllocator& GetScopedAllocator() {
- return arena_;
+ return allocator_;
}
private:
@@ -711,7 +711,7 @@
// Arena allocator.
ArenaStack arena_stack_;
- ScopedArenaAllocator arena_;
+ ScopedArenaAllocator allocator_;
RegTypeCache reg_types_;
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 0c00868..4ebe151 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -164,7 +164,7 @@
}
StringPiece RegTypeCache::AddString(const StringPiece& string_piece) {
- char* ptr = arena_.AllocArray<char>(string_piece.length());
+ char* ptr = allocator_.AllocArray<char>(string_piece.length());
memcpy(ptr, string_piece.data(), string_piece.length());
return StringPiece(ptr, string_piece.length());
}
@@ -197,9 +197,10 @@
if (klass->CannotBeAssignedFromOtherTypes() || precise) {
DCHECK(!(klass->IsAbstract()) || klass->IsArrayClass());
DCHECK(!klass->IsInterface());
- entry = new (&arena_) PreciseReferenceType(klass, AddString(sp_descriptor), entries_.size());
+ entry =
+ new (&allocator_) PreciseReferenceType(klass, AddString(sp_descriptor), entries_.size());
} else {
- entry = new (&arena_) ReferenceType(klass, AddString(sp_descriptor), entries_.size());
+ entry = new (&allocator_) ReferenceType(klass, AddString(sp_descriptor), entries_.size());
}
return AddEntry(entry);
} else { // Class not resolved.
@@ -213,7 +214,7 @@
}
if (IsValidDescriptor(descriptor)) {
return AddEntry(
- new (&arena_) UnresolvedReferenceType(AddString(sp_descriptor), entries_.size()));
+ new (&allocator_) UnresolvedReferenceType(AddString(sp_descriptor), entries_.size()));
} else {
// The descriptor is broken return the unknown type as there's nothing sensible that
// could be done at runtime
@@ -224,7 +225,7 @@
const RegType& RegTypeCache::MakeUnresolvedReference() {
// The descriptor is intentionally invalid so nothing else will match this type.
- return AddEntry(new (&arena_) UnresolvedReferenceType(AddString("a"), entries_.size()));
+ return AddEntry(new (&allocator_) UnresolvedReferenceType(AddString("a"), entries_.size()));
}
const RegType* RegTypeCache::FindClass(mirror::Class* klass, bool precise) const {
@@ -253,8 +254,8 @@
DCHECK(FindClass(klass, precise) == nullptr);
RegType* const reg_type = precise
? static_cast<RegType*>(
- new (&arena_) PreciseReferenceType(klass, descriptor, entries_.size()))
- : new (&arena_) ReferenceType(klass, descriptor, entries_.size());
+ new (&allocator_) PreciseReferenceType(klass, descriptor, entries_.size()))
+ : new (&allocator_) ReferenceType(klass, descriptor, entries_.size());
return &AddEntry(reg_type);
}
@@ -267,11 +268,11 @@
return *reg_type;
}
-RegTypeCache::RegTypeCache(bool can_load_classes, ScopedArenaAllocator& arena)
- : entries_(arena.Adapter(kArenaAllocVerifier)),
- klass_entries_(arena.Adapter(kArenaAllocVerifier)),
+RegTypeCache::RegTypeCache(bool can_load_classes, ScopedArenaAllocator& allocator)
+ : entries_(allocator.Adapter(kArenaAllocVerifier)),
+ klass_entries_(allocator.Adapter(kArenaAllocVerifier)),
can_load_classes_(can_load_classes),
- arena_(arena) {
+ allocator_(allocator) {
if (kIsDebugBuild) {
Thread::Current()->AssertThreadSuspensionIsAllowable(gAborting == 0);
}
@@ -349,7 +350,7 @@
const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left,
const RegType& right,
MethodVerifier* verifier) {
- ArenaBitVector types(&arena_,
+ ArenaBitVector types(&allocator_,
kDefaultArenaBitVectorBytes * kBitsPerByte, // Allocate at least 8 bytes.
true); // Is expandable.
const RegType* left_resolved;
@@ -426,10 +427,10 @@
}
}
}
- return AddEntry(new (&arena_) UnresolvedMergedType(resolved_parts_merged,
- types,
- this,
- entries_.size()));
+ return AddEntry(new (&allocator_) UnresolvedMergedType(resolved_parts_merged,
+ types,
+ this,
+ entries_.size()));
}
const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
@@ -446,7 +447,7 @@
}
}
}
- return AddEntry(new (&arena_) UnresolvedSuperClass(child.GetId(), this, entries_.size()));
+ return AddEntry(new (&allocator_) UnresolvedSuperClass(child.GetId(), this, entries_.size()));
}
const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
@@ -462,9 +463,9 @@
return *down_cast<const UnresolvedUninitializedRefType*>(cur_entry);
}
}
- entry = new (&arena_) UnresolvedUninitializedRefType(descriptor,
- allocation_pc,
- entries_.size());
+ entry = new (&allocator_) UnresolvedUninitializedRefType(descriptor,
+ allocation_pc,
+ entries_.size());
} else {
mirror::Class* klass = type.GetClass();
for (size_t i = primitive_count_; i < entries_.size(); i++) {
@@ -476,10 +477,10 @@
return *down_cast<const UninitializedReferenceType*>(cur_entry);
}
}
- entry = new (&arena_) UninitializedReferenceType(klass,
- descriptor,
- allocation_pc,
- entries_.size());
+ entry = new (&allocator_) UninitializedReferenceType(klass,
+ descriptor,
+ allocation_pc,
+ entries_.size());
}
return AddEntry(entry);
}
@@ -496,7 +497,7 @@
return *cur_entry;
}
}
- entry = new (&arena_) UnresolvedReferenceType(descriptor, entries_.size());
+ entry = new (&allocator_) UnresolvedReferenceType(descriptor, entries_.size());
} else {
mirror::Class* klass = uninit_type.GetClass();
if (uninit_type.IsUninitializedThisReference() && !klass->IsFinal()) {
@@ -507,7 +508,7 @@
return *cur_entry;
}
}
- entry = new (&arena_) ReferenceType(klass, "", entries_.size());
+ entry = new (&allocator_) ReferenceType(klass, "", entries_.size());
} else if (!klass->IsPrimitive()) {
// We're uninitialized because of allocation, look or create a precise type as allocations
// may only create objects of that type.
@@ -526,9 +527,9 @@
return *cur_entry;
}
}
- entry = new (&arena_) PreciseReferenceType(klass,
- uninit_type.GetDescriptor(),
- entries_.size());
+ entry = new (&allocator_) PreciseReferenceType(klass,
+ uninit_type.GetDescriptor(),
+ entries_.size());
} else {
return Conflict();
}
@@ -547,7 +548,7 @@
return *down_cast<const UninitializedType*>(cur_entry);
}
}
- entry = new (&arena_) UnresolvedUninitializedThisRefType(descriptor, entries_.size());
+ entry = new (&allocator_) UnresolvedUninitializedThisRefType(descriptor, entries_.size());
} else {
mirror::Class* klass = type.GetClass();
for (size_t i = primitive_count_; i < entries_.size(); i++) {
@@ -556,7 +557,7 @@
return *down_cast<const UninitializedType*>(cur_entry);
}
}
- entry = new (&arena_) UninitializedThisReferenceType(klass, descriptor, entries_.size());
+ entry = new (&allocator_) UninitializedThisReferenceType(klass, descriptor, entries_.size());
}
return AddEntry(entry);
}
@@ -572,9 +573,9 @@
}
ConstantType* entry;
if (precise) {
- entry = new (&arena_) PreciseConstType(value, entries_.size());
+ entry = new (&allocator_) PreciseConstType(value, entries_.size());
} else {
- entry = new (&arena_) ImpreciseConstType(value, entries_.size());
+ entry = new (&allocator_) ImpreciseConstType(value, entries_.size());
}
return AddEntry(entry);
}
@@ -589,9 +590,9 @@
}
ConstantType* entry;
if (precise) {
- entry = new (&arena_) PreciseConstLoType(value, entries_.size());
+ entry = new (&allocator_) PreciseConstLoType(value, entries_.size());
} else {
- entry = new (&arena_) ImpreciseConstLoType(value, entries_.size());
+ entry = new (&allocator_) ImpreciseConstLoType(value, entries_.size());
}
return AddEntry(entry);
}
@@ -606,9 +607,9 @@
}
ConstantType* entry;
if (precise) {
- entry = new (&arena_) PreciseConstHiType(value, entries_.size());
+ entry = new (&allocator_) PreciseConstHiType(value, entries_.size());
} else {
- entry = new (&arena_) ImpreciseConstHiType(value, entries_.size());
+ entry = new (&allocator_) ImpreciseConstHiType(value, entries_.size());
}
return AddEntry(entry);
}
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 96eca05..74d9e9d 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -61,7 +61,7 @@
class RegTypeCache {
public:
- explicit RegTypeCache(bool can_load_classes, ScopedArenaAllocator& arena);
+ explicit RegTypeCache(bool can_load_classes, ScopedArenaAllocator& allocator);
~RegTypeCache();
static void Init() REQUIRES_SHARED(Locks::mutator_lock_) {
if (!RegTypeCache::primitive_initialized_) {
@@ -201,7 +201,7 @@
const bool can_load_classes_;
// Arena allocator.
- ScopedArenaAllocator& arena_;
+ ScopedArenaAllocator& allocator_;
DISALLOW_COPY_AND_ASSIGN(RegTypeCache);
};
diff --git a/test/1939-proxy-frames/src/art/Test1939.java b/test/1939-proxy-frames/src/art/Test1939.java
index 6147d1e..83d0d2c 100644
--- a/test/1939-proxy-frames/src/art/Test1939.java
+++ b/test/1939-proxy-frames/src/art/Test1939.java
@@ -161,9 +161,6 @@
public static void run() throws Exception {
Locals.EnableLocalVariableAccess();
- final TestCase[] MAIN_TEST_CASES = new TestCase[] {
- };
-
TestCase test = new TestCase(
getProxyObject(Foo.class), getMethod(Foo.class, "InterfaceProxyMethod"));
test.exec(NamedGet("This", Locals::GetLocalInstance));
diff --git a/test/983-source-transform-verify/source_transform.cc b/test/983-source-transform-verify/source_transform.cc
index 570ade3..ef67ace 100644
--- a/test/983-source-transform-verify/source_transform.cc
+++ b/test/983-source-transform-verify/source_transform.cc
@@ -29,6 +29,7 @@
#include "base/macros.h"
#include "bytecode_utils.h"
#include "dex_file.h"
+#include "dex_file_loader.h"
#include "dex_instruction.h"
#include "jit/jit.h"
#include "native_stack_dump.h"
@@ -66,14 +67,14 @@
return;
}
std::string error;
- std::unique_ptr<const DexFile> dex(DexFile::Open(class_data,
- class_data_len,
- "fake_location.dex",
- /*location_checksum*/ 0,
- /*oat_dex_file*/ nullptr,
- /*verify*/ true,
- /*verify_checksum*/ true,
- &error));
+ std::unique_ptr<const DexFile> dex(DexFileLoader::Open(class_data,
+ class_data_len,
+ "fake_location.dex",
+ /*location_checksum*/ 0,
+ /*oat_dex_file*/ nullptr,
+ /*verify*/ true,
+ /*verify_checksum*/ true,
+ &error));
if (dex.get() == nullptr) {
std::cout << "Failed to verify dex file for " << name << " because " << error << std::endl;
return;
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index d37e6bc..9e1afc8 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -71,9 +71,9 @@
PROFILE="n"
RANDOM_PROFILE="n"
# The normal dex2oat timeout.
-DEX2OAT_TIMEOUT="60"
+DEX2OAT_TIMEOUT="300" # 5 mins
# The *hard* timeout where we really start trying to kill the dex2oat.
-DEX2OAT_RT_TIMEOUT="90"
+DEX2OAT_RT_TIMEOUT="360" # 6 mins
# if "y", set -Xstacktracedir and inform the test of its location. When
# this is set, stack trace dumps (from signal 3) will be written to a file
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 68e9eb8..47b2f22 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -1,5 +1,12 @@
[
{
+ "tests": [ "1939-proxy-frames", "1914-get-local-instance" ],
+ "description": ["Test 1939 & 1914 seems to consistently fail in gcstress on 64 bit with",
+ "a proxy this object having no associated class!"],
+ "variant": "gcstress",
+ "bug": "http://b/67679263"
+ },
+ {
"tests": "1934-jvmti-signal-thread",
"description": ["Disables 1934-jvmti-signal-thread in tracing configurations"],
"variant": "trace | stream",