ART: Remove MIRGraph::dex_pc_to_block_map_
This patch removes MIRGraph::dex_pc_to_block_map_, adds a local
variable dex_pc_to_block_map inside MIRGraph::InlineMethod(), and
updates several functions to pass dex_pc_to_block_map.
The goal is to limit the scope of dex_pc_to_block_map and
the usage of FindBlock so that compiler optimizations cannot
rely on a dex pc to look up basic blocks; this avoids problems
caused by multiple instructions sharing the same dex pc.
Also, this patch changes quick targets to use successor blocks
for switch case target generation at Mir2Lir::InstallSwitchTables().
Change-Id: I9f571efebd2706b4e1606279bd61f3b406ecd1c4
Signed-off-by: Chao-ying Fu <chao-ying.fu@intel.com>
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 0f7d45d..93a31e9 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -113,7 +113,6 @@
entry_block_(NULL),
exit_block_(NULL),
current_code_item_(NULL),
- dex_pc_to_block_map_(arena->Adapter()),
m_units_(arena->Adapter()),
method_stack_(arena->Adapter()),
current_method_(kInvalidEntry),
@@ -268,31 +267,14 @@
DCHECK(insn != orig_block->first_mir_insn);
DCHECK(insn == bottom_block->first_mir_insn);
DCHECK_EQ(insn->offset, bottom_block->start_offset);
- DCHECK_EQ(dex_pc_to_block_map_[insn->offset], orig_block->id);
// Scan the "bottom" instructions, remapping them to the
// newly created "bottom" block.
MIR* p = insn;
p->bb = bottom_block->id;
- dex_pc_to_block_map_[p->offset] = bottom_block->id;
while (p != bottom_block->last_mir_insn) {
p = p->next;
DCHECK(p != nullptr);
p->bb = bottom_block->id;
- int opcode = p->dalvikInsn.opcode;
- /*
- * Some messiness here to ensure that we only enter real opcodes and only the
- * first half of a potentially throwing instruction that has been split into
- * CHECK and work portions. Since the 2nd half of a split operation is always
- * the first in a BasicBlock, we can't hit it here.
- */
- if ((opcode == kMirOpCheck) || !MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
- BasicBlockId mapped_id = dex_pc_to_block_map_[p->offset];
- // At first glance the instructions should all be mapped to orig_block.
- // However, multiple instructions may correspond to the same dex, hence an earlier
- // instruction may have already moved the mapping for dex to bottom_block.
- DCHECK((mapped_id == orig_block->id) || (mapped_id == bottom_block->id));
- dex_pc_to_block_map_[p->offset] = bottom_block->id;
- }
}
return bottom_block;
@@ -307,12 +289,13 @@
* Utilizes a map for fast lookup of the typical cases.
*/
BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool create,
- BasicBlock** immed_pred_block_p) {
+ BasicBlock** immed_pred_block_p,
+ ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
if (code_offset >= current_code_item_->insns_size_in_code_units_) {
return nullptr;
}
- int block_id = dex_pc_to_block_map_[code_offset];
+ int block_id = (*dex_pc_to_block_map)[code_offset];
BasicBlock* bb = GetBasicBlock(block_id);
if ((bb != nullptr) && (bb->start_offset == code_offset)) {
@@ -327,19 +310,46 @@
if (bb != nullptr) {
// The target exists somewhere in an existing block.
- return SplitBlock(code_offset, bb, bb == *immed_pred_block_p ? immed_pred_block_p : nullptr);
+ BasicBlock* bottom_block = SplitBlock(code_offset, bb, bb == *immed_pred_block_p ? immed_pred_block_p : nullptr);
+ DCHECK(bottom_block != nullptr);
+ MIR* p = bottom_block->first_mir_insn;
+ BasicBlock* orig_block = bb;
+ DCHECK_EQ((*dex_pc_to_block_map)[p->offset], orig_block->id);
+ // Scan the "bottom" instructions, remapping them to the
+ // newly created "bottom" block.
+ (*dex_pc_to_block_map)[p->offset] = bottom_block->id;
+ while (p != bottom_block->last_mir_insn) {
+ p = p->next;
+ DCHECK(p != nullptr);
+ int opcode = p->dalvikInsn.opcode;
+ /*
+ * Some messiness here to ensure that we only enter real opcodes and only the
+ * first half of a potentially throwing instruction that has been split into
+ * CHECK and work portions. Since the 2nd half of a split operation is always
+ * the first in a BasicBlock, we can't hit it here.
+ */
+ if ((opcode == kMirOpCheck) || !MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
+ BasicBlockId mapped_id = (*dex_pc_to_block_map)[p->offset];
+ // At first glance the instructions should all be mapped to orig_block.
+ // However, multiple instructions may correspond to the same dex, hence an earlier
+ // instruction may have already moved the mapping for dex to bottom_block.
+ DCHECK((mapped_id == orig_block->id) || (mapped_id == bottom_block->id));
+ (*dex_pc_to_block_map)[p->offset] = bottom_block->id;
+ }
+ }
+ return bottom_block;
}
// Create a new block.
bb = CreateNewBB(kDalvikByteCode);
bb->start_offset = code_offset;
- dex_pc_to_block_map_[bb->start_offset] = bb->id;
+ (*dex_pc_to_block_map)[bb->start_offset] = bb->id;
return bb;
}
/* Identify code range in try blocks and set up the empty catch blocks */
-void MIRGraph::ProcessTryCatchBlocks() {
+void MIRGraph::ProcessTryCatchBlocks(ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
int tries_size = current_code_item_->tries_size_;
DexOffset offset;
@@ -364,7 +374,7 @@
CatchHandlerIterator iterator(handlers_ptr);
for (; iterator.HasNext(); iterator.Next()) {
uint32_t address = iterator.GetHandlerAddress();
- FindBlock(address, true /*create*/, /* immed_pred_block_p */ nullptr);
+ FindBlock(address, true /*create*/, /* immed_pred_block_p */ nullptr, dex_pc_to_block_map);
}
handlers_ptr = iterator.EndDataPointer();
}
@@ -439,7 +449,8 @@
/* Process instructions with the kBranch flag */
BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
int width, int flags, const uint16_t* code_ptr,
- const uint16_t* code_end) {
+ const uint16_t* code_end,
+ ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
DexOffset target = cur_offset;
switch (insn->dalvikInsn.opcode) {
case Instruction::GOTO:
@@ -470,7 +481,8 @@
}
CountBranch(target);
BasicBlock* taken_block = FindBlock(target, /* create */ true,
- /* immed_pred_block_p */ &cur_block);
+ /* immed_pred_block_p */ &cur_block,
+ dex_pc_to_block_map);
cur_block->taken = taken_block->id;
taken_block->predecessors.push_back(cur_block->id);
@@ -480,18 +492,20 @@
/* create */
true,
/* immed_pred_block_p */
- &cur_block);
+ &cur_block,
+ dex_pc_to_block_map);
cur_block->fall_through = fallthrough_block->id;
fallthrough_block->predecessors.push_back(cur_block->id);
} else if (code_ptr < code_end) {
- FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr);
+ FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr, dex_pc_to_block_map);
}
return cur_block;
}
/* Process instructions with the kSwitch flag */
BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
- int width, int flags) {
+ int width, int flags,
+ ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
UNUSED(flags);
const uint16_t* switch_data =
reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB);
@@ -545,7 +559,8 @@
for (i = 0; i < size; i++) {
BasicBlock* case_block = FindBlock(cur_offset + target_table[i], /* create */ true,
- /* immed_pred_block_p */ &cur_block);
+ /* immed_pred_block_p */ &cur_block,
+ dex_pc_to_block_map);
SuccessorBlockInfo* successor_block_info =
static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo),
kArenaAllocSuccessor));
@@ -559,7 +574,8 @@
/* Fall-through case */
BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* create */ true,
- /* immed_pred_block_p */ nullptr);
+ /* immed_pred_block_p */ nullptr,
+ dex_pc_to_block_map);
cur_block->fall_through = fallthrough_block->id;
fallthrough_block->predecessors.push_back(cur_block->id);
return cur_block;
@@ -568,7 +584,8 @@
/* Process instructions with the kThrow flag */
BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
int width, int flags, ArenaBitVector* try_block_addr,
- const uint16_t* code_ptr, const uint16_t* code_end) {
+ const uint16_t* code_ptr, const uint16_t* code_end,
+ ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
UNUSED(flags);
bool in_try_block = try_block_addr->IsBitSet(cur_offset);
bool is_throw = (insn->dalvikInsn.opcode == Instruction::THROW);
@@ -585,7 +602,8 @@
for (; iterator.HasNext(); iterator.Next()) {
BasicBlock* catch_block = FindBlock(iterator.GetHandlerAddress(), false /* create */,
- nullptr /* immed_pred_block_p */);
+ nullptr /* immed_pred_block_p */,
+ dex_pc_to_block_map);
if (insn->dalvikInsn.opcode == Instruction::MONITOR_EXIT &&
IsBadMonitorExitCatch(insn->offset, catch_block->start_offset)) {
// Don't allow monitor-exit to catch its own exception, http://b/15745363 .
@@ -620,7 +638,7 @@
cur_block->explicit_throw = true;
if (code_ptr < code_end) {
// Force creation of new block following THROW via side-effect.
- FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr);
+ FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr, dex_pc_to_block_map);
}
if (!in_try_block) {
// Don't split a THROW that can't rethrow - we're done.
@@ -652,7 +670,7 @@
* not automatically terminated after the work portion, and may
* contain following instructions.
*
- * Note also that the dex_pc_to_block_map_ entry for the potentially
+ * Note also that the dex_pc_to_block_map entry for the potentially
* throwing instruction will refer to the original basic block.
*/
BasicBlock* new_block = CreateNewBB(kDalvikByteCode);
@@ -687,7 +705,11 @@
// TODO: need to rework expansion of block list & try_block_addr when inlining activated.
// TUNING: use better estimate of basic blocks for following resize.
block_list_.reserve(block_list_.size() + current_code_item_->insns_size_in_code_units_);
- dex_pc_to_block_map_.resize(dex_pc_to_block_map_.size() + current_code_item_->insns_size_in_code_units_);
+ // FindBlock lookup cache.
+ ScopedArenaAllocator allocator(&cu_->arena_stack);
+ ScopedArenaVector<uint16_t> dex_pc_to_block_map(allocator.Adapter());
+ dex_pc_to_block_map.resize(dex_pc_to_block_map.size() +
+ current_code_item_->insns_size_in_code_units_);
// TODO: replace with explicit resize routine. Using automatic extension side effect for now.
try_block_addr_->SetBit(current_code_item_->insns_size_in_code_units_);
@@ -728,7 +750,7 @@
cur_block->predecessors.push_back(entry_block_->id);
/* Identify code range in try blocks and set up the empty catch blocks */
- ProcessTryCatchBlocks();
+ ProcessTryCatchBlocks(&dex_pc_to_block_map);
uint64_t merged_df_flags = 0u;
@@ -777,20 +799,21 @@
DCHECK(cur_block->taken == NullBasicBlockId);
// Unreachable instruction, mark for no continuation and end basic block.
flags &= ~Instruction::kContinue;
- FindBlock(current_offset_ + width, /* create */ true, /* immed_pred_block_p */ nullptr);
+ FindBlock(current_offset_ + width, /* create */ true,
+ /* immed_pred_block_p */ nullptr, &dex_pc_to_block_map);
}
} else {
cur_block->AppendMIR(insn);
}
// Associate the starting dex_pc for this opcode with its containing basic block.
- dex_pc_to_block_map_[insn->offset] = cur_block->id;
+ dex_pc_to_block_map[insn->offset] = cur_block->id;
code_ptr += width;
if (flags & Instruction::kBranch) {
cur_block = ProcessCanBranch(cur_block, insn, current_offset_,
- width, flags, code_ptr, code_end);
+ width, flags, code_ptr, code_end, &dex_pc_to_block_map);
} else if (flags & Instruction::kReturn) {
cur_block->terminated_by_return = true;
cur_block->fall_through = exit_block_->id;
@@ -804,13 +827,15 @@
* Create a fallthrough block for real instructions
* (incl. NOP).
*/
- FindBlock(current_offset_ + width, /* create */ true, /* immed_pred_block_p */ nullptr);
+ FindBlock(current_offset_ + width, /* create */ true,
+ /* immed_pred_block_p */ nullptr, &dex_pc_to_block_map);
}
} else if (flags & Instruction::kThrow) {
cur_block = ProcessCanThrow(cur_block, insn, current_offset_, width, flags, try_block_addr_,
- code_ptr, code_end);
+ code_ptr, code_end, &dex_pc_to_block_map);
} else if (flags & Instruction::kSwitch) {
- cur_block = ProcessCanSwitch(cur_block, insn, current_offset_, width, flags);
+ cur_block = ProcessCanSwitch(cur_block, insn, current_offset_, width,
+ flags, &dex_pc_to_block_map);
}
if (verify_flags & Instruction::kVerifyVarArgRange ||
verify_flags & Instruction::kVerifyVarArgRangeNonZero) {
@@ -828,7 +853,8 @@
}
current_offset_ += width;
BasicBlock* next_block = FindBlock(current_offset_, /* create */ false,
- /* immed_pred_block_p */ nullptr);
+ /* immed_pred_block_p */ nullptr,
+ &dex_pc_to_block_map);
if (next_block) {
/*
* The next instruction could be the target of a previously parsed
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 5def191..5914245 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -542,8 +542,9 @@
uint32_t method_idx, jobject class_loader, const DexFile& dex_file);
/* Find existing block */
- BasicBlock* FindBlock(DexOffset code_offset) {
- return FindBlock(code_offset, false, NULL);
+ BasicBlock* FindBlock(DexOffset code_offset,
+ ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
+ return FindBlock(code_offset, false, nullptr, dex_pc_to_block_map);
}
const uint16_t* GetCurrentInsns() const {
@@ -1249,16 +1250,20 @@
bool ContentIsInsn(const uint16_t* code_ptr);
BasicBlock* SplitBlock(DexOffset code_offset, BasicBlock* orig_block,
BasicBlock** immed_pred_block_p);
- BasicBlock* FindBlock(DexOffset code_offset, bool create, BasicBlock** immed_pred_block_p);
- void ProcessTryCatchBlocks();
+ BasicBlock* FindBlock(DexOffset code_offset, bool create, BasicBlock** immed_pred_block_p,
+ ScopedArenaVector<uint16_t>* dex_pc_to_block_map);
+ void ProcessTryCatchBlocks(ScopedArenaVector<uint16_t>* dex_pc_to_block_map);
bool IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset, NarrowDexOffset catch_offset);
BasicBlock* ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
- int flags, const uint16_t* code_ptr, const uint16_t* code_end);
+ int flags, const uint16_t* code_ptr, const uint16_t* code_end,
+ ScopedArenaVector<uint16_t>* dex_pc_to_block_map);
BasicBlock* ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
- int flags);
+ int flags,
+ ScopedArenaVector<uint16_t>* dex_pc_to_block_map);
BasicBlock* ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
int flags, ArenaBitVector* try_block_addr, const uint16_t* code_ptr,
- const uint16_t* code_end);
+ const uint16_t* code_end,
+ ScopedArenaVector<uint16_t>* dex_pc_to_block_map);
int AddNewSReg(int v_reg);
void HandleSSAUse(int* uses, int dalvik_reg, int reg_index);
void DataFlowSSAFormat35C(MIR* mir);
@@ -1391,7 +1396,6 @@
BasicBlock* entry_block_;
BasicBlock* exit_block_;
const DexFile::CodeItem* current_code_item_;
- ArenaVector<uint16_t> dex_pc_to_block_map_; // FindBlock lookup cache.
ArenaVector<DexCompilationUnit*> m_units_; // List of methods included in this graph
typedef std::pair<int, int> MIRLocation; // Insert point, (m_unit_ index, offset)
ArenaVector<MIRLocation> method_stack_; // Include stack
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index f15b727..1b5dde2 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -52,16 +52,13 @@
*/
void ArmMir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
- if (cu_->verbose) {
- DumpSparseSwitchTable(table);
- }
// Add the table to the list - we'll process it later
SwitchTable *tab_rec =
static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
+ tab_rec->switch_mir = mir;
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
uint32_t size = table[1];
- tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
switch_tables_.push_back(tab_rec);
// Get the switch value
@@ -100,17 +97,13 @@
void ArmMir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
- if (cu_->verbose) {
- DumpPackedSwitchTable(table);
- }
// Add the table to the list - we'll process it later
SwitchTable *tab_rec =
static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
+ tab_rec->switch_mir = mir;
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
uint32_t size = table[1];
- tab_rec->targets =
- static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
switch_tables_.push_back(tab_rec);
// Get the switch value
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 6492442..f5407ae 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -51,16 +51,13 @@
*/
void Arm64Mir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
- if (cu_->verbose) {
- DumpSparseSwitchTable(table);
- }
// Add the table to the list - we'll process it later
SwitchTable *tab_rec =
static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
+ tab_rec->switch_mir = mir;
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
uint32_t size = table[1];
- tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
switch_tables_.push_back(tab_rec);
// Get the switch value
@@ -103,17 +100,13 @@
void Arm64Mir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
- if (cu_->verbose) {
- DumpPackedSwitchTable(table);
- }
// Add the table to the list - we'll process it later
SwitchTable *tab_rec =
static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
+ tab_rec->switch_mir = mir;
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
uint32_t size = table[1];
- tab_rec->targets =
- static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
switch_tables_.push_back(tab_rec);
// Get the switch value
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 04113db..88a4605 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -557,29 +557,49 @@
LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset;
}
if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
- const int32_t* keys = reinterpret_cast<const int32_t*>(&(tab_rec->table[2]));
- for (int elems = 0; elems < tab_rec->table[1]; elems++) {
- int disp = tab_rec->targets[elems]->offset - bx_offset;
+ DCHECK(tab_rec->switch_mir != nullptr);
+ BasicBlock* bb = mir_graph_->GetBasicBlock(tab_rec->switch_mir->bb);
+ DCHECK(bb != nullptr);
+ int elems = 0;
+ for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
+ int key = successor_block_info->key;
+ int target = successor_block_info->block;
+ LIR* boundary_lir = InsertCaseLabel(target, key);
+ DCHECK(boundary_lir != nullptr);
+ int disp = boundary_lir->offset - bx_offset;
+ Push32(code_buffer_, key);
+ Push32(code_buffer_, disp);
if (cu_->verbose) {
LOG(INFO) << " Case[" << elems << "] key: 0x"
- << std::hex << keys[elems] << ", disp: 0x"
+ << std::hex << key << ", disp: 0x"
<< std::hex << disp;
}
- Push32(code_buffer_, keys[elems]);
- Push32(code_buffer_,
- tab_rec->targets[elems]->offset - bx_offset);
+ elems++;
}
+ DCHECK_EQ(elems, tab_rec->table[1]);
} else {
DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
static_cast<int>(Instruction::kPackedSwitchSignature));
- for (int elems = 0; elems < tab_rec->table[1]; elems++) {
- int disp = tab_rec->targets[elems]->offset - bx_offset;
+ DCHECK(tab_rec->switch_mir != nullptr);
+ BasicBlock* bb = mir_graph_->GetBasicBlock(tab_rec->switch_mir->bb);
+ DCHECK(bb != nullptr);
+ int elems = 0;
+ int low_key = s4FromSwitchData(&tab_rec->table[2]);
+ for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
+ int key = successor_block_info->key;
+ DCHECK_EQ(elems + low_key, key);
+ int target = successor_block_info->block;
+ LIR* boundary_lir = InsertCaseLabel(target, key);
+ DCHECK(boundary_lir != nullptr);
+ int disp = boundary_lir->offset - bx_offset;
+ Push32(code_buffer_, disp);
if (cu_->verbose) {
LOG(INFO) << " Case[" << elems << "] disp: 0x"
<< std::hex << disp;
}
- Push32(code_buffer_, tab_rec->targets[elems]->offset - bx_offset);
+ elems++;
}
+ DCHECK_EQ(elems, tab_rec->table[1]);
}
}
}
@@ -830,13 +850,15 @@
* branch table during the assembly phase. All resource flags
* are set to prevent code motion. KeyVal is just there for debugging.
*/
-LIR* Mir2Lir::InsertCaseLabel(DexOffset vaddr, int keyVal) {
- LIR* boundary_lir = &block_label_list_[mir_graph_->FindBlock(vaddr)->id];
+LIR* Mir2Lir::InsertCaseLabel(uint32_t bbid, int keyVal) {
+ LIR* boundary_lir = &block_label_list_[bbid];
LIR* res = boundary_lir;
if (cu_->verbose) {
// Only pay the expense if we're pretty-printing.
LIR* new_label = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR));
- new_label->dalvik_offset = vaddr;
+ BasicBlock* bb = mir_graph_->GetBasicBlock(bbid);
+ DCHECK(bb != nullptr);
+ new_label->dalvik_offset = bb->start_offset;
new_label->opcode = kPseudoCaseLabel;
new_label->operands[0] = keyVal;
new_label->flags.fixup = kFixupLabel;
@@ -848,40 +870,6 @@
return res;
}
-void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
- const uint16_t* table = tab_rec->table;
- DexOffset base_vaddr = tab_rec->vaddr;
- const int32_t *targets = reinterpret_cast<const int32_t*>(&table[4]);
- int entries = table[1];
- int low_key = s4FromSwitchData(&table[2]);
- for (int i = 0; i < entries; i++) {
- tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], i + low_key);
- }
-}
-
-void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
- const uint16_t* table = tab_rec->table;
- DexOffset base_vaddr = tab_rec->vaddr;
- int entries = table[1];
- const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
- const int32_t* targets = &keys[entries];
- for (int i = 0; i < entries; i++) {
- tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], keys[i]);
- }
-}
-
-void Mir2Lir::ProcessSwitchTables() {
- for (Mir2Lir::SwitchTable* tab_rec : switch_tables_) {
- if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
- MarkPackedCaseLabels(tab_rec);
- } else if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
- MarkSparseCaseLabels(tab_rec);
- } else {
- LOG(FATAL) << "Invalid switch table";
- }
- }
-}
-
void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table) {
/*
* Sparse switch data format:
@@ -1032,9 +1020,6 @@
/* Method is not empty */
if (first_lir_insn_) {
- // mark the targets of switch statement case labels
- ProcessSwitchTables();
-
/* Convert LIR into machine code. */
AssembleLIR();
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index ccfdaf6..0719b52 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -68,17 +68,13 @@
*/
void MipsMir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
- if (cu_->verbose) {
- DumpSparseSwitchTable(table);
- }
// Add the table to the list - we'll process it later
SwitchTable* tab_rec =
static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
+ tab_rec->switch_mir = mir;
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
int elements = table[1];
- tab_rec->targets =
- static_cast<LIR**>(arena_->Alloc(elements * sizeof(LIR*), kArenaAllocLIR));
switch_tables_.push_back(tab_rec);
// The table is composed of 8-byte key/disp pairs
@@ -145,17 +141,13 @@
*/
void MipsMir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
- if (cu_->verbose) {
- DumpPackedSwitchTable(table);
- }
// Add the table to the list - we'll process it later
SwitchTable* tab_rec =
static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
+ tab_rec->switch_mir = mir;
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
int size = table[1];
- tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
- kArenaAllocLIR));
switch_tables_.push_back(tab_rec);
// Get the switch value
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 888c34e..9f1a497 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -224,7 +224,7 @@
struct SwitchTable : EmbeddedData {
LIR* anchor; // Reference instruction for relative offsets.
- LIR** targets; // Array of case targets.
+ MIR* switch_mir; // The switch mir.
};
/* Static register use counts */
@@ -653,7 +653,6 @@
LIR* ScanLiteralPoolClass(LIR* data_target, const DexFile& dex_file, uint32_t type_idx);
LIR* AddWordData(LIR* *constant_list_p, int value);
LIR* AddWideData(LIR* *constant_list_p, int val_lo, int val_hi);
- void ProcessSwitchTables();
void DumpSparseSwitchTable(const uint16_t* table);
void DumpPackedSwitchTable(const uint16_t* table);
void MarkBoundary(DexOffset offset, const char* inst_str);
@@ -671,9 +670,7 @@
int AssignLiteralOffset(CodeOffset offset);
int AssignSwitchTablesOffset(CodeOffset offset);
int AssignFillArrayDataOffset(CodeOffset offset);
- virtual LIR* InsertCaseLabel(DexOffset vaddr, int keyVal);
- virtual void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec);
- void MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec);
+ LIR* InsertCaseLabel(uint32_t bbid, int keyVal);
// Handle bookkeeping to convert a wide RegLocation to a narrow RegLocation. No code generated.
virtual RegLocation NarrowRegLoc(RegLocation loc);
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index aa0972f..284e8f6 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -37,84 +37,6 @@
}
/*
- * We override InsertCaseLabel, because the first parameter represents
- * a basic block id, instead of a dex offset.
- */
-LIR* X86Mir2Lir::InsertCaseLabel(DexOffset bbid, int keyVal) {
- LIR* boundary_lir = &block_label_list_[bbid];
- LIR* res = boundary_lir;
- if (cu_->verbose) {
- // Only pay the expense if we're pretty-printing.
- LIR* new_label = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR));
- BasicBlock* bb = mir_graph_->GetBasicBlock(bbid);
- DCHECK(bb != nullptr);
- new_label->dalvik_offset = bb->start_offset;;
- new_label->opcode = kPseudoCaseLabel;
- new_label->operands[0] = keyVal;
- new_label->flags.fixup = kFixupLabel;
- DCHECK(!new_label->flags.use_def_invalid);
- new_label->u.m.def_mask = &kEncodeAll;
- InsertLIRAfter(boundary_lir, new_label);
- res = new_label;
- }
- return res;
-}
-
-void X86Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
- const uint16_t* table = tab_rec->table;
- const int32_t *targets = reinterpret_cast<const int32_t*>(&table[4]);
- int entries = table[1];
- int low_key = s4FromSwitchData(&table[2]);
- for (int i = 0; i < entries; i++) {
- // The value at targets[i] is a basic block id, instead of a dex offset.
- tab_rec->targets[i] = InsertCaseLabel(targets[i], i + low_key);
- }
-}
-
-/*
- * We convert and create a new packed switch table that stores
- * basic block ids to targets[] by examining successor blocks.
- * Note that the original packed switch table stores dex offsets to targets[].
- */
-const uint16_t* X86Mir2Lir::ConvertPackedSwitchTable(MIR* mir, const uint16_t* table) {
- /*
- * The original packed switch data format:
- * ushort ident = 0x0100 magic value
- * ushort size number of entries in the table
- * int first_key first (and lowest) switch case value
- * int targets[size] branch targets, relative to switch opcode
- *
- * Total size is (4+size*2) 16-bit code units.
- *
- * Note that the new packed switch data format is the same as the original
- * format, except that targets[] are basic block ids.
- *
- */
- BasicBlock* bb = mir_graph_->GetBasicBlock(mir->bb);
- DCHECK(bb != nullptr);
- // Get the number of entries.
- int entries = table[1];
- const int32_t* as_int32 = reinterpret_cast<const int32_t*>(&table[2]);
- int32_t starting_key = as_int32[0];
- // Create a new table.
- int size = sizeof(uint16_t) * (4 + entries * 2);
- uint16_t* new_table = reinterpret_cast<uint16_t*>(arena_->Alloc(size, kArenaAllocMisc));
- // Copy ident, size, and first_key to the new table.
- memcpy(new_table, table, sizeof(uint16_t) * 4);
- // Get the new targets.
- int32_t* new_targets = reinterpret_cast<int32_t*>(&new_table[4]);
- // Find out targets for each entry.
- int i = 0;
- for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
- DCHECK_EQ(starting_key + i, successor_block_info->key);
- // Save target basic block id.
- new_targets[i++] = successor_block_info->block;
- }
- DCHECK_EQ(i, entries);
- return new_table;
-}
-
-/*
* Code pattern will look something like:
*
* mov r_val, ..
@@ -131,16 +53,14 @@
* done:
*/
void X86Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
- const uint16_t* old_table = mir_graph_->GetTable(mir, table_offset);
- const uint16_t* table = ConvertPackedSwitchTable(mir, old_table);
+ const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
// Add the table to the list - we'll process it later
SwitchTable* tab_rec =
static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
+ tab_rec->switch_mir = mir;
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
int size = table[1];
- tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
- kArenaAllocLIR));
switch_tables_.push_back(tab_rec);
// Get the switch value
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 811d4f5..ca60400 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -271,11 +271,8 @@
int first_bit, int second_bit) OVERRIDE;
void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
- const uint16_t* ConvertPackedSwitchTable(MIR* mir, const uint16_t* table);
void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
- LIR* InsertCaseLabel(DexOffset vaddr, int keyVal) OVERRIDE;
- void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec) OVERRIDE;
/**
* @brief Implement instanceof a final class with x86 specific code.