/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "compiler_internals.h"
#include "mir_graph.h"
#include "leb128.h"
#include "dex_file-inl.h"

namespace art {

#define MAX_PATTERN_LEN 5

struct CodePattern {
  const Instruction::Code opcodes[MAX_PATTERN_LEN];
  const SpecialCaseHandler handler_code;
};

static const CodePattern special_patterns[] = {
  {{Instruction::RETURN_VOID}, kNullMethod},
  {{Instruction::CONST, Instruction::RETURN}, kConstFunction},
  {{Instruction::CONST_4, Instruction::RETURN}, kConstFunction},
  {{Instruction::CONST_4, Instruction::RETURN_OBJECT}, kConstFunction},
  {{Instruction::CONST_16, Instruction::RETURN}, kConstFunction},
  {{Instruction::IGET, Instruction::RETURN}, kIGet},
  {{Instruction::IGET_BOOLEAN, Instruction::RETURN}, kIGetBoolean},
  {{Instruction::IGET_OBJECT, Instruction::RETURN_OBJECT}, kIGetObject},
  {{Instruction::IGET_BYTE, Instruction::RETURN}, kIGetByte},
  {{Instruction::IGET_CHAR, Instruction::RETURN}, kIGetChar},
  {{Instruction::IGET_SHORT, Instruction::RETURN}, kIGetShort},
  {{Instruction::IGET_WIDE, Instruction::RETURN_WIDE}, kIGetWide},
  {{Instruction::IPUT, Instruction::RETURN_VOID}, kIPut},
  {{Instruction::IPUT_BOOLEAN, Instruction::RETURN_VOID}, kIPutBoolean},
  {{Instruction::IPUT_OBJECT, Instruction::RETURN_VOID}, kIPutObject},
  {{Instruction::IPUT_BYTE, Instruction::RETURN_VOID}, kIPutByte},
  {{Instruction::IPUT_CHAR, Instruction::RETURN_VOID}, kIPutChar},
  {{Instruction::IPUT_SHORT, Instruction::RETURN_VOID}, kIPutShort},
  {{Instruction::IPUT_WIDE, Instruction::RETURN_VOID}, kIPutWide},
  {{Instruction::RETURN}, kIdentity},
  {{Instruction::RETURN_OBJECT}, kIdentity},
  {{Instruction::RETURN_WIDE}, kIdentity},
};
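
/*
 * A worked example (illustrative; the Java source below is hypothetical,
 * not from this file): a trivial getter
 *
 *   int getX() { return x_; }
 *
 * compiles to the two-instruction body {IGET, RETURN}, which matches the
 * {Instruction::IGET, Instruction::RETURN} entry above, so the matching
 * loop in InlineMethod() reports kIGet as the special-case handler.
 */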

MIRGraph::MIRGraph(CompilationUnit* cu)
    : cu_(cu),
      ssa_base_vregs_(NULL),
      ssa_subscripts_(NULL),
      ssa_strings_(NULL),
      vreg_to_ssa_map_(NULL),
      ssa_last_defs_(NULL),
      is_constant_v_(NULL),
      constant_values_(NULL),
      num_reachable_blocks_(0),
      i_dom_list_(NULL),
      def_block_matrix_(NULL),
      temp_block_v_(NULL),
      temp_dalvik_register_v_(NULL),
      temp_ssa_register_v_(NULL),
      try_block_addr_(NULL),
      entry_block_(NULL),
      exit_block_(NULL),
      cur_block_(NULL),
      num_blocks_(0),
      current_code_item_(NULL),
      current_method_(kInvalidEntry),
      current_offset_(kInvalidEntry),
      def_count_(0),
      opcode_count_(NULL),
      num_ssa_regs_(0) {
  CompilerInitGrowableList(cu, &block_list_, 0, kListBlockList);
  try_block_addr_ = AllocBitVector(cu, 0, true /* expandable */);
}

bool MIRGraph::ContentIsInsn(const uint16_t* code_ptr) {
  uint16_t instr = *code_ptr;
  Instruction::Code opcode = static_cast<Instruction::Code>(instr & 0xff);
  /*
   * Since the low 8 bits of a data payload can look like a NOP, check both
   * the low byte and the whole 16-bit code unit to determine whether this
   * is code or data.
   */
  return (opcode != Instruction::NOP || instr == 0);
}
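
/*
 * A worked example (illustrative, using the payload formats documented in
 * ProcessCanSwitch below): a packed-switch payload begins with the ident
 * 0x0100. Its low byte, 0x00, aliases Instruction::NOP, but the full code
 * unit is non-zero, so ContentIsInsn() classifies it as data. Only a true
 * NOP, where the entire 16-bit unit is 0, is treated as an instruction.
 */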

/*
 * Parse an instruction and return its length in code units (0 if the
 * content is data rather than an instruction).
 */
int MIRGraph::ParseInsn(const uint16_t* code_ptr, DecodedInstruction* decoded_instruction)
{
  // Don't parse instruction data
  if (!ContentIsInsn(code_ptr)) {
    return 0;
  }

  const Instruction* instruction = Instruction::At(code_ptr);
  *decoded_instruction = DecodedInstruction(instruction);

  return instruction->SizeInCodeUnits();
}


/* Split an existing block from the specified code offset into two */
BasicBlock* MIRGraph::SplitBlock(unsigned int code_offset,
                                 BasicBlock* orig_block, BasicBlock** immed_pred_block_p)
{
  MIR* insn = orig_block->first_mir_insn;
  while (insn) {
    if (insn->offset == code_offset) break;
    insn = insn->next;
  }
  if (insn == NULL) {
    LOG(FATAL) << "Break split failed";
  }
  BasicBlock *bottom_block = NewMemBB(cu_, kDalvikByteCode, num_blocks_++);
  InsertGrowableList(cu_, &block_list_, reinterpret_cast<uintptr_t>(bottom_block));

  bottom_block->start_offset = code_offset;
  bottom_block->first_mir_insn = insn;
  bottom_block->last_mir_insn = orig_block->last_mir_insn;

  /* If this block was terminated by a return, the flag needs to go with the bottom block */
  bottom_block->terminated_by_return = orig_block->terminated_by_return;
  orig_block->terminated_by_return = false;

  /* Add it to the quick lookup cache */
  block_map_.Put(bottom_block->start_offset, bottom_block);

  /* Handle the taken path */
  bottom_block->taken = orig_block->taken;
  if (bottom_block->taken) {
    orig_block->taken = NULL;
    DeleteGrowableList(bottom_block->taken->predecessors, reinterpret_cast<uintptr_t>(orig_block));
    InsertGrowableList(cu_, bottom_block->taken->predecessors,
                       reinterpret_cast<uintptr_t>(bottom_block));
  }

  /* Handle the fallthrough path */
  bottom_block->fall_through = orig_block->fall_through;
  orig_block->fall_through = bottom_block;
  InsertGrowableList(cu_, bottom_block->predecessors,
                     reinterpret_cast<uintptr_t>(orig_block));
  if (bottom_block->fall_through) {
    DeleteGrowableList(bottom_block->fall_through->predecessors,
                       reinterpret_cast<uintptr_t>(orig_block));
    InsertGrowableList(cu_, bottom_block->fall_through->predecessors,
                       reinterpret_cast<uintptr_t>(bottom_block));
  }

  /* Handle the successor list */
  if (orig_block->successor_block_list.block_list_type != kNotUsed) {
    bottom_block->successor_block_list = orig_block->successor_block_list;
    orig_block->successor_block_list.block_list_type = kNotUsed;
    GrowableListIterator iterator;

    GrowableListIteratorInit(&bottom_block->successor_block_list.blocks,
                             &iterator);
    while (true) {
      SuccessorBlockInfo *successor_block_info =
          reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
      if (successor_block_info == NULL) break;
      BasicBlock *bb = successor_block_info->block;
      DeleteGrowableList(bb->predecessors, reinterpret_cast<uintptr_t>(orig_block));
      InsertGrowableList(cu_, bb->predecessors, reinterpret_cast<uintptr_t>(bottom_block));
    }
  }

  orig_block->last_mir_insn = insn->prev;

  insn->prev->next = NULL;
  insn->prev = NULL;
  /*
   * Update the immediate predecessor block pointer so that outgoing edges
   * can be applied to the proper block.
   */
  if (immed_pred_block_p) {
    DCHECK_EQ(*immed_pred_block_p, orig_block);
    *immed_pred_block_p = bottom_block;
  }
  return bottom_block;
}
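
/*
 * A sketch of the transformation (offsets are illustrative, not from a
 * real method): splitting a block covering [0x0000..0x000a] at offset
 * 0x0006 yields
 *
 *   orig_block:   [0x0000..0x0004] --fall_through--> bottom_block
 *   bottom_block: [0x0006..0x000a] (inherits the taken, fallthrough, and
 *                                   successor edges of orig_block)
 *
 * and the predecessor lists of the inherited successors are rewritten to
 * point at bottom_block instead of orig_block.
 */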

/*
 * Given a code offset, find the block that starts with it. If the offset
 * is in the middle of an existing block, split that block into two. If
 * immed_pred_block_p is non-null and points at the block being split,
 * update *immed_pred_block_p to point to the bottom block so that outgoing
 * edges can be set up properly (by the caller).
 * Utilizes a map for fast lookup of the typical cases.
 */
BasicBlock* MIRGraph::FindBlock(unsigned int code_offset, bool split, bool create,
                                BasicBlock** immed_pred_block_p)
{
  BasicBlock* bb;
  unsigned int i;
  SafeMap<unsigned int, BasicBlock*>::iterator it;

  it = block_map_.find(code_offset);
  if (it != block_map_.end()) {
    return it->second;
  } else if (!create) {
    return NULL;
  }

  if (split) {
    for (i = 0; i < block_list_.num_used; i++) {
      bb = reinterpret_cast<BasicBlock*>(block_list_.elem_list[i]);
      if (bb->block_type != kDalvikByteCode) continue;
      /* Check if a branch jumps into the middle of an existing block */
      if ((code_offset > bb->start_offset) && (bb->last_mir_insn != NULL) &&
          (code_offset <= bb->last_mir_insn->offset)) {
        BasicBlock *new_bb = SplitBlock(code_offset, bb, bb == *immed_pred_block_p ?
                                        immed_pred_block_p : NULL);
        return new_bb;
      }
    }
  }

  /* Create a new one */
  bb = NewMemBB(cu_, kDalvikByteCode, num_blocks_++);
  InsertGrowableList(cu_, &block_list_, reinterpret_cast<uintptr_t>(bb));
  bb->start_offset = code_offset;
  block_map_.Put(bb->start_offset, bb);
  return bb;
}

/* Identify code range in try blocks and set up the empty catch blocks */
void MIRGraph::ProcessTryCatchBlocks()
{
  int tries_size = current_code_item_->tries_size_;
  int offset;

  if (tries_size == 0) {
    return;
  }

  for (int i = 0; i < tries_size; i++) {
    const DexFile::TryItem* pTry =
        DexFile::GetTryItems(*current_code_item_, i);
    int start_offset = pTry->start_addr_;
    int end_offset = start_offset + pTry->insn_count_;
    for (offset = start_offset; offset < end_offset; offset++) {
      SetBit(cu_, try_block_addr_, offset);
    }
  }

  // Iterate over each of the handlers to enqueue the empty Catch blocks
  const byte* handlers_ptr = DexFile::GetCatchHandlerData(*current_code_item_, 0);
  uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
  for (uint32_t idx = 0; idx < handlers_size; idx++) {
    CatchHandlerIterator iterator(handlers_ptr);
    for (; iterator.HasNext(); iterator.Next()) {
      uint32_t address = iterator.GetHandlerAddress();
      FindBlock(address, false /* split */, true /* create */,
                /* immed_pred_block_p */ NULL);
    }
    handlers_ptr = iterator.EndDataPointer();
  }
}

/* Process instructions with the kBranch flag */
BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
                                       int flags, const uint16_t* code_ptr,
                                       const uint16_t* code_end)
{
  int target = cur_offset;
  switch (insn->dalvikInsn.opcode) {
    case Instruction::GOTO:
    case Instruction::GOTO_16:
    case Instruction::GOTO_32:
      target += insn->dalvikInsn.vA;
      break;
    case Instruction::IF_EQ:
    case Instruction::IF_NE:
    case Instruction::IF_LT:
    case Instruction::IF_GE:
    case Instruction::IF_GT:
    case Instruction::IF_LE:
      cur_block->conditional_branch = true;
      target += insn->dalvikInsn.vC;
      break;
    case Instruction::IF_EQZ:
    case Instruction::IF_NEZ:
    case Instruction::IF_LTZ:
    case Instruction::IF_GEZ:
    case Instruction::IF_GTZ:
    case Instruction::IF_LEZ:
      cur_block->conditional_branch = true;
      target += insn->dalvikInsn.vB;
      break;
    default:
      LOG(FATAL) << "Unexpected opcode(" << insn->dalvikInsn.opcode << ") with kBranch set";
  }
  BasicBlock *taken_block = FindBlock(target, /* split */ true, /* create */ true,
                                      /* immed_pred_block_p */ &cur_block);
  cur_block->taken = taken_block;
  InsertGrowableList(cu_, taken_block->predecessors, reinterpret_cast<uintptr_t>(cur_block));

  /* Always terminate the current block for conditional branches */
  if (flags & Instruction::kContinue) {
    BasicBlock *fallthrough_block = FindBlock(cur_offset + width,
                                              /*
                                               * If the method is processed
                                               * in sequential order from the
                                               * beginning, we don't need to
                                               * specify split for continue
                                               * blocks. However, this
                                               * routine can be called by
                                               * compileLoop, which starts
                                               * parsing the method from an
                                               * arbitrary address in the
                                               * method body.
                                               */
                                              true,
                                              /* create */
                                              true,
                                              /* immed_pred_block_p */
                                              &cur_block);
    cur_block->fall_through = fallthrough_block;
    InsertGrowableList(cu_, fallthrough_block->predecessors,
                       reinterpret_cast<uintptr_t>(cur_block));
  } else if (code_ptr < code_end) {
    /* Create a fallthrough block for real instructions (incl. NOP) */
    if (ContentIsInsn(code_ptr)) {
      FindBlock(cur_offset + width, /* split */ false, /* create */ true,
                /* immed_pred_block_p */ NULL);
    }
  }
  return cur_block;
}

/* Process instructions with the kSwitch flag */
void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
                                int flags)
{
  const uint16_t* switch_data =
      reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB);
  int size;
  const int* key_table;
  const int* target_table;
  int i;
  int first_key;

  /*
   * Packed switch data format:
   *  ushort ident = 0x0100  magic value
   *  ushort size            number of entries in the table
   *  int first_key          first (and lowest) switch case value
   *  int targets[size]      branch targets, relative to switch opcode
   *
   * Total size is (4+size*2) 16-bit code units.
   */
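  /*
   * A worked example (illustrative, not from a real method): a packed
   * switch with two targets and first_key = 10 occupies 4 + 2*2 = 8 code
   * units:
   *
   *  switch_data[0]    = 0x0100          ident
   *  switch_data[1]    = 2               size
   *  switch_data[2..3] = 10              first_key, as two 16-bit halves
   *  switch_data[4..7] = targets[0..1]   two 32-bit relative branch targets
   *
   * Case 0 dispatches on key 10 and case 1 on key 11.
   */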
  if (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) {
    DCHECK_EQ(static_cast<int>(switch_data[0]),
              static_cast<int>(Instruction::kPackedSwitchSignature));
    size = switch_data[1];
    first_key = switch_data[2] | (switch_data[3] << 16);
    target_table = reinterpret_cast<const int*>(&switch_data[4]);
    key_table = NULL;  // Make the compiler happy
    /*
     * Sparse switch data format:
     *  ushort ident = 0x0200  magic value
     *  ushort size            number of entries in the table; > 0
     *  int keys[size]         keys, sorted low-to-high; 32-bit aligned
     *  int targets[size]      branch targets, relative to switch opcode
     *
     * Total size is (2+size*4) 16-bit code units.
     */
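    /*
     * A worked example (illustrative, not from a real method): a sparse
     * switch with two entries occupies 2 + 2*4 = 10 code units:
     *
     *  switch_data[0]    = 0x0200          ident
     *  switch_data[1]    = 2               size
     *  switch_data[2..5] = keys[0..1]      two sorted 32-bit keys
     *  switch_data[6..9] = targets[0..1]   two 32-bit relative branch targets
     */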
  } else {
    DCHECK_EQ(static_cast<int>(switch_data[0]),
              static_cast<int>(Instruction::kSparseSwitchSignature));
    size = switch_data[1];
    key_table = reinterpret_cast<const int*>(&switch_data[2]);
    target_table = reinterpret_cast<const int*>(&switch_data[2 + size*2]);
    first_key = 0;  // To make the compiler happy
  }

  if (cur_block->successor_block_list.block_list_type != kNotUsed) {
    LOG(FATAL) << "Successor block list already in use: "
               << static_cast<int>(cur_block->successor_block_list.block_list_type);
  }
  cur_block->successor_block_list.block_list_type =
      (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
      kPackedSwitch : kSparseSwitch;
  CompilerInitGrowableList(cu_, &cur_block->successor_block_list.blocks, size,
                           kListSuccessorBlocks);

  for (i = 0; i < size; i++) {
    BasicBlock *case_block = FindBlock(cur_offset + target_table[i], /* split */ true,
                                       /* create */ true, /* immed_pred_block_p */ &cur_block);
    SuccessorBlockInfo *successor_block_info =
        static_cast<SuccessorBlockInfo*>(NewMem(cu_, sizeof(SuccessorBlockInfo),
                                                false, kAllocSuccessor));
    successor_block_info->block = case_block;
    successor_block_info->key =
        (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
        first_key + i : key_table[i];
    InsertGrowableList(cu_, &cur_block->successor_block_list.blocks,
                       reinterpret_cast<uintptr_t>(successor_block_info));
    InsertGrowableList(cu_, case_block->predecessors,
                       reinterpret_cast<uintptr_t>(cur_block));
  }

  /* Fall-through case */
  BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* split */ false,
                                            /* create */ true, /* immed_pred_block_p */ NULL);
  cur_block->fall_through = fallthrough_block;
  InsertGrowableList(cu_, fallthrough_block->predecessors,
                     reinterpret_cast<uintptr_t>(cur_block));
}

/* Process instructions with the kThrow flag */
BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
                                      int flags, ArenaBitVector* try_block_addr,
                                      const uint16_t* code_ptr, const uint16_t* code_end)
{
  bool in_try_block = IsBitSet(try_block_addr, cur_offset);

  /* In try block */
  if (in_try_block) {
    CatchHandlerIterator iterator(*current_code_item_, cur_offset);

    if (cur_block->successor_block_list.block_list_type != kNotUsed) {
      LOG(INFO) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
      LOG(FATAL) << "Successor block list already in use: "
                 << static_cast<int>(cur_block->successor_block_list.block_list_type);
    }

    cur_block->successor_block_list.block_list_type = kCatch;
    CompilerInitGrowableList(cu_, &cur_block->successor_block_list.blocks, 2,
                             kListSuccessorBlocks);

    for (; iterator.HasNext(); iterator.Next()) {
      BasicBlock *catch_block = FindBlock(iterator.GetHandlerAddress(), false /* split */,
                                          false /* create */, NULL /* immed_pred_block_p */);
      catch_block->catch_entry = true;
      if (kIsDebugBuild) {
        catches_.insert(catch_block->start_offset);
      }
      SuccessorBlockInfo *successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
          (NewMem(cu_, sizeof(SuccessorBlockInfo), false, kAllocSuccessor));
      successor_block_info->block = catch_block;
      successor_block_info->key = iterator.GetHandlerTypeIndex();
      InsertGrowableList(cu_, &cur_block->successor_block_list.blocks,
                         reinterpret_cast<uintptr_t>(successor_block_info));
      InsertGrowableList(cu_, catch_block->predecessors,
                         reinterpret_cast<uintptr_t>(cur_block));
    }
  } else {
    BasicBlock *eh_block = NewMemBB(cu_, kExceptionHandling, num_blocks_++);
    cur_block->taken = eh_block;
    InsertGrowableList(cu_, &block_list_, reinterpret_cast<uintptr_t>(eh_block));
    eh_block->start_offset = cur_offset;
    InsertGrowableList(cu_, eh_block->predecessors, reinterpret_cast<uintptr_t>(cur_block));
  }

  if (insn->dalvikInsn.opcode == Instruction::THROW) {
    cur_block->explicit_throw = true;
    if ((code_ptr < code_end) && ContentIsInsn(code_ptr)) {
      // Force creation of new block following THROW via side-effect
      FindBlock(cur_offset + width, /* split */ false, /* create */ true,
                /* immed_pred_block_p */ NULL);
    }
    if (!in_try_block) {
      // Don't split a THROW that can't rethrow - we're done.
      return cur_block;
    }
  }

  /*
   * Split the potentially-throwing instruction into two parts.
   * The first half will be a pseudo-op that captures the exception
   * edges and terminates the basic block. It always falls through.
   * Then, create a new basic block that begins with the throwing instruction
   * (minus exceptions). Note: this new basic block must NOT be entered into
   * the block_map. If the potentially-throwing instruction is the target of a
   * future branch, we need to find the check pseudo half. The new
   * basic block containing the work portion of the instruction should
   * only be entered via fallthrough from the block containing the
   * pseudo exception edge MIR. Note also that this new block is
   * not automatically terminated after the work portion, and may
   * contain following instructions.
   */
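  /*
   * A sketch of the split (illustrative): if cur_block ends with a
   * potentially-throwing IGET, the result is
   *
   *   cur_block: [..., kMirOpCheck]  --taken/successors--> handler blocks
   *        | fall_through
   *        v
   *   new_block: [IGET, ...]         (work portion, no exception edges)
   *
   * with the two MIR halves linked through meta.throw_insn below.
   */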
  BasicBlock *new_block = NewMemBB(cu_, kDalvikByteCode, num_blocks_++);
  InsertGrowableList(cu_, &block_list_, reinterpret_cast<uintptr_t>(new_block));
  new_block->start_offset = insn->offset;
  cur_block->fall_through = new_block;
  InsertGrowableList(cu_, new_block->predecessors, reinterpret_cast<uintptr_t>(cur_block));
  MIR* new_insn = static_cast<MIR*>(NewMem(cu_, sizeof(MIR), true, kAllocMIR));
  *new_insn = *insn;
  insn->dalvikInsn.opcode =
      static_cast<Instruction::Code>(kMirOpCheck);
  // Associate the two halves
  insn->meta.throw_insn = new_insn;
  new_insn->meta.throw_insn = insn;
  AppendMIR(new_block, new_insn);
  return new_block;
}

/* Parse a Dex method and insert it into the MIRGraph at the current insert point. */
void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
                            InvokeType invoke_type, uint32_t class_def_idx,
                            uint32_t method_idx, jobject class_loader, const DexFile& dex_file)
{
  current_code_item_ = code_item;
  method_stack_.push_back(std::make_pair(current_method_, current_offset_));
  current_method_ = m_units_.size();
  current_offset_ = 0;
  // TODO: will need to snapshot stack image and use that as the mir context identification.
  m_units_.push_back(new DexCompilationUnit(cu_, class_loader, Runtime::Current()->GetClassLinker(),
                     dex_file, current_code_item_, class_def_idx, method_idx, access_flags));
  const uint16_t* code_ptr = current_code_item_->insns_;
  const uint16_t* code_end =
      current_code_item_->insns_ + current_code_item_->insns_size_in_code_units_;

  // TODO: need to rework expansion of block list & try_block_addr when inlining activated.
  ReallocGrowableList(cu_, &block_list_, block_list_.num_used +
                      current_code_item_->insns_size_in_code_units_);
  // TODO: replace with explicit resize routine. Using automatic extension side effect for now.
  SetBit(cu_, try_block_addr_, current_code_item_->insns_size_in_code_units_);
  ClearBit(try_block_addr_, current_code_item_->insns_size_in_code_units_);

  // If this is the first method, set up default entry and exit blocks.
  if (current_method_ == 0) {
    DCHECK(entry_block_ == NULL);
    DCHECK(exit_block_ == NULL);
    DCHECK(num_blocks_ == 0);
    entry_block_ = NewMemBB(cu_, kEntryBlock, num_blocks_++);
    exit_block_ = NewMemBB(cu_, kExitBlock, num_blocks_++);
    InsertGrowableList(cu_, &block_list_, reinterpret_cast<uintptr_t>(entry_block_));
    InsertGrowableList(cu_, &block_list_, reinterpret_cast<uintptr_t>(exit_block_));
    // TODO: deprecate all "cu->" fields; move what's left to wherever CompilationUnit is allocated.
    cu_->dex_file = &dex_file;
    cu_->class_def_idx = class_def_idx;
    cu_->method_idx = method_idx;
    cu_->access_flags = access_flags;
    cu_->invoke_type = invoke_type;
    cu_->shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
    cu_->num_ins = current_code_item_->ins_size_;
    cu_->num_regs = current_code_item_->registers_size_ - cu_->num_ins;
    cu_->num_outs = current_code_item_->outs_size_;
    cu_->num_dalvik_registers = current_code_item_->registers_size_;
    cu_->insns = current_code_item_->insns_;
    cu_->code_item = current_code_item_;
  } else {
    UNIMPLEMENTED(FATAL) << "Nested inlining not implemented.";
    /*
     * Will need to manage storage for ins & outs, push previous state and update
     * insert point.
     */
  }

  /* Current block to record parsed instructions */
  BasicBlock *cur_block = NewMemBB(cu_, kDalvikByteCode, num_blocks_++);
  DCHECK_EQ(current_offset_, 0);
  cur_block->start_offset = current_offset_;
  InsertGrowableList(cu_, &block_list_, reinterpret_cast<uintptr_t>(cur_block));
  /* Add first block to the fast lookup cache */
  // FIXME: block map needs association with offset/method pair rather than just offset
  block_map_.Put(cur_block->start_offset, cur_block);
  // FIXME: this needs to insert at the insert point rather than entry block.
  entry_block_->fall_through = cur_block;
  InsertGrowableList(cu_, cur_block->predecessors, reinterpret_cast<uintptr_t>(entry_block_));

  /* Identify code range in try blocks and set up the empty catch blocks */
  ProcessTryCatchBlocks();

  /* Set up for simple method detection */
  int num_patterns = sizeof(special_patterns)/sizeof(special_patterns[0]);
  bool live_pattern = (num_patterns > 0) && !(cu_->disable_opt & (1 << kMatch));
  bool* dead_pattern =
      static_cast<bool*>(NewMem(cu_, sizeof(bool) * num_patterns, true, kAllocMisc));
  SpecialCaseHandler special_case = kNoHandler;
  // FIXME - wire this up
  (void)special_case;
  int pattern_pos = 0;

  /* Parse all instructions and put them into containing basic blocks */
  while (code_ptr < code_end) {
    MIR *insn = static_cast<MIR *>(NewMem(cu_, sizeof(MIR), true, kAllocMIR));
    insn->offset = current_offset_;
    insn->m_unit_index = current_method_;
    int width = ParseInsn(code_ptr, &insn->dalvikInsn);
    insn->width = width;
    Instruction::Code opcode = insn->dalvikInsn.opcode;
    if (opcode_count_ != NULL) {
      opcode_count_[static_cast<int>(opcode)]++;
    }

    /* Terminate when the data section is seen */
    if (width == 0) {
      break;
    }

    /* Possible simple method? */
    if (live_pattern) {
      live_pattern = false;
      special_case = kNoHandler;
      for (int i = 0; i < num_patterns; i++) {
        if (!dead_pattern[i]) {
          if (special_patterns[i].opcodes[pattern_pos] == opcode) {
            live_pattern = true;
            special_case = special_patterns[i].handler_code;
          } else {
            dead_pattern[i] = true;
          }
        }
      }
      pattern_pos++;
    }

    AppendMIR(cur_block, insn);

    code_ptr += width;
    int flags = Instruction::FlagsOf(insn->dalvikInsn.opcode);

    int df_flags = oat_data_flow_attributes[insn->dalvikInsn.opcode];

    if (df_flags & DF_HAS_DEFS) {
      def_count_ += (df_flags & DF_A_WIDE) ? 2 : 1;
    }

    if (flags & Instruction::kBranch) {
      cur_block = ProcessCanBranch(cur_block, insn, current_offset_,
                                   width, flags, code_ptr, code_end);
    } else if (flags & Instruction::kReturn) {
      cur_block->terminated_by_return = true;
      cur_block->fall_through = exit_block_;
      InsertGrowableList(cu_, exit_block_->predecessors,
                         reinterpret_cast<uintptr_t>(cur_block));
      /*
       * Terminate the current block if there are instructions
       * afterwards.
       */
      if (code_ptr < code_end) {
        /*
         * Create a fallthrough block for real instructions
         * (incl. NOP).
         */
        if (ContentIsInsn(code_ptr)) {
          FindBlock(current_offset_ + width, /* split */ false, /* create */ true,
                    /* immed_pred_block_p */ NULL);
        }
      }
    } else if (flags & Instruction::kThrow) {
      cur_block = ProcessCanThrow(cur_block, insn, current_offset_, width, flags, try_block_addr_,
                                  code_ptr, code_end);
    } else if (flags & Instruction::kSwitch) {
      ProcessCanSwitch(cur_block, insn, current_offset_, width, flags);
    }
    current_offset_ += width;
    BasicBlock *next_block = FindBlock(current_offset_, /* split */ false, /* create */
                                       false, /* immed_pred_block_p */ NULL);
    if (next_block) {
      /*
       * The next instruction could be the target of a previously parsed
       * forward branch so a block is already created. If the current
       * instruction is not an unconditional branch, connect them through
       * the fall-through link.
       */
      DCHECK(cur_block->fall_through == NULL ||
             cur_block->fall_through == next_block ||
             cur_block->fall_through == exit_block_);

      if ((cur_block->fall_through == NULL) && (flags & Instruction::kContinue)) {
        cur_block->fall_through = next_block;
        InsertGrowableList(cu_, next_block->predecessors,
                           reinterpret_cast<uintptr_t>(cur_block));
      }
      cur_block = next_block;
    }
  }
  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
    DumpCFG("/sdcard/1_post_parse_cfg/", true);
  }

  if (cu_->verbose) {
    DumpCompilationUnit(cu_);
  }
}

void MIRGraph::ShowOpcodeStats()
{
  DCHECK(opcode_count_ != NULL);
  LOG(INFO) << "Opcode Count";
  for (int i = 0; i < kNumPackedOpcodes; i++) {
    if (opcode_count_[i] != 0) {
      LOG(INFO) << "-C- " << Instruction::Name(static_cast<Instruction::Code>(i))
                << " " << opcode_count_[i];
    }
  }
}

// TODO: use a configurable base prefix, and adjust callers to supply pass name.
/* Dump the CFG into a DOT graph */
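/*
 * An illustrative sketch of the emitted DOT text (block names here are
 * hypothetical; actual names come from GetBlockName(), defined elsewhere):
 *
 *   digraph G {
 *     rankdir=TB
 *     entry_0 [shape=Mdiamond];
 *     block0000_2 [shape=record,label = "{ {block id 2\l} | ... }"];
 *     entry_0:s -> block0000_2:n
 *     ...
 *   }
 *
 * Taken edges are rendered dotted, fallthrough edges solid, and
 * successor-list edges dashed.
 */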
void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks)
{
  FILE* file;
  std::string fname(PrettyMethod(cu_->method_idx, *cu_->dex_file));
  ReplaceSpecialChars(fname);
  fname = StringPrintf("%s%s%x.dot", dir_prefix, fname.c_str(),
                       GetEntryBlock()->fall_through->start_offset);
  file = fopen(fname.c_str(), "w");
  if (file == NULL) {
    return;
  }
  fprintf(file, "digraph G {\n");

  fprintf(file, "  rankdir=TB\n");

  int num_blocks = all_blocks ? GetNumBlocks() : num_reachable_blocks_;
  int idx;

  for (idx = 0; idx < num_blocks; idx++) {
    int block_idx = all_blocks ? idx : dfs_order_.elem_list[idx];
    BasicBlock *bb = GetBasicBlock(block_idx);
    if (bb == NULL) break;
    if (bb->block_type == kDead) continue;
    if (bb->block_type == kEntryBlock) {
      fprintf(file, "  entry_%d [shape=Mdiamond];\n", bb->id);
    } else if (bb->block_type == kExitBlock) {
      fprintf(file, "  exit_%d [shape=Mdiamond];\n", bb->id);
    } else if (bb->block_type == kDalvikByteCode) {
      fprintf(file, "  block%04x_%d [shape=record,label = \"{ \\\n",
              bb->start_offset, bb->id);
      const MIR *mir;
      fprintf(file, "    {block id %d\\l}%s\\\n", bb->id,
              bb->first_mir_insn ? " | " : " ");
      for (mir = bb->first_mir_insn; mir; mir = mir->next) {
        int opcode = mir->dalvikInsn.opcode;
        fprintf(file, "    {%04x %s %s %s\\l}%s\\\n", mir->offset,
                mir->ssa_rep ? GetDalvikDisassembly(cu_, mir) :
                (opcode < kMirOpFirst) ? Instruction::Name(mir->dalvikInsn.opcode) :
                extended_mir_op_names[opcode - kMirOpFirst],
                (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0 ? " no_rangecheck" : " ",
                (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0 ? " no_nullcheck" : " ",
                mir->next ? " | " : " ");
      }
      fprintf(file, "  }\"];\n\n");
    } else if (bb->block_type == kExceptionHandling) {
      char block_name[BLOCK_NAME_LEN];

      GetBlockName(bb, block_name);
      fprintf(file, "  %s [shape=invhouse];\n", block_name);
    }

    char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];

    if (bb->taken) {
      GetBlockName(bb, block_name1);
      GetBlockName(bb->taken, block_name2);
      fprintf(file, "  %s:s -> %s:n [style=dotted]\n",
              block_name1, block_name2);
    }
    if (bb->fall_through) {
      GetBlockName(bb, block_name1);
      GetBlockName(bb->fall_through, block_name2);
      fprintf(file, "  %s:s -> %s:n\n", block_name1, block_name2);
    }

    if (bb->successor_block_list.block_list_type != kNotUsed) {
      fprintf(file, "  succ%04x_%d [shape=%s,label = \"{ \\\n",
              bb->start_offset, bb->id,
              (bb->successor_block_list.block_list_type == kCatch) ?
              "Mrecord" : "record");
      GrowableListIterator iterator;
      GrowableListIteratorInit(&bb->successor_block_list.blocks,
                               &iterator);
      SuccessorBlockInfo *successor_block_info =
          reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));

      int succ_id = 0;
      while (true) {
        if (successor_block_info == NULL) break;

        BasicBlock *dest_block = successor_block_info->block;
        SuccessorBlockInfo *next_successor_block_info =
            reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));

        fprintf(file, "    {<f%d> %04x: %04x\\l}%s\\\n",
                succ_id++,
                successor_block_info->key,
                dest_block->start_offset,
                (next_successor_block_info != NULL) ? " | " : " ");

        successor_block_info = next_successor_block_info;
      }
      fprintf(file, "  }\"];\n\n");

      GetBlockName(bb, block_name1);
      fprintf(file, "  %s:s -> succ%04x_%d:n [style=dashed]\n",
              block_name1, bb->start_offset, bb->id);

      if (bb->successor_block_list.block_list_type == kPackedSwitch ||
          bb->successor_block_list.block_list_type == kSparseSwitch) {

        GrowableListIteratorInit(&bb->successor_block_list.blocks,
                                 &iterator);

        succ_id = 0;
        while (true) {
          SuccessorBlockInfo *successor_block_info =
              reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
          if (successor_block_info == NULL) break;

          BasicBlock *dest_block = successor_block_info->block;

          GetBlockName(dest_block, block_name2);
          fprintf(file, "    succ%04x_%d:f%d:e -> %s:n\n", bb->start_offset,
                  bb->id, succ_id++, block_name2);
        }
      }
    }
    fprintf(file, "\n");

    if (cu_->verbose) {
      /* Display the dominator tree */
      GetBlockName(bb, block_name1);
      fprintf(file, "  cfg%s [label=\"%s\", shape=none];\n",
              block_name1, block_name1);
      if (bb->i_dom) {
        GetBlockName(bb->i_dom, block_name2);
        fprintf(file, "  cfg%s:s -> cfg%s:n\n\n", block_name2, block_name1);
      }
    }
  }
  fprintf(file, "}\n");
  fclose(file);
}

}  // namespace art