/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_internals.h"
#include "dex_file-inl.h"
#include "gc_map.h"
#include "mir_to_lir-inl.h"
#include "verifier/dex_gc_map.h"
#include "verifier/method_verifier.h"

namespace art {

bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src)
{
  bool res = false;
  if (rl_src.is_const) {
    if (rl_src.wide) {
      if (rl_src.fp) {
        res = InexpensiveConstantDouble(mir_graph_->ConstantValueWide(rl_src));
      } else {
        res = InexpensiveConstantLong(mir_graph_->ConstantValueWide(rl_src));
      }
    } else {
      if (rl_src.fp) {
        res = InexpensiveConstantFloat(mir_graph_->ConstantValue(rl_src));
      } else {
        res = InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src));
      }
    }
  }
  return res;
}

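// Mark the instruction as defining all resources (a barrier to code motion)
// and emit a kPseudoSafepointPC to record the native PC for the GC map.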
void Mir2Lir::MarkSafepointPC(LIR* inst)
{
  inst->def_mask = ENCODE_ALL;
  LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC);
  DCHECK_EQ(safepoint_pc->def_mask, ENCODE_ALL);
}

bool Mir2Lir::FastInstance(uint32_t field_idx, int& field_offset, bool& is_volatile, bool is_put)
{
  return cu_->compiler_driver->ComputeInstanceFieldInfo(
      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, is_volatile, is_put);
}

/* Convert an instruction to a NOP */
void Mir2Lir::NopLIR(LIR* lir)
{
  lir->flags.is_nop = true;
}

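// Rewrite the memory-reference component of a load/store's use or def mask to
// the given kind: literal pool, Dalvik register, heap reference, or a
// must-not-alias load.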
void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type)
{
  uint64_t *mask_ptr;
  uint64_t mask = ENCODE_MEM;
  DCHECK(GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE));
  if (is_load) {
    mask_ptr = &lir->use_mask;
  } else {
    mask_ptr = &lir->def_mask;
  }
  /* Clear out the memref flags */
  *mask_ptr &= ~mask;
  /* ..and then add back the one we need */
  switch (mem_type) {
    case kLiteral:
      DCHECK(is_load);
      *mask_ptr |= ENCODE_LITERAL;
      break;
    case kDalvikReg:
      *mask_ptr |= ENCODE_DALVIK_REG;
      break;
    case kHeapRef:
      *mask_ptr |= ENCODE_HEAP_REF;
      break;
    case kMustNotAlias:
      /* Currently only loads can be marked as kMustNotAlias */
      DCHECK(!(GetTargetInstFlags(lir->opcode) & IS_STORE));
      *mask_ptr |= ENCODE_MUST_NOT_ALIAS;
      break;
    default:
      LOG(FATAL) << "Oat: invalid memref kind - " << mem_type;
  }
}

/*
 * Mark load/store instructions that access Dalvik registers through the stack.
 */
void Mir2Lir::AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load,
                                      bool is64bit)
{
  SetMemRefType(lir, is_load, kDalvikReg);

  /*
   * Store the Dalvik register id in alias_info. Mark the MSB if it is a 64-bit
   * access.
   */
  lir->alias_info = ENCODE_ALIAS_INFO(reg_id, is64bit);
}

/*
 * Debugging macros
 */
#define DUMP_RESOURCE_MASK(X)

/* Pretty-print a LIR instruction */
void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr)
{
  int offset = lir->offset;
  int dest = lir->operands[0];
  const bool dump_nop = (cu_->enable_debug & (1 << kDebugShowNops));

  /* Handle pseudo-ops individually, and all regular insns as a group */
  switch (lir->opcode) {
    case kPseudoMethodEntry:
      LOG(INFO) << "-------- method entry "
                << PrettyMethod(cu_->method_idx, *cu_->dex_file);
      break;
    case kPseudoMethodExit:
      LOG(INFO) << "-------- Method_Exit";
      break;
    case kPseudoBarrier:
      LOG(INFO) << "-------- BARRIER";
      break;
    case kPseudoEntryBlock:
      LOG(INFO) << "-------- entry offset: 0x" << std::hex << dest;
      break;
    case kPseudoDalvikByteCodeBoundary:
      if (lir->operands[0] == 0) {
        lir->operands[0] = reinterpret_cast<uintptr_t>("No instruction string");
      }
      LOG(INFO) << "-------- dalvik offset: 0x" << std::hex
                << lir->dalvik_offset << " @ " << reinterpret_cast<char*>(lir->operands[0]);
      break;
    case kPseudoExitBlock:
      LOG(INFO) << "-------- exit offset: 0x" << std::hex << dest;
      break;
    case kPseudoPseudoAlign4:
      LOG(INFO) << reinterpret_cast<uintptr_t>(base_addr) + offset << " (0x" << std::hex
                << offset << "): .align4";
      break;
    case kPseudoEHBlockLabel:
      LOG(INFO) << "Exception_Handling:";
      break;
    case kPseudoTargetLabel:
    case kPseudoNormalBlockLabel:
      LOG(INFO) << "L" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoThrowTarget:
      LOG(INFO) << "LT" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoIntrinsicRetry:
      LOG(INFO) << "IR" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoSuspendTarget:
      LOG(INFO) << "LS" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoSafepointPC:
      LOG(INFO) << "LsafepointPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
      break;
    case kPseudoExportedPC:
      LOG(INFO) << "LexportedPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
      break;
    case kPseudoCaseLabel:
      LOG(INFO) << "LC" << reinterpret_cast<void*>(lir) << ": Case target 0x"
                << std::hex << lir->operands[0] << "|" << std::dec << lir->operands[0];
      break;
    default:
      if (lir->flags.is_nop && !dump_nop) {
        break;
      } else {
        std::string op_name(BuildInsnString(GetTargetInstName(lir->opcode),
                                            lir, base_addr));
        std::string op_operands(BuildInsnString(GetTargetInstFmt(lir->opcode),
                                                lir, base_addr));
        LOG(INFO) << StringPrintf("%05x: %-9s%s%s",
                                  reinterpret_cast<unsigned int>(base_addr + offset),
                                  op_name.c_str(), op_operands.c_str(),
                                  lir->flags.is_nop ? "(nop)" : "");
      }
      break;
  }

  if (lir->use_mask && (!lir->flags.is_nop || dump_nop)) {
    DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->use_mask, "use"));
  }
  if (lir->def_mask && (!lir->flags.is_nop || dump_nop)) {
    DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->def_mask, "def"));
  }
}

void Mir2Lir::DumpPromotionMap()
{
  int num_regs = cu_->num_dalvik_registers + cu_->num_compiler_temps + 1;
  for (int i = 0; i < num_regs; i++) {
    PromotionMap v_reg_map = promotion_map_[i];
    std::string buf;
    if (v_reg_map.fp_location == kLocPhysReg) {
      StringAppendF(&buf, " : s%d", v_reg_map.FpReg & FpRegMask());
    }

    std::string buf3;
    if (i < cu_->num_dalvik_registers) {
      StringAppendF(&buf3, "%02d", i);
    } else if (i == mir_graph_->GetMethodSReg()) {
      buf3 = "Method*";
    } else {
      StringAppendF(&buf3, "ct%d", i - cu_->num_dalvik_registers);
    }

    LOG(INFO) << StringPrintf("V[%s] -> %s%d%s", buf3.c_str(),
                              v_reg_map.core_location == kLocPhysReg ?
                              "r" : "SP+", v_reg_map.core_location == kLocPhysReg ?
                              v_reg_map.core_reg : SRegOffset(i),
                              buf.c_str());
  }
}

/* Dump a mapping table */
void Mir2Lir::DumpMappingTable(const char* table_name, const std::string& descriptor,
                               const std::string& name, const std::string& signature,
                               const std::vector<uint32_t>& v) {
  if (v.size() > 0) {
    std::string line(StringPrintf("\n %s %s%s_%s_table[%zu] = {", table_name,
                     descriptor.c_str(), name.c_str(), signature.c_str(), v.size()));
    std::replace(line.begin(), line.end(), ';', '_');
    LOG(INFO) << line;
    for (uint32_t i = 0; i < v.size(); i += 2) {
      line = StringPrintf("  {0x%05x, 0x%04x},", v[i], v[i+1]);
      LOG(INFO) << line;
    }
    LOG(INFO) << " };\n\n";
  }
}

/* Dump instructions and constant pool contents */
void Mir2Lir::CodegenDump()
{
  LOG(INFO) << "Dumping LIR insns for "
            << PrettyMethod(cu_->method_idx, *cu_->dex_file);
  LIR* lir_insn;
  int insns_size = cu_->code_item->insns_size_in_code_units_;

  LOG(INFO) << "Regs (excluding ins) : " << cu_->num_regs;
  LOG(INFO) << "Ins : " << cu_->num_ins;
  LOG(INFO) << "Outs : " << cu_->num_outs;
  LOG(INFO) << "CoreSpills : " << num_core_spills_;
  LOG(INFO) << "FPSpills : " << num_fp_spills_;
  LOG(INFO) << "CompilerTemps : " << cu_->num_compiler_temps;
  LOG(INFO) << "Frame size : " << frame_size_;
  LOG(INFO) << "code size is " << total_size_ <<
      " bytes, Dalvik size is " << insns_size * 2;
  LOG(INFO) << "expansion factor: "
            << static_cast<float>(total_size_) / static_cast<float>(insns_size * 2);
  DumpPromotionMap();
  for (lir_insn = first_lir_insn_; lir_insn != NULL; lir_insn = lir_insn->next) {
    DumpLIRInsn(lir_insn, 0);
  }
  for (lir_insn = literal_list_; lir_insn != NULL; lir_insn = lir_insn->next) {
    LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset,
                              lir_insn->operands[0]);
  }

  const DexFile::MethodId& method_id =
      cu_->dex_file->GetMethodId(cu_->method_idx);
  std::string signature(cu_->dex_file->GetMethodSignature(method_id));
  std::string name(cu_->dex_file->GetMethodName(method_id));
  std::string descriptor(cu_->dex_file->GetMethodDeclaringClassDescriptor(method_id));

  // Dump mapping tables
  DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature, pc2dex_mapping_table_);
  DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature, dex2pc_mapping_table_);
}

/*
 * Search the existing constants in the literal pool for an exact or close match
 * within the specified delta (greater than or equal to 0).
 */
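// The unsigned comparison below matches any pool entry in the inclusive range
// [value - delta, value]; e.g. with delta == 3, a request for 0x1003 matches
// pool entries holding 0x1000 through 0x1003.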
LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta)
{
  while (data_target) {
    if ((static_cast<unsigned>(value - data_target->operands[0])) <= delta)
      return data_target;
    data_target = data_target->next;
  }
  return NULL;
}

/* Search the existing constants in the literal pool for an exact wide match */
LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi)
{
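  // A wide constant occupies two consecutive pool entries, low word first
  // (see AddWideData); look for val_lo immediately followed by val_hi.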
  bool lo_match = false;
  LIR* lo_target = NULL;
  while (data_target) {
    if (lo_match && (data_target->operands[0] == val_hi)) {
      // Record high word in case we need to expand this later.
      lo_target->operands[1] = val_hi;
      return lo_target;
    }
    lo_match = false;
    if (data_target->operands[0] == val_lo) {
      lo_match = true;
      lo_target = data_target;
    }
    data_target = data_target->next;
  }
  return NULL;
}

/*
 * The following are building blocks to insert constants into the pool or
 * instruction streams.
 */

/* Add a 32-bit constant to the constant pool */
LIR* Mir2Lir::AddWordData(LIR* *constant_list_p, int value)
{
  /* Add the constant to the literal pool */
  if (constant_list_p) {
    LIR* new_value = static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocData));
    new_value->operands[0] = value;
    new_value->next = *constant_list_p;
    *constant_list_p = new_value;
    return new_value;
  }
  return NULL;
}

/* Add a 64-bit constant to the constant pool or mixed with code */
LIR* Mir2Lir::AddWideData(LIR* *constant_list_p, int val_lo, int val_hi)
{
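  // AddWordData prepends to the list, so push the high word first; the low
  // word then ends up at the head and is emitted first by InstallLiteralPools.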
  AddWordData(constant_list_p, val_hi);
  return AddWordData(constant_list_p, val_lo);
}

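// Append a 32-bit value to the buffer in little-endian byte order.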
static void PushWord(std::vector<uint8_t>& buf, int data) {
  buf.push_back(data & 0xff);
  buf.push_back((data >> 8) & 0xff);
  buf.push_back((data >> 16) & 0xff);
  buf.push_back((data >> 24) & 0xff);
}

static void AlignBuffer(std::vector<uint8_t>& buf, size_t offset) {
  while (buf.size() < offset) {
    buf.push_back(0);
  }
}

/* Write the literal pool to the output stream */
void Mir2Lir::InstallLiteralPools()
{
  AlignBuffer(code_buffer_, data_offset_);
  LIR* data_lir = literal_list_;
  while (data_lir != NULL) {
    PushWord(code_buffer_, data_lir->operands[0]);
    data_lir = NEXT_LIR(data_lir);
  }
  // Push code and method literals, record offsets for the compiler to patch.
  data_lir = code_literal_list_;
  while (data_lir != NULL) {
    uint32_t target = data_lir->operands[0];
    cu_->compiler_driver->AddCodePatch(cu_->dex_file,
                                       cu_->method_idx,
                                       cu_->invoke_type,
                                       target,
                                       static_cast<InvokeType>(data_lir->operands[1]),
                                       code_buffer_.size());
    const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
    // unique based on target to ensure code deduplication works
    uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
    PushWord(code_buffer_, unique_patch_value);
    data_lir = NEXT_LIR(data_lir);
  }
  data_lir = method_literal_list_;
  while (data_lir != NULL) {
    uint32_t target = data_lir->operands[0];
    cu_->compiler_driver->AddMethodPatch(cu_->dex_file,
                                         cu_->method_idx,
                                         cu_->invoke_type,
                                         target,
                                         static_cast<InvokeType>(data_lir->operands[1]),
                                         code_buffer_.size());
    const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
    // unique based on target to ensure code deduplication works
    uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
    PushWord(code_buffer_, unique_patch_value);
    data_lir = NEXT_LIR(data_lir);
  }
}

/* Write the switch tables to the output stream */
void Mir2Lir::InstallSwitchTables()
{
  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
  while (true) {
    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    AlignBuffer(code_buffer_, tab_rec->offset);
    /*
     * For Arm, our reference point is the address of the bx
     * instruction that does the launch, so we have to subtract
     * the auto pc-advance. For other targets the reference point
     * is a label, so we can use the offset as-is.
     */
    int bx_offset = INVALID_OFFSET;
    switch (cu_->instruction_set) {
      case kThumb2:
        bx_offset = tab_rec->anchor->offset + 4;
        break;
      case kX86:
        bx_offset = 0;
        break;
      case kMips:
        bx_offset = tab_rec->anchor->offset;
        break;
      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    if (cu_->verbose) {
      LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset;
    }
    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
      const int* keys = reinterpret_cast<const int*>(&(tab_rec->table[2]));
      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
        int disp = tab_rec->targets[elems]->offset - bx_offset;
        if (cu_->verbose) {
          LOG(INFO) << "  Case[" << elems << "] key: 0x"
                    << std::hex << keys[elems] << ", disp: 0x"
                    << std::hex << disp;
        }
        PushWord(code_buffer_, keys[elems]);
        PushWord(code_buffer_, disp);
      }
    } else {
      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
                static_cast<int>(Instruction::kPackedSwitchSignature));
      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
        int disp = tab_rec->targets[elems]->offset - bx_offset;
        if (cu_->verbose) {
          LOG(INFO) << "  Case[" << elems << "] disp: 0x"
                    << std::hex << disp;
        }
        PushWord(code_buffer_, disp);
      }
    }
  }
}

/* Write the fill array data to the output stream */
void Mir2Lir::InstallFillArrayData()
{
  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
  while (true) {
    Mir2Lir::FillArrayData *tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    AlignBuffer(code_buffer_, tab_rec->offset);
    for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
      code_buffer_.push_back(tab_rec->table[i] & 0xFF);
      code_buffer_.push_back((tab_rec->table[i] >> 8) & 0xFF);
    }
  }
}

static int AssignLiteralOffsetCommon(LIR* lir, int offset)
{
  for (; lir != NULL; lir = lir->next) {
    lir->offset = offset;
    offset += 4;
  }
  return offset;
}

// Make sure we have a code address for every declared catch entry
bool Mir2Lir::VerifyCatchEntries()
{
  bool success = true;
  for (std::set<uint32_t>::const_iterator it = mir_graph_->catches_.begin();
       it != mir_graph_->catches_.end(); ++it) {
    uint32_t dex_pc = *it;
    bool found = false;
    for (size_t i = 0; i < dex2pc_mapping_table_.size(); i += 2) {
      if (dex_pc == dex2pc_mapping_table_[i+1]) {
        found = true;
        break;
      }
    }
    if (!found) {
      LOG(INFO) << "Missing native PC for catch entry @ 0x" << std::hex << dex_pc;
      success = false;
    }
  }
  // Now, try in the other direction
  for (size_t i = 0; i < dex2pc_mapping_table_.size(); i += 2) {
    uint32_t dex_pc = dex2pc_mapping_table_[i+1];
    if (mir_graph_->catches_.find(dex_pc) == mir_graph_->catches_.end()) {
      LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << dex_pc;
      success = false;
    }
  }
  if (!success) {
    LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
    LOG(INFO) << "Entries @ decode: " << mir_graph_->catches_.size() << ", Entries in table: "
              << dex2pc_mapping_table_.size()/2;
  }
  return success;
}

void Mir2Lir::CreateMappingTables()
{
  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
      pc2dex_mapping_table_.push_back(tgt_lir->offset);
      pc2dex_mapping_table_.push_back(tgt_lir->dalvik_offset);
    }
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
      dex2pc_mapping_table_.push_back(tgt_lir->offset);
      dex2pc_mapping_table_.push_back(tgt_lir->dalvik_offset);
    }
  }
  if (kIsDebugBuild) {
    DCHECK(VerifyCatchEntries());
  }
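  // Combined table layout: total uint32 element count, pc2dex element count,
  // then the pc2dex (native PC, dex PC) pairs followed by the dex2pc pairs.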
  combined_mapping_table_.push_back(pc2dex_mapping_table_.size() +
                                    dex2pc_mapping_table_.size());
  combined_mapping_table_.push_back(pc2dex_mapping_table_.size());
  combined_mapping_table_.insert(combined_mapping_table_.end(), pc2dex_mapping_table_.begin(),
                                 pc2dex_mapping_table_.end());
  combined_mapping_table_.insert(combined_mapping_table_.end(), dex2pc_mapping_table_.begin(),
                                 dex2pc_mapping_table_.end());
}

class NativePcToReferenceMapBuilder {
 public:
  NativePcToReferenceMapBuilder(std::vector<uint8_t>* table,
                                size_t entries, uint32_t max_native_offset,
                                size_t references_width) : entries_(entries),
                                references_width_(references_width), in_use_(entries),
                                table_(table) {
    // Compute width in bytes needed to hold max_native_offset.
    native_offset_width_ = 0;
    while (max_native_offset != 0) {
      native_offset_width_++;
      max_native_offset >>= 8;
    }
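    // Header layout (4 bytes): byte 0 holds native_offset_width_ in bits 0-2
    // and the low 5 bits of references_width_ in bits 3-7; byte 1 holds the
    // remaining references_width_ bits; bytes 2-3 hold the entry count,
    // little-endian.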
    // Resize table and set up header.
    table->resize((EntryWidth() * entries) + sizeof(uint32_t));
    CHECK_LT(native_offset_width_, 1U << 3);
    (*table)[0] = native_offset_width_ & 7;
    CHECK_LT(references_width_, 1U << 13);
    (*table)[0] |= (references_width_ << 3) & 0xFF;
    (*table)[1] = (references_width_ >> 5) & 0xFF;
    CHECK_LT(entries, 1U << 16);
    (*table)[2] = entries & 0xFF;
    (*table)[3] = (entries >> 8) & 0xFF;
  }

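  // Add an entry, resolving hash collisions with open addressing
  // (linear probing).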
  void AddEntry(uint32_t native_offset, const uint8_t* references) {
    size_t table_index = TableIndex(native_offset);
    while (in_use_[table_index]) {
      table_index = (table_index + 1) % entries_;
    }
    in_use_[table_index] = true;
    SetNativeOffset(table_index, native_offset);
    DCHECK_EQ(native_offset, GetNativeOffset(table_index));
    SetReferences(table_index, references);
  }

 private:
  size_t TableIndex(uint32_t native_offset) {
    return NativePcOffsetToReferenceMap::Hash(native_offset) % entries_;
  }

  uint32_t GetNativeOffset(size_t table_index) {
    uint32_t native_offset = 0;
    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
    for (size_t i = 0; i < native_offset_width_; i++) {
      native_offset |= (*table_)[table_offset + i] << (i * 8);
    }
    return native_offset;
  }

  void SetNativeOffset(size_t table_index, uint32_t native_offset) {
    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
    for (size_t i = 0; i < native_offset_width_; i++) {
      (*table_)[table_offset + i] = (native_offset >> (i * 8)) & 0xFF;
    }
  }

  void SetReferences(size_t table_index, const uint8_t* references) {
    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
    memcpy(&(*table_)[table_offset + native_offset_width_], references, references_width_);
  }

  size_t EntryWidth() const {
    return native_offset_width_ + references_width_;
  }

  // Number of entries in the table.
  const size_t entries_;
  // Number of bytes used to encode the reference bitmap.
  const size_t references_width_;
  // Number of bytes used to encode a native offset.
  size_t native_offset_width_;
  // Entries that are in use.
  std::vector<bool> in_use_;
  // The table we're building.
  std::vector<uint8_t>* const table_;
};

void Mir2Lir::CreateNativeGcMap() {
  const std::vector<uint32_t>& mapping_table = pc2dex_mapping_table_;
  uint32_t max_native_offset = 0;
  for (size_t i = 0; i < mapping_table.size(); i += 2) {
    uint32_t native_offset = mapping_table[i + 0];
    if (native_offset > max_native_offset) {
      max_native_offset = native_offset;
    }
  }
  MethodReference method_ref(cu_->dex_file, cu_->method_idx);
  const std::vector<uint8_t>* gc_map_raw = verifier::MethodVerifier::GetDexGcMap(method_ref);
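  // Skip the first four bytes (header) of the verifier's raw map.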
  verifier::DexPcToReferenceMap dex_gc_map(&(*gc_map_raw)[4], gc_map_raw->size() - 4);
  // Build the native offset to reference bitmap table.
  NativePcToReferenceMapBuilder native_gc_map_builder(&native_gc_map_,
                                                      mapping_table.size() / 2, max_native_offset,
                                                      dex_gc_map.RegWidth());

  for (size_t i = 0; i < mapping_table.size(); i += 2) {
    uint32_t native_offset = mapping_table[i + 0];
    uint32_t dex_pc = mapping_table[i + 1];
    const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
    CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
    native_gc_map_builder.AddEntry(native_offset, references);
  }
}

/* Determine the offset of each literal field */
int Mir2Lir::AssignLiteralOffset(int offset)
{
  offset = AssignLiteralOffsetCommon(literal_list_, offset);
  offset = AssignLiteralOffsetCommon(code_literal_list_, offset);
  offset = AssignLiteralOffsetCommon(method_literal_list_, offset);
  return offset;
}

int Mir2Lir::AssignSwitchTablesOffset(int offset)
{
  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
  while (true) {
    Mir2Lir::SwitchTable *tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    tab_rec->offset = offset;
    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
      offset += tab_rec->table[1] * (sizeof(int) * 2);
    } else {
      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
                static_cast<int>(Instruction::kPackedSwitchSignature));
      offset += tab_rec->table[1] * sizeof(int);
    }
  }
  return offset;
}

int Mir2Lir::AssignFillArrayDataOffset(int offset)
{
  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
  while (true) {
    Mir2Lir::FillArrayData *tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    tab_rec->offset = offset;
    offset += tab_rec->size;
    // word align
    offset = (offset + 3) & ~3;
  }
  return offset;
}

// LIR offset assignment.
int Mir2Lir::AssignInsnOffsets()
{
  LIR* lir;
  int offset = 0;

  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
    lir->offset = offset;
    if (lir->opcode >= 0) {
      if (!lir->flags.is_nop) {
        offset += lir->flags.size;
      }
    } else if (lir->opcode == kPseudoPseudoAlign4) {
      if (offset & 0x2) {
        offset += 2;
        lir->operands[0] = 1;
      } else {
        lir->operands[0] = 0;
      }
    }
    /* Pseudo opcodes don't consume space */
  }

  return offset;
}

/*
 * Walk the compilation unit and assign offsets to instructions
 * and literals and compute the total size of the compiled unit.
 */
void Mir2Lir::AssignOffsets()
{
  int offset = AssignInsnOffsets();

  /* Const values have to be word aligned */
  offset = (offset + 3) & ~3;

  /* Set up offsets for literals */
  data_offset_ = offset;

  offset = AssignLiteralOffset(offset);

  offset = AssignSwitchTablesOffset(offset);

  offset = AssignFillArrayDataOffset(offset);

  total_size_ = offset;
}

/*
 * Go over each instruction in the list and calculate the offset from the top
 * before sending them off to the assembler. If an out-of-range branch distance
 * is seen, rearrange the instructions a bit to correct it.
 */
void Mir2Lir::AssembleLIR()
{
  AssignOffsets();
  int assembler_retries = 0;
  /*
   * Assemble here. Note that we generate code with optimistic assumptions
   * and if found not to work, we'll have to redo the sequence and retry.
   */

  while (true) {
    AssemblerStatus res = AssembleInstructions(0);
    if (res == kSuccess) {
      break;
    } else {
      assembler_retries++;
      if (assembler_retries > MAX_ASSEMBLER_RETRIES) {
        CodegenDump();
        LOG(FATAL) << "Assembler error - too many retries";
      }
      // Redo offsets and try again
      AssignOffsets();
      code_buffer_.clear();
    }
  }

  // Install literals
  InstallLiteralPools();

  // Install switch tables
  InstallSwitchTables();

  // Install fill array data
  InstallFillArrayData();

  // Create the mapping table and native offset to reference map.
  CreateMappingTables();

  CreateNativeGcMap();
}

/*
 * Insert a kPseudoCaseLabel at the beginning of the Dalvik
 * byte-code instruction at offset vaddr. This label will be used
 * to fix up the case branch table during the assembly phase. Be
 * sure to set all resource flags on this to prevent code motion
 * across target boundaries. KeyVal is just there for debugging.
 */
LIR* Mir2Lir::InsertCaseLabel(int vaddr, int keyVal)
{
  SafeMap<unsigned int, LIR*>::iterator it;
  it = boundary_map_.find(vaddr);
  if (it == boundary_map_.end()) {
    LOG(FATAL) << "Error: didn't find vaddr 0x" << std::hex << vaddr;
  }
  LIR* new_label = static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
  new_label->dalvik_offset = vaddr;
  new_label->opcode = kPseudoCaseLabel;
  new_label->operands[0] = keyVal;
  InsertLIRAfter(it->second, new_label);
  return new_label;
}

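// Packed switch payload: ident (0x0100), entry count, 4-byte first (lowest)
// key, then <count> 4-byte relative targets (see DumpPackedSwitchTable below).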
void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec)
{
  const uint16_t* table = tab_rec->table;
  int base_vaddr = tab_rec->vaddr;
  const int *targets = reinterpret_cast<const int*>(&table[4]);
  int entries = table[1];
  int low_key = s4FromSwitchData(&table[2]);
  for (int i = 0; i < entries; i++) {
    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], i + low_key);
  }
}

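// Sparse switch payload: ident (0x0200), entry count, <count> sorted 4-byte
// keys, then <count> 4-byte relative targets (see DumpSparseSwitchTable below).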
void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec)
{
  const uint16_t* table = tab_rec->table;
  int base_vaddr = tab_rec->vaddr;
  int entries = table[1];
  const int* keys = reinterpret_cast<const int*>(&table[2]);
  const int* targets = &keys[entries];
  for (int i = 0; i < entries; i++) {
    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], keys[i]);
  }
}

void Mir2Lir::ProcessSwitchTables()
{
  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
  while (true) {
    Mir2Lir::SwitchTable *tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
      MarkPackedCaseLabels(tab_rec);
    } else if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
      MarkSparseCaseLabels(tab_rec);
    } else {
      LOG(FATAL) << "Invalid switch table";
    }
  }
}

/*
 * Sparse switch data format:
 *  ushort ident = 0x0200   magic value
 *  ushort size             number of entries in the table; > 0
 *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
 *  int targets[size]       branch targets, relative to switch opcode
 *
 * Total size is (2+size*4) 16-bit code units.
 */
void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table)
{
  uint16_t ident = table[0];
  int entries = table[1];
  const int* keys = reinterpret_cast<const int*>(&table[2]);
  const int* targets = &keys[entries];
  LOG(INFO) << "Sparse switch table - ident:0x" << std::hex << ident
            << ", entries: " << std::dec << entries;
  for (int i = 0; i < entries; i++) {
    LOG(INFO) << "  Key[" << keys[i] << "] -> 0x" << std::hex << targets[i];
  }
}

/*
 * Packed switch data format:
 *  ushort ident = 0x0100   magic value
 *  ushort size             number of entries in the table
 *  int first_key           first (and lowest) switch case value
 *  int targets[size]       branch targets, relative to switch opcode
 *
 * Total size is (4+size*2) 16-bit code units.
 */
void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table)
{
  uint16_t ident = table[0];
  const int* targets = reinterpret_cast<const int*>(&table[4]);
  int entries = table[1];
  int low_key = s4FromSwitchData(&table[2]);
  LOG(INFO) << "Packed switch table - ident:0x" << std::hex << ident
            << ", entries: " << std::dec << entries << ", low_key: " << low_key;
  for (int i = 0; i < entries; i++) {
    LOG(INFO) << "  Key[" << (i + low_key) << "] -> 0x" << std::hex
              << targets[i];
  }
}

/*
 * Set up special LIR to mark a Dalvik byte-code instruction start and
 * record it in the boundary_map. NOTE: in cases such as kMirOpCheck in
 * which we split a single Dalvik instruction, only the first MIR op
 * associated with a Dalvik PC should be entered into the map.
 */
LIR* Mir2Lir::MarkBoundary(int offset, const char* inst_str)
{
  LIR* res = NewLIR1(kPseudoDalvikByteCodeBoundary, reinterpret_cast<uintptr_t>(inst_str));
  if (boundary_map_.find(offset) == boundary_map_.end()) {
    boundary_map_.Put(offset, res);
  }
  return res;
}

bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2)
{
  bool is_taken;
  switch (opcode) {
    case Instruction::IF_EQ: is_taken = (src1 == src2); break;
    case Instruction::IF_NE: is_taken = (src1 != src2); break;
    case Instruction::IF_LT: is_taken = (src1 < src2); break;
    case Instruction::IF_GE: is_taken = (src1 >= src2); break;
    case Instruction::IF_GT: is_taken = (src1 > src2); break;
    case Instruction::IF_LE: is_taken = (src1 <= src2); break;
    case Instruction::IF_EQZ: is_taken = (src1 == 0); break;
    case Instruction::IF_NEZ: is_taken = (src1 != 0); break;
    case Instruction::IF_LTZ: is_taken = (src1 < 0); break;
    case Instruction::IF_GEZ: is_taken = (src1 >= 0); break;
    case Instruction::IF_GTZ: is_taken = (src1 > 0); break;
    case Instruction::IF_LEZ: is_taken = (src1 <= 0); break;
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
      is_taken = false;
  }
  return is_taken;
}

// Convert relation of src1/src2 to src2/src1
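// e.g. (src1 < src2) holds exactly when (src2 > src1), so kCondLt maps to
// kCondGt; equality and inequality are symmetric and map to themselves.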
ConditionCode Mir2Lir::FlipComparisonOrder(ConditionCode before) {
  ConditionCode res;
  switch (before) {
    case kCondEq: res = kCondEq; break;
    case kCondNe: res = kCondNe; break;
    case kCondLt: res = kCondGt; break;
    case kCondGt: res = kCondLt; break;
    case kCondLe: res = kCondGe; break;
    case kCondGe: res = kCondLe; break;
    default:
      res = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected ccode " << before;
  }
  return res;
}

// TODO: move to mir_to_lir.cc
Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    : Backend(arena),
      literal_list_(NULL),
      method_literal_list_(NULL),
      code_literal_list_(NULL),
      cu_(cu),
      mir_graph_(mir_graph),
      switch_tables_(arena, 4, kGrowableArraySwitchTables),
      fill_array_data_(arena, 4, kGrowableArrayFillArrayData),
      throw_launchpads_(arena, 2048, kGrowableArrayThrowLaunchPads),
      suspend_launchpads_(arena, 4, kGrowableArraySuspendLaunchPads),
      intrinsic_launchpads_(arena, 2048, kGrowableArrayMisc),
      data_offset_(0),
      total_size_(0),
      block_label_list_(NULL),
      current_dalvik_offset_(0),
      reg_pool_(NULL),
      live_sreg_(0),
      num_core_spills_(0),
      num_fp_spills_(0),
      frame_size_(0),
      core_spill_mask_(0),
      fp_spill_mask_(0),
      first_lir_insn_(NULL),
      last_lir_insn_(NULL)
{
  promotion_map_ = static_cast<PromotionMap*>
      (arena_->NewMem((cu_->num_dalvik_registers + cu_->num_compiler_temps + 1) *
                      sizeof(promotion_map_[0]), true, ArenaAllocator::kAllocRegAlloc));
}

void Mir2Lir::Materialize() {
  CompilerInitializeRegAlloc();  // Needs to happen after SSA naming

  /* Allocate Registers using simple local allocation scheme */
  SimpleRegAlloc();

  // FIXME: re-enable by retrieving from mir_graph
  SpecialCaseHandler special_case = kNoHandler;

  if (special_case != kNoHandler) {
    /*
     * Custom codegen for special cases. If for any reason the
     * special codegen doesn't succeed, first_lir_insn_ will be
     * set to NULL.
     */
    SpecialMIR2LIR(special_case);
  }

  /* Convert MIR to LIR, etc. */
  if (first_lir_insn_ == NULL) {
    MethodMIR2LIR();
  }

  /* Method is not empty */
  if (first_lir_insn_) {
    // Mark the targets of switch statement case labels.
    ProcessSwitchTables();

    /* Convert LIR into machine code. */
    AssembleLIR();

    if (cu_->verbose) {
      CodegenDump();
    }
  }
}

CompiledMethod* Mir2Lir::GetCompiledMethod() {
  // Combine vmap tables - core regs, then fp regs - into vmap_table
  std::vector<uint16_t> vmap_table;
  // Core regs may have been inserted out of order - sort first
  std::sort(core_vmap_table_.begin(), core_vmap_table_.end());
  for (size_t i = 0; i < core_vmap_table_.size(); i++) {
    // Copy, stripping out the phys register sort key
    vmap_table.push_back(~(-1 << VREG_NUM_WIDTH) & core_vmap_table_[i]);
  }
  // If we have a frame, push a marker to take place of lr
  if (frame_size_ > 0) {
    vmap_table.push_back(INVALID_VREG);
  } else {
    DCHECK_EQ(__builtin_popcount(core_spill_mask_), 0);
    DCHECK_EQ(__builtin_popcount(fp_spill_mask_), 0);
  }
  // Combine vmap tables - core regs, then fp regs. fp regs already sorted
  for (uint32_t i = 0; i < fp_vmap_table_.size(); i++) {
    vmap_table.push_back(fp_vmap_table_[i]);
  }
  CompiledMethod* result =
      new CompiledMethod(cu_->instruction_set, code_buffer_,
                         frame_size_, core_spill_mask_, fp_spill_mask_,
                         combined_mapping_table_, vmap_table, native_gc_map_);
  return result;
}

int Mir2Lir::ComputeFrameSize() {
  /* Figure out the frame size */
  static const uint32_t kAlignMask = kStackAlignment - 1;
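  // e.g. if kStackAlignment is 16, kAlignMask is 0xF and the final
  // (size + kAlignMask) & ~kAlignMask rounds size up to the next
  // 16-byte multiple.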
  uint32_t size = (num_core_spills_ + num_fp_spills_ +
                   1 /* filler word */ + cu_->num_regs + cu_->num_outs +
                   cu_->num_compiler_temps + 1 /* cur_method* */)
                   * sizeof(uint32_t);
  /* Align and set */
  return (size + kAlignMask) & ~(kAlignMask);
}

/*
 * Append an LIR instruction to the LIR list maintained by a compilation
 * unit.
 */
void Mir2Lir::AppendLIR(LIR* lir)
{
  if (first_lir_insn_ == NULL) {
    DCHECK(last_lir_insn_ == NULL);
    last_lir_insn_ = first_lir_insn_ = lir;
    lir->prev = lir->next = NULL;
  } else {
    last_lir_insn_->next = lir;
    lir->prev = last_lir_insn_;
    lir->next = NULL;
    last_lir_insn_ = lir;
  }
}

/*
 * Insert an LIR instruction before the current instruction, which cannot be the
 * first instruction.
 *
 * prev_lir <-> new_lir <-> current_lir
 */
void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir)
{
  DCHECK(current_lir->prev != NULL);
  LIR *prev_lir = current_lir->prev;

  prev_lir->next = new_lir;
  new_lir->prev = prev_lir;
  new_lir->next = current_lir;
  current_lir->prev = new_lir;
}

/*
 * Insert an LIR instruction after the current instruction, which cannot be the
 * last instruction (the old next's prev pointer is rewritten below).
 *
 * current_lir -> new_lir -> old_next
 */
void Mir2Lir::InsertLIRAfter(LIR* current_lir, LIR* new_lir)
{
  new_lir->prev = current_lir;
  new_lir->next = current_lir->next;
  current_lir->next = new_lir;
  new_lir->next->prev = new_lir;
}


}  // namespace art