// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_
#define V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_

#include "src/ast/scopes.h"
#include "src/crankshaft/lithium-codegen.h"
#include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
#include "src/crankshaft/mips/lithium-mips.h"
#include "src/deoptimizer.h"
#include "src/safepoint-table.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class SafepointGenerator;

class LCodeGen: public LCodeGenBase {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : LCodeGenBase(chunk, assembler, info),
        jump_table_(4, info->zone()),
        scope_(info->scope()),
        deferred_(8, info->zone()),
        frame_is_built_(false),
        safepoints_(info->zone()),
        resolver_(this),
        expected_safepoint_kind_(Safepoint::kSimple) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }


  int LookupDestination(int block_id) const {
    return chunk()->LookupDestination(block_id);
  }

  bool IsNextEmittedBlock(int block_id) const {
    return LookupDestination(block_id) == GetNextEmittedBlock();
  }

  bool NeedsEagerFrame() const {
    return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
           !info()->IsStub() || info()->requires_frame();
  }
  bool NeedsDeferredFrame() const {
    return !NeedsEagerFrame() && info()->is_deferred_calling();
  }

  RAStatus GetRAState() const {
    return frame_is_built_ ? kRAHasBeenSaved : kRAHasNotBeenSaved;
  }

  // Support for converting LOperands to assembler types.
  // LOperand must be a register.
  Register ToRegister(LOperand* op) const;

  // LOperand is loaded into scratch, unless already a register.
  Register EmitLoadRegister(LOperand* op, Register scratch);

  // LOperand must be a double register.
  DoubleRegister ToDoubleRegister(LOperand* op) const;

  // LOperand is loaded into dbl_scratch, unless already a double register.
  DoubleRegister EmitLoadDoubleRegister(LOperand* op,
                                        FloatRegister flt_scratch,
                                        DoubleRegister dbl_scratch);
  int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
  int32_t ToInteger32(LConstantOperand* op) const;
  Smi* ToSmi(LConstantOperand* op) const;
  double ToDouble(LConstantOperand* op) const;
  Operand ToOperand(LOperand* op);
  MemOperand ToMemOperand(LOperand* op) const;
  // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
  MemOperand ToHighMemOperand(LOperand* op) const;

  bool IsInteger32(LConstantOperand* op) const;
  bool IsSmi(LConstantOperand* op) const;
  Handle<Object> ToHandle(LConstantOperand* op) const;

  // Try to generate code for the entire chunk, but it may fail if the
  // chunk contains constructs we cannot handle. Returns true if the
  // code generation attempt succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);

  void DoDeferredNumberTagD(LNumberTagD* instr);

  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
  void DoDeferredNumberTagIU(LInstruction* instr,
                             LOperand* value,
                             LOperand* temp1,
                             LOperand* temp2,
                             IntegerSignedness signedness);

  void DoDeferredTaggedToI(LTaggedToI* instr);
  void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
  void DoDeferredStackCheck(LStackCheck* instr);
  void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
  void DoDeferredAllocate(LAllocate* instr);
  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                   Register result,
                                   Register object,
                                   Register index);

  // Parallel move support.
  void DoParallelMove(LParallelMove* move);
  void DoGap(LGap* instr);

  MemOperand PrepareKeyedOperand(Register key,
                                 Register base,
                                 bool key_is_constant,
                                 int constant_key,
                                 int element_size,
                                 int shift_size,
                                 int base_offset);

  // Emit frame translation commands for an environment.
  void WriteTranslation(LEnvironment* environment, Translation* translation);

  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO

 private:
  LanguageMode language_mode() const { return info()->language_mode(); }

  Scope* scope() const { return scope_; }

  Register scratch0() { return kLithiumScratchReg; }
  Register scratch1() { return kLithiumScratchReg2; }
  DoubleRegister double_scratch0() { return kLithiumScratchDouble; }

  LInstruction* GetNextInstruction();

  void EmitClassOfTest(Label* if_true,
                       Label* if_false,
                       Handle<String> class_name,
                       Register input,
                       Register temporary,
                       Register temporary2);

  bool HasAllocatedStackSlots() const {
    return chunk()->HasAllocatedStackSlots();
  }
  int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
  int GetTotalFrameSlotCount() const {
    return chunk()->GetTotalFrameSlotCount();
  }

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

  void SaveCallerDoubles();
  void RestoreCallerDoubles();

  // Code generation passes. Returns true if code generation should
  // continue.
  void GenerateBodyInstructionPre(LInstruction* instr) override;
  bool GeneratePrologue();
  bool GenerateDeferredCode();
  bool GenerateJumpTable();
  bool GenerateSafepointTable();

  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
  void GenerateOsrPrologue();

  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };

  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);

  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode);

  void CallRuntime(const Runtime::Function* function,
                   int num_arguments,
                   LInstruction* instr,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, num_arguments, instr);
  }

  void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, function->nargs, instr);
  }

  void LoadContextFromDeferred(LOperand* context);
  void CallRuntimeFromDeferred(Runtime::FunctionId id,
                               int argc,
                               LInstruction* instr,
                               LOperand* context);

  // Generate a direct call to a known function. Expects the function
  // to be in a1.
  void CallKnownFunction(Handle<JSFunction> function,
                         int formal_parameter_count, int arity,
                         LInstruction* instr);

  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);

  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);
  void DeoptimizeIf(Condition condition, LInstruction* instr,
                    Deoptimizer::DeoptReason deopt_reason,
                    Deoptimizer::BailoutType bailout_type,
                    Register src1 = zero_reg,
                    const Operand& src2 = Operand(zero_reg));
  void DeoptimizeIf(
      Condition condition, LInstruction* instr,
      Deoptimizer::DeoptReason deopt_reason = Deoptimizer::kNoReason,
      Register src1 = zero_reg, const Operand& src2 = Operand(zero_reg));
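  // Illustrative sketch (not part of the original header): the MIPS
  // DeoptimizeIf variants take the comparison operands directly, so a
  // "deopt unless the value is a Smi" guard could look roughly like the
  // following, where |scratch| is a hypothetical register holding the value:
  //   __ And(at, scratch, Operand(kSmiTagMask));
  //   DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg));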

  void AddToTranslation(LEnvironment* environment,
                        Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int* object_index_pointer,
                        int* dematerialized_index_pointer);

  Register ToRegister(int index) const;
  DoubleRegister ToDoubleRegister(int index) const;

  MemOperand BuildSeqStringOperand(Register string,
                                   LOperand* index,
                                   String::Encoding encoding);

  void EmitIntegerMathAbs(LMathAbs* instr);

  // Support for recording safepoint and position information.
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);

  void RecordAndWritePosition(int position) override;

  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
  void EmitGoto(int block);

  // EmitBranch expects to be the last instruction of a block.
  template<class InstrType>
  void EmitBranch(InstrType instr,
                  Condition condition,
                  Register src1,
                  const Operand& src2);
  template<class InstrType>
  void EmitBranchF(InstrType instr,
                   Condition condition,
                   FPURegister src1,
                   FPURegister src2);
  template <class InstrType>
  void EmitTrueBranch(InstrType instr, Condition condition, Register src1,
                      const Operand& src2);
  template <class InstrType>
  void EmitFalseBranch(InstrType instr, Condition condition, Register src1,
                       const Operand& src2);
  template<class InstrType>
  void EmitFalseBranchF(InstrType instr,
                        Condition condition,
                        FPURegister src1,
                        FPURegister src2);
  void EmitCmpI(LOperand* left, LOperand* right);
  void EmitNumberUntagD(LNumberUntagD* instr, Register input,
                        DoubleRegister result, NumberUntagDMode mode);

  // Emits optimized code for typeof x == "y". Modifies the input register.
  // Returns the condition on which a final split to the true and false
  // labels should be made, to optimize fallthrough.
  // Returns two registers in cmp1 and cmp2 that can be used in the
  // Branch instruction after EmitTypeofIs.
  Condition EmitTypeofIs(Label* true_label,
                         Label* false_label,
                         Register input,
                         Handle<String> type_name,
                         Register* cmp1,
                         Operand* cmp2);
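  // A sketch of the intended call pattern (local names here are hypothetical,
  // not from the original header): the returned condition and the cmp1/cmp2
  // outputs are fed straight into the subsequent branch, e.g.
  //   Register cmp1 = no_reg;
  //   Operand cmp2 = Operand(no_reg);
  //   Condition cond = EmitTypeofIs(true_label, false_label, input,
  //                                 type_name, &cmp1, &cmp2);
  //   EmitBranch(instr, cond, cmp1, cmp2);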

  // Emits optimized code for %_IsString(x). Preserves the input register.
  // Returns the condition on which a final split to the true and false
  // labels should be made, to optimize fallthrough.
  Condition EmitIsString(Register input,
                         Register temp1,
                         Label* is_not_string,
                         SmiCheck check_needed);

  // Emits optimized code to deep-copy the contents of statically known
  // object graphs (e.g. object literal boilerplate).
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    int* offset,
                    AllocationSiteMode mode);
  // Emit optimized code for integer division.
  // Inputs are signed.
  // All registers are clobbered.
  // If 'remainder' is no_reg, it is not computed.
  void EmitSignedIntegerDivisionByConstant(Register result,
                                           Register dividend,
                                           int32_t divisor,
                                           Register remainder,
                                           Register scratch,
                                           LEnvironment* environment);
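  // Illustrative sketch (hypothetical names, not from the original header):
  // passing no_reg as |remainder| skips the remainder computation, e.g.
  //   EmitSignedIntegerDivisionByConstant(result, dividend, 3, no_reg,
  //                                       scratch, instr->environment());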


  void EnsureSpaceForLazyDeopt(int space_needed) override;
  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
  void DoStoreKeyedFixedArray(LStoreKeyed* instr);

  template <class T>
  void EmitVectorLoadICRegisters(T* instr);
  template <class T>
  void EmitVectorStoreICRegisters(T* instr);

  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
  Scope* const scope_;
  ZoneList<LDeferredCode*> deferred_;
  bool frame_is_built_;

  // Builder that keeps track of safepoints in the code. The table
  // itself is emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  // Compiler from a set of parallel moves to a sequential list of moves.
  LGapResolver resolver_;

  Safepoint::Kind expected_safepoint_kind_;

  class PushSafepointRegistersScope final BASE_EMBEDDED {
   public:
    explicit PushSafepointRegistersScope(LCodeGen* codegen)
        : codegen_(codegen) {
      DCHECK(codegen_->info()->is_calling());
      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;

      StoreRegistersStateStub stub(codegen_->isolate());
      codegen_->masm_->push(ra);
      codegen_->masm_->CallStub(&stub);
    }

    ~PushSafepointRegistersScope() {
      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
      RestoreRegistersStateStub stub(codegen_->isolate());
      codegen_->masm_->push(ra);
      codegen_->masm_->CallStub(&stub);
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };

  friend class LDeferredCode;
  friend class LEnvironment;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};


class LDeferredCode : public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() {}
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
  int instruction_index() const { return instruction_index_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  int instruction_index_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_