// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/crankshaft/mips64/lithium-codegen-mips64.h"

#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};
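
// Illustrative usage sketch (the real call sites appear further down this
// file): a SafepointGenerator is typically passed as the CallWrapper of a
// macro-assembler invoke helper so that a safepoint is recorded right after
// the call instruction, e.g.
//   SafepointGenerator generator(this, instr->pointer_map(),
//                                Safepoint::kLazyDeopt);
//   ParameterCount count(arity);  // 'arity' is a stand-in name here
//   __ InvokeFunction(a1, no_reg, count, CALL_FUNCTION, generator);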


#define __ masm()->
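// (The `__` shorthand above is the usual V8 convention: `__ sd(a1, mem)`
// expands to masm()->sd(a1, mem).)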

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetTotalFrameSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ sdc1(DoubleRegister::from_code(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ ldc1(DoubleRegister::from_code(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    // a1: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // ra: Caller's pc.
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue(StackFrame::STUB);
    } else {
      __ Prologue(info()->GeneratePreagedPrologue());
    }
    frame_is_built_ = true;
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ Dsubu(sp, sp, Operand(slots * kPointerSize));
      __ Push(a0, a1);
      __ Daddu(a0, sp, Operand(slots * kPointerSize));
      __ li(a1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ Dsubu(a0, a0, Operand(kPointerSize));
      __ sd(a1, MemOperand(a0, 2 * kPointerSize));
      __ Branch(&loop, ne, a0, Operand(sp));
      __ Pop(a0, a1);
    } else {
      __ Dsubu(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }
  return !is_aborted();
}


void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info()->scope()->num_heap_slots() > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in a1.
    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ push(a1);
      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else if (slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(a1);
      __ CallRuntime(Runtime::kNewFunctionContext);
    }
    RecordSafepoint(deopt_mode);

    // Context is returned in v0. It replaces the context passed to us.
    // It's saved in the stack and kept live in cp.
    __ mov(cp, v0);
    __ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ ld(a0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ sd(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(
              cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, a0, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ Dsubu(sp, sp, Operand(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ PushCommonFrame(scratch0());
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ PopCommonFrame(scratch0());
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }
  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
    Label table_start, call_deopt_entry;

    __ bind(&table_start);
    Label needs_frame;
    Address base = jump_table_[0]->address;
    for (int i = 0; i < jump_table_.length(); i++) {
      Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
      __ bind(&table_entry->label);
      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load the base
      // address and add an immediate offset.
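      // (Illustrative: each case loads t9 = entry - base and the shared tail
      // computes base + t9, so every per-entry sequence stays a short branch
      // plus a 16-bit immediate load whenever is_int16(entry - base) holds.)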
      if (is_int16(entry - base)) {
        if (table_entry->needs_frame) {
          DCHECK(!info()->saves_caller_doubles());
          Comment(";;; call deopt with frame");
          __ PushCommonFrame();
          __ BranchAndLink(&needs_frame, USE_DELAY_SLOT);
          __ li(t9, Operand(entry - base));
        } else {
          __ BranchAndLink(&call_deopt_entry, USE_DELAY_SLOT);
          __ li(t9, Operand(entry - base));
        }

      } else {
        __ li(t9, Operand(entry - base));
        if (table_entry->needs_frame) {
          DCHECK(!info()->saves_caller_doubles());
          Comment(";;; call deopt with frame");
          __ PushCommonFrame();
          __ BranchAndLink(&needs_frame);
        } else {
          __ BranchAndLink(&call_deopt_entry);
        }
      }
    }
    if (needs_frame.is_linked()) {
      __ bind(&needs_frame);
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
      __ push(at);
      DCHECK(info()->IsStub());
    }

    Comment(";;; call deopt");
    __ bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    __ li(at,
          Operand(reinterpret_cast<int64_t>(base), RelocInfo::RUNTIME_ENTRY));
    __ Daddu(t9, t9, Operand(at));
    __ Jump(t9);
  }
  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::from_code(index);
}


DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
  return DoubleRegister::from_code(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      AllowDeferredHandleDereference get_number;
      DCHECK(literal->IsNumber());
      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ li(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ ld(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                FloatRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(literal->IsNumber());
      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
      __ mtc1(at, flt_scratch);
      __ cvt_d_w(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot()) {
    MemOperand mem_op = ToMemOperand(op);
    __ ldc1(dbl_scratch, mem_op);
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  // return ToRepresentation(op, Representation::Integer32());
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();
}


int64_t LCodeGen::ToRepresentation_donotuse(LConstantOperand* op,
                                            const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<int64_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(static_cast<int64_t>(0));
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(static_cast<int64_t>(0));
}


static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
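  // (E.g. index -1, the first stack parameter, maps to offset 0 from sp;
  // index -2 maps to kPointerSize; and so on down the pushed arguments.)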
  return -(index + 1) * kPointerSize;
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, FrameSlotToFPOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    // return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
    return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kIntSize);
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    // return MemOperand(
    //    sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
    return MemOperand(
        sp, ArgumentsOffsetWithoutFrame(op->index()) + kIntSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ ld(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------ translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type,
                            Register src1, const Operand& src2) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ Push(a1, scratch);
    __ li(scratch, Operand(count));
    __ lw(a1, MemOperand(scratch));
    __ Subu(a1, a1, Operand(1));
    __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
    __ li(a1, Operand(FLAG_deopt_every_n_times));
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label skip;
    if (condition != al) {
      __ Branch(&skip, NegateCondition(condition), src1, src2);
    }
    __ stop("trap_on_deopt");
    __ bind(&skip);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
  } else {
    Deoptimizer::JumpTableEntry* table_entry =
        new (zone()) Deoptimizer::JumpTableEntry(
            entry, deopt_info, bailout_type, !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry->IsEquivalentTo(*jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ Branch(&jump_table_.last()->label, condition, src1, src2);
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Register src1, const Operand& src2) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2);
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
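  // (Illustratively, with mask == |divisor| - 1 and two's-complement wrap,
  // the branching version below computes:
  //   if (n >= 0) return n & mask;
  //   return -(-n & mask);
  // matching truncated-modulus semantics, where the result takes the sign of
  // the dividend.)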
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;

  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
    // Note: The code below even works when right contains kMinInt.
    __ dsubu(dividend, zero_reg, dividend);
    __ And(dividend, dividend, Operand(mask));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                   Operand(zero_reg));
    }
    __ Branch(USE_DELAY_SLOT, &done);
    __ dsubu(dividend, zero_reg, dividend);
  }

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, Operand(mask));
  __ bind(&done);
}


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

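  // (TruncatingDiv implements division by a constant via the standard
  // multiply-by-reciprocal "magic number" technique; the Dmul/Dsubu pair
  // below then recovers the remainder as dividend - quotient * divisor.)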
  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ Dmul(result, result, Operand(Abs(divisor)));
  __ Dsubu(result, dividend, Operand(result));

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  const Register left_reg = ToRegister(instr->left());
  const Register right_reg = ToRegister(instr->right());
  const Register result_reg = ToRegister(instr->result());

  // div runs in the background while we check for special cases.
  __ Dmod(result_reg, left_reg, right_reg);

  Label done;
  // Check for x % 0, we have to deopt in this case because we can't return a
  // NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg,
                 Operand(zero_reg));
  }

  // Check for kMinInt % -1, div will return kMinInt, which is not what we
  // want. We have to deopt if we care about -0, because we can't return that.
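  // (E.g. the machine division of kMinInt by -1 overflows and reports a zero
  // remainder, but JS requires -2147483648 % -1 === -0, so when -0 matters we
  // deopt, and otherwise we explicitly return +0 below.)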
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1));
    } else {
      __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
      __ Branch(USE_DELAY_SLOT, &done);
      __ mov(result_reg, zero_reg);
    }
    __ bind(&no_overflow_possible);
  }

  // If we care about -0, test if the dividend is <0 and the result is 0.
  __ Branch(&done, ge, left_reg, Operand(zero_reg));

  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg,
                 Operand(zero_reg));
  }
  __ bind(&done);
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt));
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ And(at, dividend, Operand(mask));
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg));
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ Dsubu(result, zero_reg, dividend);
    return;
  }
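  // (The sign-bias trick below: dsra32/dsrl32 extract divisor - 1 when the
  // dividend is negative and 0 otherwise; adding that bias before the
  // arithmetic shift makes the shift round toward zero, e.g. -7 / 4:
  // (-7 + 3) >> 2 = -1, matching truncating division.)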
  uint16_t shift = WhichPowerOf2Abs(divisor);
  if (shift == 0) {
    __ Move(result, dividend);
  } else if (shift == 1) {
    __ dsrl32(result, dividend, 31);
    __ Daddu(result, dividend, Operand(result));
  } else {
    __ dsra32(result, dividend, 31);
    __ dsrl32(result, result, 32 - shift);
    __ Daddu(result, dividend, Operand(result));
  }
  if (shift > 0) __ dsra(result, result, shift);
  if (divisor < 0) __ Dsubu(result, zero_reg, result);
}


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ Dmul(scratch0(), result, Operand(divisor));
    __ Dsubu(scratch0(), scratch0(), dividend);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(),
                 Operand(zero_reg));
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Div(result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
                 Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
                 Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Calculate remainder.
    Register remainder = ToRegister(instr->temp());
    if (kArchVariant != kMips64r6) {
      __ mfhi(remainder);
    } else {
      __ dmod(remainder, dividend, divisor);
    }
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder,
                 Operand(zero_reg));
  }
}


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  DCHECK(addend.is(ToDoubleRegister(instr->result())));

  __ Madd_d(addend, addend, multiplier, multiplicand, double_scratch0());
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  Register scratch = result.is(dividend) ? scratch0() : dividend;
  DCHECK(!result.is(dividend) || !scratch.is(dividend));

  // Degenerate divisor: just return the dividend unchanged.
  if (divisor == 0) {
    __ Move(result, dividend);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  uint16_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ dsra(result, dividend, shift);
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  // Dividend can be the same register as result so save the value of it
  // for checking overflow.
  __ Move(scratch, dividend);

  __ Dsubu(result, zero_reg, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
  }

  __ Xor(scratch, scratch, result);
  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, result, Operand(kMaxInt));
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ dsra(result, result, shift);
    return;
  }

  Label no_overflow, done;
  __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
  __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE);
  __ Branch(&done);
  __ bind(&no_overflow);
  __ dsra(result, result, shift);
  __ bind(&done);
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ Dsubu(result, zero_reg, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
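  // (Worked example with divisor == 2: floor(-7 / 2) is -4 but truncation
  // gives -3; biasing first, trunc((-7 + 1) / 2) - 1 = -3 - 1 = -4 as
  // required.  Non-negative dividends skip the adjustment, since there
  // truncation already equals flooring.)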
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
            dividend, Operand(zero_reg));
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Dsubu(result, zero_reg, result);
  __ jmp(&done);
  __ bind(&needs_adjustment);
  __ Daddu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ Dsubu(result, zero_reg, result);
  __ Dsubu(result, result, Operand(1));
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Ddiv(result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
                 Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
                 Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  // We performed a truncating division. Correct the result if necessary.
  Label done;
  Register remainder = scratch0();
  if (kArchVariant != kMips64r6) {
    __ mfhi(remainder);
  } else {
    __ dmod(remainder, dividend, divisor);
  }
  __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
  __ Xor(remainder, remainder, Operand(divisor));
  __ Branch(&done, ge, remainder, Operand(zero_reg));
  __ Dsubu(result, result, Operand(1));
  __ bind(&done);
}


void LCodeGen::DoMulS(LMulS* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant will be handled separately.
      // If the constant is negative and left is zero, the result should be -0.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        if (overflow) {
          Label no_overflow;
          __ DsubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
          DeoptimizeIf(al, instr);
          __ bind(&no_overflow);
        } else {
          __ Dsubu(result, zero_reg, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
                       Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        // Nothing to do.
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

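        // (mask is all ones exactly when constant < 0, so
        // (constant + mask) ^ mask is |constant| without a branch; the
        // shifted forms below cover e.g. x * 8 as x << 3, x * 5 as
        // (x << 2) + x via Dlsa, and x * 7 as (x << 3) - x.)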
        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ dsll(result, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Dsubu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ Dlsa(result, left, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Dsubu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ dsll(scratch, left, shift);
          __ Dsubu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Dsubu(result, zero_reg, result);
        } else {
          // Generate standard code.
          __ li(at, constant);
          __ Dmul(result, left, at);
        }
    }
  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      // hi:lo = left * right.
      __ Dmulh(result, left, right);
      __ dsra32(scratch, result, 0);
      __ sra(at, result, 31);
      __ SmiTag(result);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
    } else {
      __ SmiUntag(result, left);
      __ dmul(result, result, right);
    }

    if (bailout_on_minus_zero) {
      Label done;
      __ Xor(at, left, right);
      __ Branch(&done, ge, at, Operand(zero_reg));
      // Bail out if the result is minus zero.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant will be handled separately.
      // If the constant is negative and left is zero, the result should be -0.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        if (overflow) {
          Label no_overflow;
          __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
          DeoptimizeIf(al, instr);
          __ bind(&no_overflow);
        } else {
          __ Subu(result, zero_reg, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
                       Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        // Nothing to do.
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ sll(result, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ Lsa(result, left, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ sll(scratch, left, shift);
          __ Subu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else {
          // Generate standard code.
          __ li(at, constant);
          __ Mul(result, left, at);
        }
    }

  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      // hi:lo = left * right.
      __ Dmul(result, left, right);
      __ dsra32(scratch, result, 0);
      __ sra(at, result, 31);

      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
    } else {
      __ mul(result, left, right);
    }

    if (bailout_on_minus_zero) {
      Label done;
      __ Xor(at, left, right);
      __ Branch(&done, ge, at, Operand(zero_reg));
      // Bail out if the result is minus zero.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  DCHECK(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot()) {
    right = Operand(EmitLoadRegister(right_op, at));
  } else {
    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ And(result, left, right);
      break;
    case Token::BIT_OR:
      __ Or(result, left, right);
      break;
    case Token::BIT_XOR:
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ Nor(result, zero_reg, left);
      } else {
        __ Xor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());

  if (right_op->IsRegister()) {
    // No need to mask the right operand on MIPS, it is built into the variable
    // shift instructions.
    switch (instr->op()) {
      case Token::ROR:
        __ Ror(result, left, Operand(ToRegister(right_op)));
        break;
      case Token::SAR:
        __ srav(result, left, ToRegister(right_op));
        break;
      case Token::SHR:
        __ srlv(result, left, ToRegister(right_op));
        if (instr->can_deopt()) {
          // TODO(yy): (-1) >>> 0. anything else?
          DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result,
                       Operand(zero_reg));
          DeoptimizeIf(gt, instr, Deoptimizer::kNegativeValue, result,
                       Operand(kMaxInt));
        }
        break;
      case Token::SHL:
        __ sllv(result, left, ToRegister(right_op));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
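    // (JS shift counts are taken mod 32, e.g. (1 << 33) === 2, which is what
    // the & 0x1F below implements for the constant case.)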
1589 int value = ToInteger32(LConstantOperand::cast(right_op));
1590 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1591 switch (instr->op()) {
1592 case Token::ROR:
1593 if (shift_count != 0) {
1594 __ Ror(result, left, Operand(shift_count));
1595 } else {
1596 __ Move(result, left);
1597 }
1598 break;
1599 case Token::SAR:
1600 if (shift_count != 0) {
1601 __ sra(result, left, shift_count);
1602 } else {
1603 __ Move(result, left);
1604 }
1605 break;
1606 case Token::SHR:
1607 if (shift_count != 0) {
1608 __ srl(result, left, shift_count);
1609 } else {
1610 if (instr->can_deopt()) {
1611 __ And(at, left, Operand(0x80000000));
1612 DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at,
1613 Operand(zero_reg));
1614 }
1615 __ Move(result, left);
1616 }
1617 break;
1618 case Token::SHL:
1619 if (shift_count != 0) {
1620 if (instr->hydrogen_value()->representation().IsSmi()) {
1621 __ dsll(result, left, shift_count);
1622 } else {
1623 __ sll(result, left, shift_count);
1624 }
1625 } else {
1626 __ Move(result, left);
1627 }
1628 break;
1629 default:
1630 UNREACHABLE();
1631 break;
1632 }
1633 }
1634}
1635
1636
void LCodeGen::DoSubS(LSubS* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right));
  } else {  // can_overflow.
    Register scratch = scratch0();
    Label no_overflow_label;
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ DsubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
                       &no_overflow_label, scratch);
    DeoptimizeIf(al, instr);
    __ bind(&no_overflow_label);
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
  } else {  // can_overflow.
    Register scratch = scratch0();
    Label no_overflow_label;
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ SubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
                      &no_overflow_label, scratch);
    DeoptimizeIf(al, instr);
    __ bind(&no_overflow_label);
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  DCHECK(instr->result()->IsDoubleRegister());
  DoubleRegister result = ToDoubleRegister(instr->result());
  double v = instr->value();
  __ Move(result, v);
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ li(ToRegister(instr->result()), object);
}


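// Computes the address of the character at |index| in a sequential string.
// A constant index folds into the operand's offset (scaled for two-byte
// strings); a variable index is scaled and added in scratch0().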
MemOperand LCodeGen::BuildSeqStringOperand(Register string,
                                           LOperand* index,
                                           String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
  }
  Register scratch = scratch0();
  DCHECK(!scratch.is(string));
  DCHECK(!scratch.is(ToRegister(index)));
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Daddu(scratch, string, ToRegister(index));
  } else {
    STATIC_ASSERT(kUC16Size == 2);
    __ dsll(scratch, ToRegister(index), 1);
    __ Daddu(scratch, string, scratch);
  }
  return FieldMemOperand(scratch, SeqString::kHeaderSize);
}


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    Register scratch = scratch0();
    __ ld(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
    __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

    __ And(scratch, scratch,
           Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ Dsubu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
                                      ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ lbu(result, operand);
  } else {
    __ lhu(result, operand);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register value = ToRegister(instr->value());

  if (FLAG_debug_code) {
    Register scratch = scratch0();
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
            ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ sb(value, operand);
  } else {
    __ sh(value, operand);
  }
}


void LCodeGen::DoAddE(LAddE* instr) {
  LOperand* result = instr->result();
  LOperand* left = instr->left();
  LOperand* right = instr->right();

  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
  DCHECK(right->IsRegister() || right->IsConstantOperand());
  __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
}


void LCodeGen::DoAddS(LAddS* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
  } else {  // can_overflow.
    Label no_overflow_label;
    Register scratch = scratch1();
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ DaddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
                       &no_overflow_label, scratch);
    DeoptimizeIf(al, instr);
    __ bind(&no_overflow_label);
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
  } else {  // can_overflow.
    Label no_overflow_label;
    Register scratch = scratch1();
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ AddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
                      &no_overflow_label, scratch);
    DeoptimizeIf(al, instr);
    __ bind(&no_overflow_label);
  }
}


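// For Smi/int32 inputs, min/max is selected branch-free with an Slt plus a
// Movz/Movn conditional-move pair. The double path defers NaN handling to a
// slow path that loads the canonical NaN value.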
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  Register scratch = scratch1();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
    Register left_reg = ToRegister(left);
    Register right_reg = EmitLoadRegister(right, scratch0());
    Register result_reg = ToRegister(instr->result());
    __ Slt(scratch, left_reg, Operand(right_reg));
    if (condition == ge) {
      __ Movz(result_reg, left_reg, scratch);
      __ Movn(result_reg, right_reg, scratch);
    } else {
      DCHECK(condition == le);
      __ Movn(result_reg, left_reg, scratch);
      __ Movz(result_reg, right_reg, scratch);
    }
  } else {
    DCHECK(instr->hydrogen()->representation().IsDouble());
    FPURegister left_reg = ToDoubleRegister(left);
    FPURegister right_reg = ToDoubleRegister(right);
    FPURegister result_reg = ToDoubleRegister(instr->result());
    Label nan, done;
    if (operation == HMathMinMax::kMathMax) {
      __ MaxNaNCheck_d(result_reg, left_reg, right_reg, &nan);
    } else {
      DCHECK(operation == HMathMinMax::kMathMin);
      __ MinNaNCheck_d(result_reg, left_reg, right_reg, &nan);
    }
    __ Branch(&done);

    __ bind(&nan);
    __ LoadRoot(scratch, Heap::kNanValueRootIndex);
    __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));

    __ bind(&done);
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      __ add_d(result, left, right);
      break;
    case Token::SUB:
      __ sub_d(result, left, right);
      break;
    case Token::MUL:
      __ mul_d(result, left, right);
      break;
    case Token::DIV:
      __ div_d(result, left, right);
      break;
    case Token::MOD: {
      // Save a0-a3 on the stack.
      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
      __ MultiPush(saved_regs);

      __ PrepareCallCFunction(0, 2, scratch0());
      __ MovToFloatParameters(left, right);
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()),
          0, 2);
      // Move the result to the double result register.
      __ MovFromFloatResult(result);

      // Restore the saved registers.
      __ MultiPop(saved_regs);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(a1));
  DCHECK(ToRegister(instr->right()).is(a0));
  DCHECK(ToRegister(instr->result()).is(v0));

  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
  // Other architectures use a nop here to signal that there is no inlined
  // patchable code. MIPS does not need the nop, since our marker instruction
  // (andi zero_reg) will never be used in normal code.
}


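// Emits a conditional branch to the instruction's true/false destinations,
// omitting whichever branch would just fall through to the next emitted
// block.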
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr,
                          Condition condition,
                          Register src1,
                          const Operand& src2) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();
  if (right_block == left_block || condition == al) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ Branch(chunk_->GetAssemblyLabel(right_block),
              NegateCondition(condition), src1, src2);
  } else if (right_block == next_block) {
    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
  } else {
    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}


template<class InstrType>
void LCodeGen::EmitBranchF(InstrType instr,
                           Condition condition,
                           FPURegister src1,
                           FPURegister src2) {
  int right_block = instr->FalseDestination(chunk_);
  int left_block = instr->TrueDestination(chunk_);

  int next_block = GetNextEmittedBlock();
  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
               NegateFpuCondition(condition), src1, src2);
  } else if (right_block == next_block) {
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
               condition, src1, src2);
  } else {
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
               condition, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}


template <class InstrType>
void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition,
                              Register src1, const Operand& src2) {
  int true_block = instr->TrueDestination(chunk_);
  __ Branch(chunk_->GetAssemblyLabel(true_block), condition, src1, src2);
}


template <class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition,
                               Register src1, const Operand& src2) {
  int false_block = instr->FalseDestination(chunk_);
  __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
}


template<class InstrType>
void LCodeGen::EmitFalseBranchF(InstrType instr,
                                Condition condition,
                                FPURegister src1,
                                FPURegister src2) {
  int false_block = instr->FalseDestination(chunk_);
  __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
             condition, src1, src2);
}


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ stop("LDebugBreak");
}


void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32() || r.IsSmi()) {
    DCHECK(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    EmitBranch(instr, ne, reg, Operand(zero_reg));
  } else if (r.IsDouble()) {
    DCHECK(!info()->IsStub());
    DoubleRegister reg = ToDoubleRegister(instr->value());
    // Test the double value. Zero and NaN are false.
    EmitBranchF(instr, ogl, reg, kDoubleRegZero);
  } else {
    DCHECK(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      DCHECK(!info()->IsStub());
      __ LoadRoot(at, Heap::kTrueValueRootIndex);
      EmitBranch(instr, eq, reg, Operand(at));
    } else if (type.IsSmi()) {
      DCHECK(!info()->IsStub());
      EmitBranch(instr, ne, reg, Operand(zero_reg));
    } else if (type.IsJSArray()) {
      DCHECK(!info()->IsStub());
      EmitBranch(instr, al, zero_reg, Operand(zero_reg));
    } else if (type.IsHeapNumber()) {
      DCHECK(!info()->IsStub());
      DoubleRegister dbl_scratch = double_scratch0();
      __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
      // Test the double value. Zero and NaN are false.
      EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
    } else if (type.IsString()) {
      DCHECK(!info()->IsStub());
      __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
      EmitBranch(instr, ne, at, Operand(zero_reg));
    } else {
      ToBooleanICStub::Types expected =
          instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();

      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
        // undefined -> false.
        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
        // Boolean -> its value.
        __ LoadRoot(at, Heap::kTrueValueRootIndex);
        __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
        __ LoadRoot(at, Heap::kFalseValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
        // 'null' -> false.
        __ LoadRoot(at, Heap::kNullValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }

      if (expected.Contains(ToBooleanICStub::SMI)) {
        // Smis: 0 -> false, all others -> true.
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ SmiTst(reg, at);
        DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
      }

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset));
        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
          __ And(at, at, Operand(1 << Map::kIsUndetectable));
          __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
        }
      }

      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(instr->TrueLabel(chunk_),
                  ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
      }

      if (expected.Contains(ToBooleanICStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
        __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
        __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
        __ Branch(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
        // Symbol value -> true.
        const Register scratch = scratch1();
        __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
      }

      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
        // SIMD value -> true.
        const Register scratch = scratch1();
        __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(instr->TrueLabel(chunk_), eq, scratch,
                  Operand(SIMD128_VALUE_TYPE));
      }

      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        DoubleRegister dbl_scratch = double_scratch0();
        Label not_heap_number;
        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
        __ Branch(&not_heap_number, ne, map, Operand(at));
        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
        __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                   ne, dbl_scratch, kDoubleRegZero);
        // Falls through if dbl_scratch == 0.
        __ Branch(instr->FalseLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg,
                     Operand(zero_reg));
      }
    }
  }
}


void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = kNoCondition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = ne;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  bool is_unsigned =
      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
  Condition cond = TokenToCondition(instr->op(), is_unsigned);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
                         ? instr->TrueDestination(chunk_)
                         : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Compare left and right as doubles and load the
      // resulting flags into the normal status register.
      FPURegister left_reg = ToDoubleRegister(left);
      FPURegister right_reg = ToDoubleRegister(right);

      // If a NaN is involved, i.e. the result is unordered,
      // jump to the false block label.
      __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
                 left_reg, right_reg);

      EmitBranchF(instr, cond, left_reg, right_reg);
    } else {
      Register cmp_left;
      Operand cmp_right = Operand((int64_t)0);
      if (right->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          cmp_left = ToRegister(left);
          cmp_right = Operand(Smi::FromInt(value));
        } else {
          cmp_left = ToRegister(left);
          cmp_right = Operand(value);
        }
      } else if (left->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          cmp_left = ToRegister(right);
          cmp_right = Operand(Smi::FromInt(value));
        } else {
          cmp_left = ToRegister(right);
          cmp_right = Operand(value);
        }
        // We commuted the operands, so commute the condition.
        cond = CommuteCondition(cond);
      } else {
        cmp_left = ToRegister(left);
        cmp_right = Operand(ToRegister(right));
      }

      EmitBranch(instr, cond, cmp_left, cmp_right);
    }
  }
}


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());

  EmitBranch(instr, eq, left, Operand(right));
}


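// The hole is a NaN with a distinguished upper word. For untagged doubles,
// ordered (non-NaN) values are rejected first, then the high 32 bits are
// compared against kHoleNanUpper32.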
void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ li(at, Operand(factory()->the_hole_value()));
    EmitBranch(instr, eq, input_reg, Operand(at));
    return;
  }

  DoubleRegister input_reg = ToDoubleRegister(instr->object());
  EmitFalseBranchF(instr, eq, input_reg, input_reg);

  Register scratch = scratch0();
  __ FmoveHigh(scratch, input_reg);
  EmitBranch(instr, eq, scratch,
             Operand(static_cast<int32_t>(kHoleNanUpper32)));
}


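// Loads the instance type of |input| into |temp1| and returns the condition
// (lt) under which it denotes a string; the caller completes the comparison
// against FIRST_NONSTRING_TYPE.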
Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }
  __ GetObjectType(input, temp1, temp1);

  return lt;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->type().IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  Condition true_cond =
      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond, temp1,
             Operand(FIRST_NONSTRING_TYPE));
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Register input_reg = EmitLoadRegister(instr->value(), at);
  __ And(at, input_reg, kSmiTagMask);
  EmitBranch(instr, eq, at, Operand(zero_reg));
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ ld(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
  EmitBranch(instr, ne, at, Operand(zero_reg));
}


static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(a1));
  DCHECK(ToRegister(instr->right()).is(a0));

  Handle<Code> code =
      CodeFactory::StringCompare(isolate(), instr->op()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
  __ LoadRoot(at, Heap::kTrueValueRootIndex);
  EmitBranch(instr, eq, v0, Operand(at));
}


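// TestType and BranchCondition together reduce an instance-type interval
// [from, to] to a single comparison: an exact type compares for equality,
// while a one-sided interval compares against its finite bound.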
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  DCHECK(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return hs;
  if (from == FIRST_TYPE) return ls;
  UNREACHABLE();
  return eq;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ GetObjectType(input, scratch, scratch);
  EmitBranch(instr,
             BranchCondition(instr->hydrogen()),
             scratch,
             Operand(TestType(instr->hydrogen())));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ lwu(result, FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ lwu(scratch,
         FieldMemOperand(input, String::kHashFieldOffset));
  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, eq, at, Operand(zero_reg));
}


// Branches to a label or falls through with the answer in flags. Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  DCHECK(!input.is(temp));
  DCHECK(!input.is(temp2));
  DCHECK(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  __ GetObjectType(input, temp, temp2);
  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
    __ Branch(is_true, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
  } else {
    __ Branch(is_false, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  Register instance_type = scratch1();
  DCHECK(!instance_type.is(temp));
  __ GetMapConstructor(temp, temp, temp2, instance_type);

  // Objects with a non-function constructor have class 'Object'.
  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
    __ Branch(is_true, ne, instance_type, Operand(JS_FUNCTION_TYPE));
  } else {
    __ Branch(is_false, ne, instance_type, Operand(JS_FUNCTION_TYPE));
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ ld(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ ld(temp, FieldMemOperand(temp,
                              SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted. This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax. Since both sides are internalized it is sufficient to use an
  // identity comparison.

  // End with the address of this class_name instance in the temp register.
  // On MIPS, the caller must do the comparison with Handle<String> class_name.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = scratch0();
  Register temp2 = ToRegister(instr->temp());
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  EmitBranch(instr, eq, temp, Operand(class_name));
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
  EmitBranch(instr, eq, temp, Operand(instr->map()));
}


void LCodeGen::DoHasInPrototypeChainAndBranch(
    LHasInPrototypeChainAndBranch* instr) {
  Register const object = ToRegister(instr->object());
  Register const object_map = scratch0();
  Register const object_instance_type = scratch1();
  Register const object_prototype = object_map;
  Register const prototype = ToRegister(instr->prototype());

  // The {object} must be a spec object. It's sufficient to know that {object}
  // is not a smi, since all other non-spec objects have {null} prototypes and
  // will be ruled out below.
  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
    __ SmiTst(object, at);
    EmitFalseBranch(instr, eq, at, Operand(zero_reg));
  }

  // Loop through the {object}'s prototype chain looking for the {prototype}.
  __ ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
  Label loop;
  __ bind(&loop);

  // Deoptimize if the object needs to be access checked.
  __ lbu(object_instance_type,
         FieldMemOperand(object_map, Map::kBitFieldOffset));
  __ And(object_instance_type, object_instance_type,
         Operand(1 << Map::kIsAccessCheckNeeded));
  DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, object_instance_type,
               Operand(zero_reg));
  __ lbu(object_instance_type,
         FieldMemOperand(object_map, Map::kInstanceTypeOffset));
  DeoptimizeIf(eq, instr, Deoptimizer::kProxy, object_instance_type,
               Operand(JS_PROXY_TYPE));

  __ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
  __ LoadRoot(at, Heap::kNullValueRootIndex);
  EmitFalseBranch(instr, eq, object_prototype, Operand(at));
  EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
  __ Branch(&loop, USE_DELAY_SLOT);
  __ ld(object_map, FieldMemOperand(object_prototype,
                                    HeapObject::kMapOffset));  // In delay slot.
}


void LCodeGen::DoCmpT(LCmpT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // On MIPS there is no need for a "no inlined smi code" marker (nop).

  Condition condition = ComputeCompareCondition(op);
  // A minor optimization that relies on LoadRoot always emitting one
  // instruction.
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
  Label done, check;
  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
  __ bind(&check);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ bind(&done);
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in v0. Since we're leaving
    // the code managed by the register allocator and tearing down the frame,
    // it's safe to write to the context register.
    __ push(v0);
    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit);
  }
  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  }
  if (NeedsEagerFrame()) {
    __ mov(sp, fp);
    __ Pop(ra, fp);
  }
  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
    if (sp_delta != 0) {
      __ Daddu(sp, sp, Operand(sp_delta));
    }
  } else {
    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a Smi.
    __ SmiUntag(reg);
    __ Dlsa(sp, sp, reg, kPointerSizeLog2);
  }

  __ Jump(ra);
}


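// Materializes the type feedback vector and the Smi-tagged slot index in
// the registers that the IC calling convention expects.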
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
  Register vector_register = ToRegister(instr->temp_vector());
  Register slot_register = LoadWithVectorDescriptor::SlotRegister();
  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
  DCHECK(slot_register.is(a0));

  AllowDeferredHandleDereference vector_structure_check;
  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
  __ li(vector_register, vector);
  // No need to allocate this register.
  FeedbackVectorSlot slot = instr->hydrogen()->slot();
  int index = vector->GetIndex(slot);
  __ li(slot_register, Operand(Smi::FromInt(index)));
}


template <class T>
void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
  Register vector_register = ToRegister(instr->temp_vector());
  Register slot_register = ToRegister(instr->temp_slot());

  AllowDeferredHandleDereference vector_structure_check;
  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
  __ li(vector_register, vector);
  FeedbackVectorSlot slot = instr->hydrogen()->slot();
  int index = vector->GetIndex(slot);
  __ li(slot_register, Operand(Smi::FromInt(index)));
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->result()).is(v0));

  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
  Handle<Code> ic =
      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
          .code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());

  __ ld(result, ContextMemOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
    } else {
      Label is_not_hole;
      __ Branch(&is_not_hole, ne, result, Operand(at));
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ bind(&is_not_hole);
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  MemOperand target = ContextMemOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ ld(scratch, target);
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at));
    } else {
      __ Branch(&skip_assignment, ne, scratch, Operand(at));
    }
  }

  __ sd(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch0(),
                              GetRAState(),
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}


void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  Register object = ToRegister(instr->object());
  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    MemOperand operand = MemOperand(object, offset);
    __ Load(result, operand, access.representation());
    return;
  }

  if (instr->hydrogen()->representation().IsDouble()) {
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ ldc1(result, FieldMemOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }

  Representation representation = access.representation();
  if (representation.IsSmi() && SmiValuesAre32Bits() &&
      instr->hydrogen()->representation().IsInteger32()) {
    if (FLAG_debug_code) {
      // Verify this is really a Smi.
      Register scratch = scratch0();
      __ Load(scratch, FieldMemOperand(object, offset), representation);
      __ AssertSmi(scratch);
    }

    // Read the int value directly from the upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
    offset = SmiWordOffset(offset);
    representation = Representation::Integer32();
  }
  __ Load(result, FieldMemOperand(object, offset), representation);
}


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->result()).is(v0));

  // Name is always in a2.
  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Get the prototype or initial map from the function.
  __ ld(result,
        FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));

  // If the function does not have an initial map, we're done.
  Label done;
  __ GetObjectType(result, scratch, scratch);
  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));

  // Get the prototype from the initial map.
  __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  // There are two words between the frame pointer and the last argument.
  // Subtracting the index from the length accounts for one of them; add
  // one more.
  if (instr->length()->IsConstantOperand()) {
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    if (instr->index()->IsConstantOperand()) {
      int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
      int index = (const_length - const_index) + 1;
      __ ld(result, MemOperand(arguments, index * kPointerSize));
    } else {
      Register index = ToRegister(instr->index());
      __ li(at, Operand(const_length + 1));
      __ Dsubu(result, at, index);
      __ Dlsa(at, arguments, result, kPointerSizeLog2);
      __ ld(result, MemOperand(at));
    }
  } else if (instr->index()->IsConstantOperand()) {
    Register length = ToRegister(instr->length());
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = const_index - 1;
    if (loc != 0) {
      __ Dsubu(result, length, Operand(loc));
      __ Dlsa(at, arguments, result, kPointerSizeLog2);
      __ ld(result, MemOperand(at));
    } else {
      __ Dlsa(at, arguments, length, kPointerSizeLog2);
      __ ld(result, MemOperand(at));
    }
  } else {
    Register length = ToRegister(instr->length());
    Register index = ToRegister(instr->index());
    __ Dsubu(result, length, index);
    __ Daddu(result, result, 1);
    __ Dlsa(at, arguments, result, kPointerSizeLog2);
    __ ld(result, MemOperand(at));
  }
}


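// Loads an element from a typed array. The key is shifted by the element
// size (or un-Smi-tagged first when the shift size is negative), and the
// load instruction is selected to match the elements kind.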
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
      : element_size_shift;
  int base_offset = instr->base_offset();

  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
    FPURegister result = ToDoubleRegister(instr->result());
    if (key_is_constant) {
      __ Daddu(scratch0(), external_pointer,
               constant_key << element_size_shift);
    } else {
      if (shift_size < 0) {
        if (shift_size == -32) {
          __ dsra32(scratch0(), key, 0);
        } else {
          __ dsra(scratch0(), key, -shift_size);
        }
      } else {
        __ dsll(scratch0(), key, shift_size);
      }
      __ Daddu(scratch0(), scratch0(), external_pointer);
    }
    if (elements_kind == FLOAT32_ELEMENTS) {
      __ lwc1(result, MemOperand(scratch0(), base_offset));
      __ cvt_d_s(result, result);
    } else {  // i.e. elements_kind == FLOAT64_ELEMENTS.
      __ ldc1(result, MemOperand(scratch0(), base_offset));
    }
  } else {
    Register result = ToRegister(instr->result());
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size, base_offset);
    switch (elements_kind) {
      case INT8_ELEMENTS:
        __ lb(result, mem_operand);
        break;
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ lbu(result, mem_operand);
        break;
      case INT16_ELEMENTS:
        __ lh(result, mem_operand);
        break;
      case UINT16_ELEMENTS:
        __ lhu(result, mem_operand);
        break;
      case INT32_ELEMENTS:
        __ lw(result, mem_operand);
        break;
      case UINT32_ELEMENTS:
        __ lw(result, mem_operand);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue,
                       result, Operand(0x80000000));
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
      case FAST_STRING_WRAPPER_ELEMENTS:
      case SLOW_STRING_WRAPPER_ELEMENTS:
      case NO_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


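// Loads a double element from a FixedDoubleArray and, when a hole check is
// required, deoptimizes on the hole NaN pattern in the upper 32 bits.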
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  DoubleRegister result = ToDoubleRegister(instr->result());
  Register scratch = scratch0();

  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);

  int base_offset = instr->base_offset();
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    base_offset += constant_key * kDoubleSize;
  }
  __ Daddu(scratch, elements, Operand(base_offset));

  if (!key_is_constant) {
    key = ToRegister(instr->key());
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
        : element_size_shift;
    if (shift_size > 0) {
      __ dsll(at, key, shift_size);
    } else if (shift_size == -32) {
      __ dsra32(at, key, 0);
    } else {
      __ dsra(at, key, -shift_size);
    }
    __ Daddu(scratch, scratch, at);
  }

  __ ldc1(result, MemOperand(scratch));

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ FmoveHigh(scratch, result);
    DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch,
                 Operand(static_cast<int32_t>(kHoleNanUpper32)));
  }
}


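// Loads a tagged element from a FixedArray, handling Smi-tagged keys, hole
// checks, and the stub-only hole-to-undefined conversion that is guarded by
// the array protector cell.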
void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  HLoadKeyed* hinstr = instr->hydrogen();
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = instr->base_offset();

  if (instr->key()->IsConstantOperand()) {
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    Register key = ToRegister(instr->key());
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ SmiScale(scratch, key, kPointerSizeLog2);
      __ daddu(scratch, elements, scratch);
    } else {
      __ Dlsa(scratch, elements, key, kPointerSizeLog2);
    }
  }

  Representation representation = hinstr->representation();
  if (representation.IsInteger32() && SmiValuesAre32Bits() &&
      hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
    DCHECK(!hinstr->RequiresHoleCheck());
    if (FLAG_debug_code) {
      Register temp = scratch1();
      __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
      __ AssertSmi(temp);
    }

    // Read the int value directly from the upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
    offset = SmiWordOffset(offset);
  }

  __ Load(result, MemOperand(store_base, offset), representation);

  // Check for the hole value.
  if (hinstr->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      __ SmiTst(result, scratch);
      DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch,
                   Operand(zero_reg));
    } else {
      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
      DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch));
    }
  } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
    DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
    Label done;
    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
    __ Branch(&done, ne, result, Operand(scratch));
    if (info()->IsStub()) {
      // A stub can safely convert the hole to undefined only if the array
      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
      // it needs to bail out.
      __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
      // The comparison only needs the least-significant bits of the value,
      // which is a Smi.
      __ ld(result, FieldMemOperand(result, Cell::kValueOffset));
      DeoptimizeIf(ne, instr, Deoptimizer::kHole, result,
                   Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
    }
    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
    __ bind(&done);
  }
}


void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_fixed_typed_array()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}


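// Builds the MemOperand for a keyed access: a constant key folds into the
// offset, while a variable key is shifted (or un-Smi-tagged) into scratch0()
// and added to the base register.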
3056MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3057 Register base,
3058 bool key_is_constant,
3059 int constant_key,
3060 int element_size,
3061 int shift_size,
3062 int base_offset) {
3063 if (key_is_constant) {
3064 return MemOperand(base, (constant_key << element_size) + base_offset);
3065 }
3066
3067 if (base_offset == 0) {
3068 if (shift_size >= 0) {
3069 __ dsll(scratch0(), key, shift_size);
3070 __ Daddu(scratch0(), base, scratch0());
3071 return MemOperand(scratch0());
3072 } else {
3073 if (shift_size == -32) {
3074 __ dsra32(scratch0(), key, 0);
3075 } else {
3076 __ dsra(scratch0(), key, -shift_size);
3077 }
3078 __ Daddu(scratch0(), base, scratch0());
3079 return MemOperand(scratch0());
3080 }
3081 }
3082
3083 if (shift_size >= 0) {
3084 __ dsll(scratch0(), key, shift_size);
3085 __ Daddu(scratch0(), base, scratch0());
3086 return MemOperand(scratch0(), base_offset);
3087 } else {
3088 if (shift_size == -32) {
3089 __ dsra32(scratch0(), key, 0);
3090 } else {
3091 __ dsra(scratch0(), key, -shift_size);
3092 }
3093 __ Daddu(scratch0(), base, scratch0());
3094 return MemOperand(scratch0(), base_offset);
3095 }
3096}
3097
3098
3099void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3100 DCHECK(ToRegister(instr->context()).is(cp));
3101 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3102 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3103
Ben Murdoch61f157c2016-09-16 13:49:30 +01003104 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003105
Ben Murdoch61f157c2016-09-16 13:49:30 +01003106 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003107 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3108}
3109
3110
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register scratch = scratch0();
  Register temp = scratch1();
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ Dsubu(result, sp, 2 * kPointerSize);
  } else if (instr->hydrogen()->arguments_adaptor()) {
    // Check if the calling frame is an arguments adaptor frame.
    Label done, adapted;
    __ ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ ld(result,
          MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
    __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

    // Result is the frame pointer for the frame if not adapted, and for the
    // real frame below the adaptor frame if adapted.
    __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
    __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
  } else {
    __ mov(result, fp);
  }
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If there is no arguments adaptor frame, the number of arguments is fixed.
  __ Daddu(result, zero_reg, Operand(scope()->num_parameters()));
  __ Branch(&done, eq, fp, Operand(elem));

  // Arguments adaptor frame present. Get argument length from there.
  __ ld(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ld(result,
        MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, result_in_receiver;

  if (!instr->hydrogen()->known_function()) {
    // Do not transform the receiver to object for strict mode functions.
    __ ld(scratch,
          FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));

    // Do not transform the receiver to object for builtins.
    int32_t strict_mode_function_mask =
        1 << SharedFunctionInfo::kStrictModeBitWithinByte;
    int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;

    __ lbu(at,
           FieldMemOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset));
    __ And(at, at, Operand(strict_mode_function_mask));
    __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
    __ lbu(at,
           FieldMemOperand(scratch, SharedFunctionInfo::kNativeByteOffset));
    __ And(at, at, Operand(native_mask));
    __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
  }

  // Normal function. Replace undefined or null with global receiver.
  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));

  // Deoptimize if the receiver is not a JS object.
  __ SmiTst(receiver, scratch);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg));

  __ GetObjectType(receiver, scratch, scratch);
  DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
               Operand(FIRST_JS_RECEIVER_TYPE));
  __ Branch(&result_in_receiver);

  __ bind(&global_object);
  __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ ld(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
  __ ld(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));

  if (result.is(receiver)) {
    __ bind(&result_in_receiver);
  } else {
    Label result_ok;
    __ Branch(&result_ok);
    __ bind(&result_in_receiver);
    __ mov(result, receiver);
    __ bind(&result_ok);
  }
}

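// Note: the copy loop below keeps the MIPS branch delay slots busy; the dsll
// that computes the next element offset is placed in the delay slot of the
// preceding branch (USE_DELAY_SLOT), so it executes whether or not the
// branch is taken.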
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  DCHECK(receiver.is(a0));  // Used for parameter count.
  DCHECK(function.is(a1));  // Required by InvokeFunction.
  DCHECK(ToRegister(instr->result()).is(v0));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length,
               Operand(kArgumentsLimit));

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ push(receiver);
  __ Move(receiver, length);
  // The arguments are located one pointer size above elements.
  __ Daddu(elements, elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
  __ dsll(scratch, length, kPointerSizeLog2);
  __ bind(&loop);
  __ Daddu(scratch, elements, scratch);
  __ ld(scratch, MemOperand(scratch));
  __ push(scratch);
  __ Dsubu(length, length, Operand(1));
  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
  __ dsll(scratch, length, kPointerSizeLog2);

  __ bind(&invoke);

  InvokeFlag flag = CALL_FUNCTION;
  if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
    DCHECK(!info()->saves_caller_doubles());
    // TODO(ishell): drop current frame before pushing arguments to the stack.
    flag = JUMP_FUNCTION;
    ParameterCount actual(a0);
    // It is safe to use t0, t1 and t2 as scratch registers here given that
    // we are not going to return to caller function anyway.
    PrepareForTailCall(actual, t0, t1, t2);
  }

  DCHECK(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver which is a0, as expected
  // by InvokeFunction.
  ParameterCount actual(receiver);
  __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort(kDoPushArgumentNotImplementedForDoubleType);
  } else {
    Register argument_reg = EmitLoadRegister(argument, at);
    __ push(argument_reg);
  }
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ ld(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoContext(LContext* instr) {
  // If there is a non-return use, the context must be moved to a register.
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in cp.
    DCHECK(result.is(cp));
  }
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  __ li(scratch0(), instr->hydrogen()->pairs());
  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
  __ Push(scratch0(), scratch1());
  CallRuntime(Runtime::kDeclareGlobals, instr);
}

void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count, int arity,
                                 bool is_tail_call, LInstruction* instr) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  Register function_reg = a1;
  LPointerMap* pointers = instr->pointer_map();

  if (can_invoke_directly) {
    // Change context.
    __ ld(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));

    // Always initialize new target and number of actual arguments.
    __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
    __ li(a0, Operand(arity));

    bool is_self_call = function.is_identical_to(info()->closure());

    // Invoke function.
    if (is_self_call) {
      Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
      if (is_tail_call) {
        __ Jump(self, RelocInfo::CODE_TARGET);
      } else {
        __ Call(self, RelocInfo::CODE_TARGET);
      }
    } else {
      __ ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
      if (is_tail_call) {
        __ Jump(at);
      } else {
        __ Call(at);
      }
    }

    if (!is_tail_call) {
      // Set up deoptimization.
      RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
    }
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount actual(arity);
    ParameterCount expected(formal_parameter_count);
    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
    __ InvokeFunction(function_reg, expected, actual, flag, generator);
  }
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  DCHECK(instr->context() != NULL);
  DCHECK(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at));

  Label done;
  Register exponent = scratch0();
  scratch = no_reg;
  __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ Move(result, input);
  __ And(at, exponent, Operand(HeapNumber::kSignMask));
  __ Branch(&done, eq, at, Operand(zero_reg));

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers.
    Register tmp1 = input.is(a1) ? a0 : a1;
    Register tmp2 = input.is(a2) ? a0 : a2;
    Register tmp3 = input.is(a3) ? a0 : a3;
    Register tmp4 = input.is(a4) ? a0 : a4;

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ Branch(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);

    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                            instr->context());
    // Set the pointer to the new heap number in tmp.
    if (!tmp1.is(v0))
      __ mov(tmp1, v0);
    // Restore input_reg after call to runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.
    __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
    __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ lwu(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}

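// Note: both integer abs helpers below use the branch delay slot to copy
// |input| to |result|, so non-negative inputs take the early exit with the
// move already done. Negative inputs fall through to the negation; the
// minimum value (whose negation overflows back to a negative number)
// triggers a deoptimization.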
void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  Label done;
  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
  __ mov(result, input);
  __ subu(result, zero_reg, input);
  // Overflow if result is still negative, i.e. 0x80000000.
  DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
  __ bind(&done);
}


void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  Label done;
  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
  __ mov(result, input);
  __ dsubu(result, zero_reg, input);
  // Overflow if result is still negative, i.e. 0x80000000 00000000.
  DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
  __ bind(&done);
}


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LMathAbs* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    FPURegister input = ToDoubleRegister(instr->value());
    FPURegister result = ToDoubleRegister(instr->result());
    __ abs_d(result, input);
  } else if (r.IsInteger32()) {
    EmitIntegerMathAbs(instr);
  } else if (r.IsSmi()) {
    EmitSmiMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitSmiMathAbs(instr);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LMathFloor* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch1 = scratch0();
  Register except_flag = ToRegister(instr->temp());

  __ EmitFPUTruncate(kRoundToMinusInf,
                     result,
                     input,
                     scratch1,
                     double_scratch0(),
                     except_flag);

  // Deopt if the operation did not succeed.
  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
               Operand(zero_reg));

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    Label done;
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ mfhc1(scratch1, input);  // Get exponent/sign bits.
    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
                 Operand(zero_reg));
    __ bind(&done);
  }
}

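// Note: rounding is implemented as floor(input + 0.5), with two extra
// checks: inputs in ]-0.5, +0.5[ short-circuit to +/-0 via the exponent
// bits, and a sign change after adding 0.5 means the input was in
// ]-0.5, 0[, so the result must be -0 when -0 bailouts are requested.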
void LCodeGen::DoMathRound(LMathRound* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
  Register scratch = scratch0();
  Label done, check_sign_on_zero;

  // Extract exponent bits.
  __ mfhc1(result, input);
  __ Ext(scratch,
         result,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
  Label skip1;
  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
  __ mov(result, zero_reg);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Branch(&check_sign_on_zero);
  } else {
    __ Branch(&done);
  }
  __ bind(&skip1);

  // The following conversion will not work with numbers
  // outside of ]-2^32, 2^32[.
  DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
               Operand(HeapNumber::kExponentBias + 32));

  // Save the original sign for later comparison.
  __ And(scratch, result, Operand(HeapNumber::kSignMask));

  __ Move(double_scratch0(), 0.5);
  __ add_d(double_scratch0(), input, double_scratch0());

  // Check sign of the result: if the sign changed, the input
  // value was in ]-0.5, 0[ and the result should be -0.
  __ mfhc1(result, double_scratch0());
  // mfhc1 sign-extends, clear the upper bits.
  __ dsll32(result, result, 0);
  __ dsrl32(result, result, 0);
  __ Xor(result, result, Operand(scratch));
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // ARM uses 'mi' here, which is 'lt'.
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
  } else {
    Label skip2;
    // ARM uses 'mi' here, which is 'lt'.
    // Negating it results in 'ge'.
    __ Branch(&skip2, ge, result, Operand(zero_reg));
    __ mov(result, zero_reg);
    __ Branch(&done);
    __ bind(&skip2);
  }

  Register except_flag = scratch;
  __ EmitFPUTruncate(kRoundToMinusInf,
                     result,
                     double_scratch0(),
                     at,
                     double_scratch1,
                     except_flag);

  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
               Operand(zero_reg));

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ bind(&check_sign_on_zero);
    __ mfhc1(scratch, input);  // Get exponent/sign bits.
    __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch,
                 Operand(zero_reg));
  }
  __ bind(&done);
}

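// Note: Math.fround is implemented as a round-trip through single
// precision; cvt_s_d performs the float32 rounding and cvt_d_s widens the
// result back to a double.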
void LCodeGen::DoMathFround(LMathFround* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ cvt_s_d(result, input);
  __ cvt_d_s(result, result);
}


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ sqrt_d(result, input);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister temp = ToDoubleRegister(instr->temp());

  DCHECK(!input.is(result));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done;
  __ Move(temp, static_cast<double>(-V8_INFINITY));
  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
  // Set up Infinity in the delay slot.
  // result is overwritten if the branch is not taken.
  __ neg_d(result, temp);

  // Add +0 to convert -0 to +0.
  __ add_d(result, input, kDoubleRegZero);
  __ sqrt_d(result, result);
  __ bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(f4));
  DCHECK(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(tagged_exponent));
  DCHECK(ToDoubleRegister(instr->left()).is(f2));
  DCHECK(ToDoubleRegister(instr->result()).is(f0));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt);
    DCHECK(!a7.is(tagged_exponent));
    __ lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, a7, Operand(at));
    __ bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}

void LCodeGen::DoMathCos(LMathCos* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}

void LCodeGen::DoMathSin(LMathSin* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}

void LCodeGen::DoMathExp(LMathExp* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ Clz(result, input);
}

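// Note: when the caller sits below an arguments adaptor frame, the adaptor
// frame is dropped together with the current frame, so the tail-called
// function sees the actual (adapted) argument count; otherwise the caller's
// formal parameter count is used.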
void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
                                  Register scratch1, Register scratch2,
                                  Register scratch3) {
#ifdef DEBUG
  if (actual.is_reg()) {
    DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
  } else {
    DCHECK(!AreAliased(scratch1, scratch2, scratch3));
  }
#endif
  if (FLAG_code_comments) {
    if (actual.is_reg()) {
      Comment(";;; PrepareForTailCall, actual: %s {",
              RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
                  actual.reg().code()));
    } else {
      Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
    }
  }

  // Check if next frame is an arguments adaptor frame.
  Register caller_args_count_reg = scratch1;
  Label no_arguments_adaptor, formal_parameter_count_loaded;
  __ ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ld(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
  __ Branch(&no_arguments_adaptor, ne, scratch3,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

  // Drop current frame and load arguments count from arguments adaptor frame.
  __ mov(fp, scratch2);
  __ ld(caller_args_count_reg,
        MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);
  __ Branch(&formal_parameter_count_loaded);

  __ bind(&no_arguments_adaptor);
  // Load caller's formal parameter count.
  __ li(caller_args_count_reg, Operand(info()->literal()->parameter_count()));

  __ bind(&formal_parameter_count_loaded);
  __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);

  Comment(";;; }");
}

void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  HInvokeFunction* hinstr = instr->hydrogen();
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(a1));
  DCHECK(instr->HasPointerMap());

  bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;

  if (is_tail_call) {
    DCHECK(!info()->saves_caller_doubles());
    ParameterCount actual(instr->arity());
    // It is safe to use t0, t1 and t2 as scratch registers here given that
    // we are not going to return to caller function anyway.
    PrepareForTailCall(actual, t0, t1, t2);
  }

  Handle<JSFunction> known_function = hinstr->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount actual(instr->arity());
    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
    __ InvokeFunction(a1, no_reg, actual, flag, generator);
  } else {
    CallKnownFunction(known_function, hinstr->formal_parameter_count(),
                      instr->arity(), is_tail_call, instr);
  }
}


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(ToRegister(instr->result()).is(v0));

  if (instr->hydrogen()->IsTailCall()) {
    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      __ Jump(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ Jump(target);
    }
  } else {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
      __ Call(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      generator.BeforeCall(__ CallSize(target));
      __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ Call(target);
    }
    generator.AfterCall();
  }
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(a1));
  DCHECK(ToRegister(instr->result()).is(v0));

  __ li(a0, Operand(instr->arity()));
  __ li(a2, instr->hydrogen()->site());

  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need to create a holey array; look at the first argument.
      __ ld(a5, MemOperand(sp, 0));
      __ Branch(&packed_case, eq, a5, Operand(zero_reg));

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      __ jmp(&done);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate());
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ Daddu(code_object, code_object,
           Operand(Code::kHeaderSize - kHeapObjectTag));
  __ sd(code_object,
        FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ Daddu(result, base, Operand(ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ Daddu(result, base, offset);
  }
}

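// Note: with 32-bit smis (SmiValuesAre32Bits), the payload occupies one
// 32-bit word of the 64-bit field, so an int32 value can be written
// directly at SmiWordOffset(offset) without tagging; see the representation
// switch below. Unboxed double fields are stored with sdc1 when
// FLAG_unbox_double_fields is set.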
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch2 = scratch1();
  Register scratch1 = scratch0();

  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  if (access.IsExternalMemory()) {
    Register value = ToRegister(instr->value());
    MemOperand operand = MemOperand(object, offset);
    __ Store(value, operand, representation);
    return;
  }

  __ AssertNotSmi(object);

  DCHECK(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ sdc1(value, FieldMemOperand(object, offset));
    return;
  }

  if (instr->hydrogen()->has_transition()) {
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    __ li(scratch1, Operand(transition));
    __ sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object,
                           scratch1,
                           temp,
                           GetRAState(),
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register destination = object;
  if (!access.IsInobject()) {
    destination = scratch1;
    __ ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
  }

  if (representation.IsSmi() && SmiValuesAre32Bits() &&
      instr->hydrogen()->value()->representation().IsInteger32()) {
    DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    if (FLAG_debug_code) {
      __ Load(scratch2, FieldMemOperand(destination, offset), representation);
      __ AssertSmi(scratch2);
    }
    // Store int value directly to upper half of the smi.
    offset = SmiWordOffset(offset);
    representation = Representation::Integer32();
  }
  MemOperand operand = FieldMemOperand(destination, offset);

  if (FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ sdc1(value, operand);
  } else {
    DCHECK(instr->value()->IsRegister());
    Register value = ToRegister(instr->value());
    __ Store(value, operand, representation);
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    // Update the write barrier for the object for in-object properties.
    Register value = ToRegister(instr->value());
    __ RecordWriteField(destination,
                        offset,
                        value,
                        scratch2,
                        GetRAState(),
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        instr->hydrogen()->SmiCheckForWriteBarrier(),
                        instr->hydrogen()->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);

  __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
  Handle<Code> ic =
      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
          .code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

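// Note: when the index is a constant, the operands are swapped and the
// condition is commuted (CommuteCondition) so that the register operand
// always comes first in the comparison.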
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
  Operand operand((int64_t)0);
  Register reg;
  if (instr->index()->IsConstantOperand()) {
    operand = ToOperand(instr->index());
    reg = ToRegister(instr->length());
    cc = CommuteCondition(cc);
  } else {
    reg = ToRegister(instr->index());
    operand = ToOperand(instr->length());
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
    __ Branch(&done, NegateCondition(cc), reg, operand);
    __ stop("eliminated bounds check failed");
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand);
  }
}


void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
      : element_size_shift;
  int base_offset = instr->base_offset();

  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
    Register address = scratch0();
    FPURegister value(ToDoubleRegister(instr->value()));
    if (key_is_constant) {
      if (constant_key != 0) {
        __ Daddu(address, external_pointer,
                 Operand(constant_key << element_size_shift));
      } else {
        address = external_pointer;
      }
    } else {
      if (shift_size < 0) {
        if (shift_size == -32) {
          __ dsra32(address, key, 0);
        } else {
          __ dsra(address, key, -shift_size);
        }
      } else {
        __ dsll(address, key, shift_size);
      }
      __ Daddu(address, external_pointer, address);
    }

    if (elements_kind == FLOAT32_ELEMENTS) {
      __ cvt_s_d(double_scratch0(), value);
      __ swc1(double_scratch0(), MemOperand(address, base_offset));
    } else {  // Storing doubles, not floats.
      __ sdc1(value, MemOperand(address, base_offset));
    }
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        base_offset);
    switch (elements_kind) {
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
      case INT8_ELEMENTS:
        __ sb(value, mem_operand);
        break;
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ sh(value, mem_operand);
        break;
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ sw(value, mem_operand);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
      case FAST_STRING_WRAPPER_ELEMENTS:
      case SLOW_STRING_WRAPPER_ELEMENTS:
      case NO_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}

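// Note: when canonicalization is required, FPUCanonicalizeNaN rewrites any
// NaN into the canonical NaN bit pattern before the store; this keeps
// arbitrary NaN payloads (which could alias special markers such as the
// hole) out of fast double arrays.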
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  DoubleRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  DoubleRegister double_scratch = double_scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int base_offset = instr->base_offset();
  Label not_nan, done;

  // Calculate the effective address of the slot in the array to store the
  // double value.
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    __ Daddu(scratch, elements,
             Operand((constant_key << element_size_shift) + base_offset));
  } else {
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
        : element_size_shift;
    __ Daddu(scratch, elements, Operand(base_offset));
    DCHECK((shift_size == 3) || (shift_size == -29));
    if (shift_size == 3) {
      __ dsll(at, ToRegister(instr->key()), 3);
    } else if (shift_size == -29) {
      __ dsra(at, ToRegister(instr->key()), 29);
    }
    __ Daddu(scratch, scratch, at);
  }

  if (instr->NeedsCanonicalization()) {
    __ FPUCanonicalizeNaN(double_scratch, value);
    __ sdc1(double_scratch, MemOperand(scratch, 0));
  } else {
    __ sdc1(value, MemOperand(scratch, 0));
  }
}


void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
                                            : no_reg;
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = instr->base_offset();

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ SmiScale(scratch, key, kPointerSizeLog2);
      __ daddu(store_base, elements, scratch);
    } else {
      __ Dlsa(store_base, elements, key, kPointerSizeLog2);
    }
  }

  Representation representation = instr->hydrogen()->value()->representation();
  if (representation.IsInteger32() && SmiValuesAre32Bits()) {
    DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
    if (FLAG_debug_code) {
      Register temp = scratch1();
      __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
      __ AssertSmi(temp);
    }

    // Store int value directly to upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
    offset = SmiWordOffset(offset);
    representation = Representation::Integer32();
  }

  __ Store(value, MemOperand(store_base, offset), representation);

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ Daddu(key, store_base, Operand(offset));
    __ RecordWrite(elements,
                   key,
                   value,
                   GetRAState(),
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed,
                   instr->hydrogen()->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // Dispatch by cases: external (typed) array, fast double array, or fast
  // (tagged) array.
  if (instr->is_fixed_typed_array()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);

  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
                        isolate(), instr->language_mode())
                        .code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

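// Note: the in-range checks below are emitted inline; only the growth path
// itself is deferred. If both key and capacity are constants the comparison
// is resolved at compile time, otherwise a single branch guards the
// deferred call.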
void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
  class DeferredMaybeGrowElements final : public LDeferredCode {
   public:
    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LMaybeGrowElements* instr_;
  };

  Register result = v0;
  DeferredMaybeGrowElements* deferred =
      new (zone()) DeferredMaybeGrowElements(this, instr);
  LOperand* key = instr->key();
  LOperand* current_capacity = instr->current_capacity();

  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
  DCHECK(key->IsConstantOperand() || key->IsRegister());
  DCHECK(current_capacity->IsConstantOperand() ||
         current_capacity->IsRegister());

  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    if (constant_key >= constant_capacity) {
      // Deferred case.
      __ jmp(deferred->entry());
    }
  } else if (key->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    __ Branch(deferred->entry(), le, ToRegister(current_capacity),
              Operand(constant_key));
  } else if (current_capacity->IsConstantOperand()) {
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    __ Branch(deferred->entry(), ge, ToRegister(key),
              Operand(constant_capacity));
  } else {
    __ Branch(deferred->entry(), ge, ToRegister(key),
              Operand(ToRegister(current_capacity)));
  }

  if (instr->elements()->IsRegister()) {
    __ mov(result, ToRegister(instr->elements()));
  } else {
    __ ld(result, ToMemOperand(instr->elements()));
  }

  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = v0;
  __ mov(result, zero_reg);

  // We have to call a stub.
  {
    PushSafepointRegistersScope scope(this);
    if (instr->object()->IsRegister()) {
      __ mov(result, ToRegister(instr->object()));
    } else {
      __ ld(result, ToMemOperand(instr->object()));
    }

    LOperand* key = instr->key();
    if (key->IsConstantOperand()) {
      LConstantOperand* constant_key = LConstantOperand::cast(key);
      int32_t int_key = ToInteger32(constant_key);
      if (Smi::IsValid(int_key)) {
        __ li(a3, Operand(Smi::FromInt(int_key)));
      } else {
        // We should never get here at runtime because there is a smi check on
        // the key before this point.
        __ stop("expected smi");
      }
    } else {
      __ mov(a3, ToRegister(key));
      __ SmiTag(a3);
    }

    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
                               instr->hydrogen()->kind());
    __ mov(a0, result);
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(
        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    __ StoreToSafepointRegisterSlot(result, result);
  }

  // Deopt on smi, which means the elements array changed to dictionary mode.
  __ SmiTst(result, at);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ ld(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ Branch(&not_applicable, ne, scratch, Operand(from_map));

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ li(new_map_reg, Operand(to_map));
    __ sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteForMap(object_reg,
                         new_map_reg,
                         scratch,
                         GetRAState(),
                         kDontSaveFPRegs);
  } else {
    DCHECK(object_reg.is(a0));
    DCHECK(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(this);
    __ li(a1, Operand(to_map));
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
  DeoptimizeIf(al, instr, Deoptimizer::kMementoFound);
  __ bind(&no_memento_found);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(a1));
  DCHECK(ToRegister(instr->right()).is(a0));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt final : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);
  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Daddu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                          instr->context());
  __ AssertSmi(v0);
  __ SmiUntag(v0);
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode final : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  DCHECK(!char_code.is(result));

  __ Branch(deferred->entry(), hi,
            char_code, Operand(String::kMaxOneByteCharCode));
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ Dlsa(result, result, char_code, kPointerSizeLog2);
  __ ld(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(deferred->entry(), eq, result, Operand(scratch));
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
                          instr->context());
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  DCHECK(output->IsDoubleRegister());
  FPURegister single_scratch = double_scratch0().low();
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ ld(scratch, ToMemOperand(input));
    __ mtc1(scratch, single_scratch);
  } else {
    __ mtc1(ToRegister(input), single_scratch);
  }
  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();

  FPURegister dbl_scratch = double_scratch0();
  __ mtc1(ToRegister(input), dbl_scratch);
  __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch);
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU final : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_,
                                       instr_->value(),
                                       instr_->temp1(),
                                       instr_->temp2(),
                                       UNSIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagU* instr_;
  };

  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
  __ SmiTag(result, input);
  __ bind(deferred->exit());
}

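// Note: in the SIGNED_INT32 case the value arrives as the result of a smi
// tag that overflowed, so when dst aliases src the original int32 is
// recovered by untagging and flipping bit 31 (the Xor with 0x80000000)
// before the value is converted to a double.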
4589void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4590 LOperand* value,
4591 LOperand* temp1,
4592 LOperand* temp2,
4593 IntegerSignedness signedness) {
4594 Label done, slow;
4595 Register src = ToRegister(value);
4596 Register dst = ToRegister(instr->result());
4597 Register tmp1 = scratch0();
4598 Register tmp2 = ToRegister(temp1);
4599 Register tmp3 = ToRegister(temp2);
4600 DoubleRegister dbl_scratch = double_scratch0();
4601
4602 if (signedness == SIGNED_INT32) {
4603 // There was overflow, so bits 30 and 31 of the original integer
4604 // disagree. Try to allocate a heap number in new space and store
4605 // the value in there. If that fails, call the runtime system.
4606 if (dst.is(src)) {
4607 __ SmiUntag(src, dst);
4608 __ Xor(src, src, Operand(0x80000000));
4609 }
4610 __ mtc1(src, dbl_scratch);
4611 __ cvt_d_w(dbl_scratch, dbl_scratch);
4612 } else {
4613 __ mtc1(src, dbl_scratch);
4614 __ Cvt_d_uw(dbl_scratch, dbl_scratch);
4615 }
4616
4617 if (FLAG_inline_new) {
4618 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
Ben Murdochc5610432016-08-08 18:44:38 +01004619 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004620 __ Branch(&done);
4621 }
4622
4623 // Slow case: Call the runtime system to do the number allocation.
4624 __ bind(&slow);
4625 {
4626 // TODO(3095996): Put a valid pointer value in the stack slot where the
4627 // result register is stored, as this register is in the pointer map, but
4628 // contains an integer value.
4629 __ mov(dst, zero_reg);
4630 // Preserve the value of all registers.
4631 PushSafepointRegistersScope scope(this);
4632
4633 // NumberTagI and NumberTagD use the context from the frame, rather than
4634 // the environment's HContext or HInlinedContext value.
4635 // They only call Runtime::kAllocateHeapNumber.
4636 // The corresponding HChange instructions are added in a phase that does
4637 // not have easy access to the local context.
4638 __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4639 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4640 RecordSafepointWithRegisters(
4641 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4642 __ StoreToSafepointRegisterSlot(v0, dst);
4643 }
4644
  // Done. Store the value in dbl_scratch into the value field of the
  // allocated heap number.
  __ bind(&done);
  __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}


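// Boxes a double into a new HeapNumber. The fast path allocates inline; if
// that fails (or inline allocation is disabled) the deferred path calls into
// the runtime. The sdc1 after the exit label stores the payload on both
// paths.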
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD final : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    // We want the untagged address first for performance
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
  } else {
    __ Branch(deferred->entry());
  }
  __ bind(deferred->exit());
  __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, zero_reg);

  PushSafepointRegistersScope scope(this);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(v0, reg);
}


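// Tags an int32/uint32 as a Smi. Two overflow cases deopt: a uint32 input
// with bit 31 set cannot be represented, and a signed input that overflows
// the tagging shift is caught by SmiTagCheckOverflow.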
void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    __ And(at, input, Operand(0x80000000));
    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
  }
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    __ SmiTagCheckOverflow(output, input, at);
    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
  } else {
    __ SmiTag(output, input);
  }
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, value of scratch won't be zero.
    __ And(scratch, input, Operand(kHeapObjectTag));
    __ SmiUntag(result, input);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg));
  } else {
    __ SmiUntag(result, input);
  }
}


void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                DoubleRegister result_reg,
                                NumberUntagDMode mode) {
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();
  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();

  Register scratch = scratch0();
  Label convert, load_smi, done;
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
    // Heap number map check.
    __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    if (can_convert_undefined_to_nan) {
      __ Branch(&convert, ne, scratch, Operand(at));
    } else {
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch,
                   Operand(at));
    }
    // Load heap number.
    __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
      __ mfc1(at, result_reg);
      __ Branch(&done, ne, at, Operand(zero_reg));
      __ mfhc1(scratch, result_reg);  // Get exponent/sign bits.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch,
                   Operand(HeapNumber::kSignMask));
    }
    __ Branch(&done);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined (and hole) to NaN.
      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
                   Operand(at));
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ Branch(&done);
    }
  } else {
    __ SmiUntag(scratch, input_reg);
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }
  // Smi to double register conversion
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ mtc1(scratch, result_reg);
  __ cvt_d_w(result_reg, result_reg);
  __ bind(&done);
}


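// Deferred tagged->int32 conversion for non-Smi inputs. Truncating
// conversions implement the ToInt32 truncation used by the JS bitwise
// operators and also accept oddballs (undefined/false -> 0, true -> 1);
// non-truncating conversions deopt unless the input is a heap number whose
// value converts exactly.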
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  DoubleRegister double_scratch = double_scratch0();
  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());

  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input is a tagged HeapObject.
  // Heap number map check.
  __ ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  // This 'at' value and scratch1 map value are used for tests in both clauses
  // of the if.

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label no_heap_number, check_bools, check_false;
    // Check HeapNumber map.
    __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
    __ mov(scratch2, input_reg);  // In delay slot.
    __ TruncateHeapNumberToI(input_reg, scratch2);
    __ Branch(&done);

    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ bind(&no_heap_number);
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(&check_bools, ne, input_reg, Operand(at));
    DCHECK(ToRegister(instr->result()).is(input_reg));
    __ Branch(USE_DELAY_SLOT, &done);
    __ mov(input_reg, zero_reg);  // In delay slot.

    __ bind(&check_bools);
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(&check_false, ne, scratch2, Operand(at));
    __ Branch(USE_DELAY_SLOT, &done);
    __ li(input_reg, Operand(1));  // In delay slot.

    __ bind(&check_false);
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean,
                 scratch2, Operand(at));
    __ Branch(USE_DELAY_SLOT, &done);
    __ mov(input_reg, zero_reg);  // In delay slot.
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1,
                 Operand(at));

    // Load the double value.
    __ ldc1(double_scratch,
            FieldMemOperand(input_reg, HeapNumber::kValueOffset));

    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       input_reg,
                       double_scratch,
                       scratch1,
                       double_scratch2,
                       except_flag,
                       kCheckForInexactConversion);

    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
                 Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Branch(&done, ne, input_reg, Operand(zero_reg));

      __ mfhc1(scratch1, double_scratch);  // Get exponent/sign bits.
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
                   Operand(zero_reg));
    }
  }
  __ bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI final : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  DCHECK(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    // Let the deferred code handle the HeapObject case.
    __ JumpIfNotSmi(input_reg, deferred->entry());

    // Smi to int32 conversion.
    __ SmiUntag(input_reg);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DoubleRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(instr, input_reg, result_reg, mode);
}


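// Double->int32 without truncation. kCheckForInexactConversion makes any
// inexact result set except_flag and deopt, so the kRoundToMinusInf rounding
// mode only affects values that are about to deopt anyway; exact conversions
// are unchanged by it.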
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    Register except_flag = LCodeGen::scratch1();

    __ EmitFPUTruncate(kRoundToMinusInf,
                       result_reg,
                       double_input,
                       scratch1,
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
                 Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ mfhc1(scratch1, double_input);  // Get exponent/sign bits.
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = LCodeGen::scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    Register except_flag = LCodeGen::scratch1();

    __ EmitFPUTruncate(kRoundToMinusInf,
                       result_reg,
                       double_input,
                       scratch1,
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
                 Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ mfhc1(scratch1, double_input);  // Get exponent/sign bits.
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
  __ SmiTag(result_reg, result_reg);
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ SmiTst(ToRegister(input), at);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg));
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    __ SmiTst(ToRegister(input), at);
    DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
  }
}


void LCodeGen::DoCheckArrayBufferNotNeutered(
    LCheckArrayBufferNotNeutered* instr) {
  Register view = ToRegister(instr->view());
  Register scratch = scratch0();

  __ ld(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
  __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
  __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
  DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, at, Operand(zero_reg));
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ GetObjectType(input, scratch, scratch);

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
                   Operand(first));
    } else {
      DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch,
                   Operand(first));
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch,
                     Operand(last));
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ And(at, scratch, mask);
      DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
                   at, Operand(zero_reg));
    } else {
      __ And(scratch, scratch, Operand(mask));
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
                   Operand(tag));
    }
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ li(at, Operand(cell));
    __ ld(at, FieldMemOperand(at, Cell::kValueOffset));
    DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at));
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object));
  }
}


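// Tries to migrate an instance with a deprecated map via the runtime.
// Runtime::kTryMigrateInstance signals failure by returning a Smi, which the
// SmiTst below turns into a deopt.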
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    __ mov(cp, zero_reg);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(v0, scratch0());
  }
  __ SmiTst(scratch0(), at);
  DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at,
               Operand(zero_reg));
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps final : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    void Generate() override {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    LInstruction* instr() override { return instr_; }

   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  Register map_reg = scratch0();
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register reg = ToRegister(input);
  __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
  }
  Handle<Map> map = maps->at(maps->size() - 1).handle();
  // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ Branch(deferred->entry(), ne, map_reg, Operand(map));
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map));
  }

  __ bind(&success);
}


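// The three clamp variants below saturate a value into the [0, 255] range
// (as used for uint8-clamped element stores): from a double, from an int32,
// and from an arbitrary tagged value, where undefined clamps to zero.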
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);

  // Check for heap number
  __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
               Operand(factory()->undefined_value()));
  __ mov(result_reg, zero_reg);
  __ jmp(&done);

  // Heap number
  __ bind(&heap_number);
  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
                                             HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
  __ jmp(&done);

  __ bind(&is_smi);
  __ ClampUint8(result_reg, scratch);

  __ bind(&done);
}


void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ FmoveHigh(result_reg, value_reg);
  } else {
    __ FmoveLow(result_reg, value_reg);
  }
}


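// Inline bump-pointer allocation, with a deferred runtime fallback if it
// fails. An allocation-folding dominator reserves space for the folded
// allocations that follow it, which are then carved out by DoFastAllocate.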
void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate final : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = NO_ALLOCATION_FLAGS;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
  }
  DCHECK(!instr->hydrogen()->IsAllocationFolded());

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= Page::kMaxRegularHeapObjectSize);
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ li(scratch, Operand(size - kHeapObjectTag));
    } else {
      __ Dsubu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
    }
    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    Label loop;
    __ bind(&loop);
    __ Dsubu(scratch, scratch, Operand(kPointerSize));
    __ Daddu(at, result, Operand(scratch));
    __ sd(scratch2, MemOperand(at));
    __ Branch(&loop, ge, scratch, Operand(zero_reg));
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size >= 0 && size <= Smi::kMaxValue) {
      __ li(v0, Operand(Smi::FromInt(size)));
      __ Push(v0);
    } else {
      // We should never get here at runtime => abort
      __ stop("invalid allocation size");
      return;
    }
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ li(v0, Operand(Smi::FromInt(flags)));
  __ Push(v0);

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(v0, result);

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
    if (instr->hydrogen()->IsOldSpaceAllocation()) {
      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
      allocation_flags =
          static_cast<AllocationFlags>(allocation_flags | PRETENURE);
    }
    // If the allocation-folding dominator's allocation triggered a GC, the
    // allocation happened in the runtime. We have to reset the top pointer
    // to virtually undo the allocation.
    ExternalReference allocation_top =
        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
    Register top_address = scratch0();
    __ Dsubu(v0, v0, Operand(kHeapObjectTag));
    __ li(top_address, Operand(allocation_top));
    __ sd(v0, MemOperand(top_address));
    __ Daddu(v0, v0, Operand(kHeapObjectTag));
  }
}

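// Folded allocation: the folding dominator has already reserved this memory,
// so FastAllocate just bumps the top pointer within that reservation and
// needs neither a limit check nor a deferred fallback.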
void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
  DCHECK(instr->hydrogen()->IsAllocationFolded());
  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
  Register result = ToRegister(instr->result());
  Register scratch1 = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  AllocationFlags flags = ALLOCATION_FOLDED;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= Page::kMaxRegularHeapObjectSize);
    __ FastAllocate(size, result, scratch1, scratch2, flags);
  } else {
    Register size = ToRegister(instr->size());
    __ FastAllocate(size, result, scratch1, scratch2, flags);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->value()).is(a3));
  DCHECK(ToRegister(instr->result()).is(v0));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
  __ JumpIfNotSmi(value_register, &do_call);
  __ li(v0, Operand(isolate()->factory()->number_string()));
  __ jmp(&end);
  __ bind(&do_call);
  TypeofStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&end);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Register cmp1 = no_reg;
  Operand cmp2 = Operand(no_reg);

  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal(),
                                                  &cmp1,
                                                  &cmp2);

  DCHECK(cmp1.is_valid());
  DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());

  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition, cmp1, cmp2);
  }
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name,
                                 Register* cmp1,
                                 Operand* cmp2) {
  // This function makes heavy use of branch delay slots, using them to load
  // values that are always usable without depending on the type of the input
  // register.
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    *cmp1 = input;
    *cmp2 = Operand(at);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    *cmp1 = scratch;
    *cmp2 = Operand(FIRST_NONSTRING_TYPE);
    final_branch_condition = lt;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    *cmp1 = scratch;
    *cmp2 = Operand(SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    *cmp1 = at;
    *cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    __ Branch(USE_DELAY_SLOT, false_label, eq, at, Operand(input));
    // The first instruction of JumpIfSmi is an And - it is safe in the delay
    // slot.
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    __ JumpIfSmi(input, false_label);
    __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ And(scratch, scratch,
           Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    *cmp1 = scratch;
    *cmp2 = Operand(1 << Map::kIsCallable);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label);
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ GetObjectType(input, scratch, scratch1());
    __ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE));
    // Check for callable or undetectable objects => false.
    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ And(at, scratch,
           Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)        \
  } else if (String::Equals(type_name, factory->type##_string())) {  \
    __ JumpIfSmi(input, false_label);                                \
    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));    \
    __ LoadRoot(at, Heap::k##Type##MapRootIndex);                    \
    *cmp1 = input;                                                   \
    *cmp2 = Operand(at);                                             \
    final_branch_condition = eq;
  SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
  // clang-format on


  } else {
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
    __ Branch(false_label);
  }

  return final_branch_condition;
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg,
               Operand(zero_reg));
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(&done, hs, sp, Operand(at));
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


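// Prepares for-in iteration: if the receiver's enum cache is usable, the map
// itself is returned as the cache indicator; otherwise
// Runtime::kForInEnumerate computes the set of enumerable properties.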
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register result = ToRegister(instr->result());
  Register object = ToRegister(instr->object());

  Label use_cache, call_runtime;
  DCHECK(object.is(a0));
  __ CheckEnumCache(&call_runtime);

  __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Branch(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(object);
  CallRuntime(Runtime::kForInEnumerate, instr);
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ ld(result,
        FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ ld(result,
        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg));

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0()));
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object, index);
  __ mov(cp, zero_reg);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(v0, result);
}


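// Loads a field by its for-in index encoding: bit 0 of the Smi index marks a
// mutable heap-number field (handled in deferred code); after shifting that
// bit out, a negative index selects the out-of-object property backing store.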
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

  __ And(scratch, index, Operand(Smi::FromInt(1)));
  __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
  __ dsra(index, index, 1);

  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
  __ SmiScale(scratch, index, kPointerSizeLog2);  // In delay slot.
  __ Daddu(scratch, object, scratch);
  __ ld(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ Branch(&done);

  __ bind(&out_of_object);
  __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // The index is equal to the negated out-of-object property index plus 1.
  __ Dsubu(scratch, result, scratch);
  __ ld(result, FieldMemOperand(scratch,
                                FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}

#undef __

}  // namespace internal
}  // namespace v8