// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/crankshaft/mips64/lithium-codegen-mips64.h"

#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"

namespace v8 {
namespace internal {


class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetTotalFrameSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ sdc1(DoubleRegister::from_code(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}
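
// Illustrative note (not emitted code): the doubles are stored in ascending
// bit-vector order at sp + 0, sp + kDoubleSize, sp + 2 * kDoubleSize, and so
// on. For example, if the allocator used f20 and f22, f20 would be saved at
// MemOperand(sp, 0) and f22 at MemOperand(sp, kDoubleSize).
// RestoreCallerDoubles below walks the same order, so the two stay in sync.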


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ ldc1(DoubleRegister::from_code(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    // a1: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // ra: Caller's pc.
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue(StackFrame::STUB);
    } else {
      __ Prologue(info()->GeneratePreagedPrologue());
    }
    frame_is_built_ = true;
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ Dsubu(sp, sp, Operand(slots * kPointerSize));
      __ Push(a0, a1);
      __ Daddu(a0, sp, Operand(slots * kPointerSize));
      __ li(a1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ Dsubu(a0, a0, Operand(kPointerSize));
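      // a0/a1 were pushed after the frame slots were reserved, so the zap
      // stores are offset by 2 * kPointerSize to land back in the reserved
      // slot area.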
      __ sd(a1, MemOperand(a0, 2 * kPointerSize));
      __ Branch(&loop, ne, a0, Operand(sp));
      __ Pop(a0, a1);
    } else {
      __ Dsubu(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }
  return !is_aborted();
}


void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info()->scope()->num_heap_slots() > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in a1.
    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ push(a1);
      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else if (slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(a1);
      __ CallRuntime(Runtime::kNewFunctionContext);
    }
    RecordSafepoint(deopt_mode);

    // The context is returned in both v0 and cp. It replaces the context
    // passed to us. It's saved in the stack and kept live in cp.
    __ mov(cp, v0);
    __ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ ld(a0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ sd(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(
              cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, a0, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ Dsubu(sp, sp, Operand(slots * kPointerSize));
}
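
// Illustrative note (not emitted code): at OSR entry the unoptimized frame is
// already on the stack. If, say, the unoptimized frame holds 3 spill slots
// and the optimized code needs 10, the adjustment above grows the frame by
// the 7-slot difference rather than rebuilding it from scratch.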


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ PushCommonFrame(scratch0());
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ PopCommonFrame(scratch0());
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }
  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
    Label table_start, call_deopt_entry;

    __ bind(&table_start);
    Label needs_frame;
    Address base = jump_table_[0]->address;
    for (int i = 0; i < jump_table_.length(); i++) {
      Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
      __ bind(&table_entry->label);
      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load the base
      // address and add an immediate offset.
      if (is_int16(entry - base)) {
        if (table_entry->needs_frame) {
          DCHECK(!info()->saves_caller_doubles());
          Comment(";;; call deopt with frame");
          __ PushCommonFrame();
          __ BranchAndLink(&needs_frame, USE_DELAY_SLOT);
          __ li(t9, Operand(entry - base));
        } else {
          __ BranchAndLink(&call_deopt_entry, USE_DELAY_SLOT);
          __ li(t9, Operand(entry - base));
        }

      } else {
        __ li(t9, Operand(entry - base));
        if (table_entry->needs_frame) {
          DCHECK(!info()->saves_caller_doubles());
          Comment(";;; call deopt with frame");
          __ PushCommonFrame();
          __ BranchAndLink(&needs_frame);
        } else {
          __ BranchAndLink(&call_deopt_entry);
        }
      }
      info()->LogDeoptCallPosition(masm()->pc_offset(),
                                   table_entry->deopt_info.inlining_id);
    }
    if (needs_frame.is_linked()) {
      __ bind(&needs_frame);
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
      __ push(at);
      DCHECK(info()->IsStub());
    }

    Comment(";;; call deopt");
    __ bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    __ li(at,
          Operand(reinterpret_cast<int64_t>(base), RelocInfo::RUNTIME_ENTRY));
    __ Daddu(t9, t9, Operand(at));
    __ Jump(t9);
  }
  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}
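
// Illustrative note (not emitted code): each table entry only materializes
// its offset from the first entry (t9 = entry - base, when that fits in an
// int16), and the shared tail at call_deopt_entry adds the absolute base
// back in (t9 += base) before jumping. This keeps every per-entry stub short.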


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::from_code(index);
}


DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
  return DoubleRegister::from_code(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      AllowDeferredHandleDereference get_number;
      DCHECK(literal->IsNumber());
      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ li(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ ld(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                FloatRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(literal->IsNumber());
      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
      __ mtc1(at, flt_scratch);
      __ cvt_d_w(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot()) {
    MemOperand mem_op = ToMemOperand(op);
    __ ldc1(dbl_scratch, mem_op);
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();
}


int64_t LCodeGen::ToRepresentation_donotuse(LConstantOperand* op,
                                            const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<int64_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(static_cast<int64_t>(0));
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(static_cast<int64_t>(0));
}


static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}
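
// Illustrative note (not emitted code): incoming arguments have negative
// spill indices, and without a frame they are addressed off sp. With the
// formula above, index -1 maps to MemOperand(sp, 0) and index -2 to
// MemOperand(sp, kPointerSize), i.e. deeper arguments sit at larger offsets.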


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, FrameSlotToFPOffset(op->index()));
  } else {
    // There is no eager frame; retrieve the parameter relative to the
    // stack pointer instead.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    // Note: the upper 32-bit half of the double slot is at offset kIntSize,
    // not kPointerSize.
    return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kIntSize);
  } else {
    // There is no eager frame; retrieve the parameter relative to the
    // stack pointer instead.
    return MemOperand(
        sp, ArgumentsOffsetWithoutFrame(op->index()) + kIntSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ ld(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type,
                            Register src1, const Operand& src2) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ Push(a1, scratch);
    __ li(scratch, Operand(count));
    __ lw(a1, MemOperand(scratch));
    __ Subu(a1, a1, Operand(1));
    __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
    __ li(a1, Operand(FLAG_deopt_every_n_times));
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label skip;
    if (condition != al) {
      __ Branch(&skip, NegateCondition(condition), src1, src2);
    }
    __ stop("trap_on_deopt");
    __ bind(&skip);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
  } else {
    Deoptimizer::JumpTableEntry* table_entry =
        new (zone()) Deoptimizer::JumpTableEntry(
            entry, deopt_info, bailout_type, !frame_is_built_);
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry->IsEquivalentTo(*jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ Branch(&jump_table_.last()->label, condition, src1, src2);
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Register src1, const Operand& src2) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2);
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;

  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
    // Note: The code below even works when right contains kMinInt.
    __ dsubu(dividend, zero_reg, dividend);
    __ And(dividend, dividend, Operand(mask));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                   Operand(zero_reg));
    }
    __ Branch(USE_DELAY_SLOT, &done);
    __ dsubu(dividend, zero_reg, dividend);
  }

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, Operand(mask));
  __ bind(&done);
}
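
// Illustrative note (not emitted code): for divisor = 8 the mask is 7, and
// the sign of the dividend is preserved by negating around the mask. E.g.
// dividend = -13: negate -> 13, & 7 -> 5, negate -> -5, matching the
// truncating semantics of -13 % 8 == -5.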


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ Dmul(result, result, Operand(Abs(divisor)));
  __ Dsubu(result, dividend, Operand(result));

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  const Register left_reg = ToRegister(instr->left());
  const Register right_reg = ToRegister(instr->right());
  const Register result_reg = ToRegister(instr->result());

  // div runs in the background while we check for special cases.
  __ Dmod(result_reg, left_reg, right_reg);

  Label done;
  // Check for x % 0; we have to deopt in this case because we can't return a
  // NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg,
                 Operand(zero_reg));
  }

  // Check for kMinInt % -1; div will return kMinInt, which is not what we
  // want. We have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1));
    } else {
      __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
      __ Branch(USE_DELAY_SLOT, &done);
      __ mov(result_reg, zero_reg);
    }
    __ bind(&no_overflow_possible);
  }

  // If we care about -0, test if the dividend is <0 and the result is 0.
  __ Branch(&done, ge, left_reg, Operand(zero_reg));

  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg,
                 Operand(zero_reg));
  }
  __ bind(&done);
}
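
// Illustrative note (not emitted code): kMinInt % -1 is the one remainder
// case needing special handling -- mathematically it is 0, but since the
// dividend is negative, a -0-sensitive use must deopt; otherwise the code
// above just returns 0 explicitly.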


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt));
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ And(at, dividend, Operand(mask));
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg));
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ Dsubu(result, zero_reg, dividend);
    return;
  }
  uint16_t shift = WhichPowerOf2Abs(divisor);
  if (shift == 0) {
    __ Move(result, dividend);
  } else if (shift == 1) {
    __ dsrl32(result, dividend, 31);
    __ Daddu(result, dividend, Operand(result));
  } else {
    __ dsra32(result, dividend, 31);
    __ dsrl32(result, result, 32 - shift);
    __ Daddu(result, dividend, Operand(result));
  }
  if (shift > 0) __ dsra(result, result, shift);
  if (divisor < 0) __ Dsubu(result, zero_reg, result);
}
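
// Illustrative note (not emitted code): the pre-shift addend rounds negative
// dividends toward zero. E.g. divisor = 4 (shift = 2), dividend = -7: the
// sign-extension/shift pair materializes |divisor| - 1 = 3 for negative
// inputs, -7 + 3 = -4, and -4 >> 2 = -1, matching truncating division.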


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ Dmul(scratch0(), result, Operand(divisor));
    __ Dsubu(scratch0(), scratch0(), dividend);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(),
                 Operand(zero_reg));
  }
}
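
// Illustrative note (not emitted code): TruncatingDiv replaces the divide
// with a multiply by a precomputed reciprocal ("magic number"). Because that
// only yields the truncated quotient, the non-truncating case above checks
// exactness by multiplying back: quotient * divisor must equal the dividend,
// otherwise precision was lost and we deopt.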


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Div(result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
                 Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
                 Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Calculate remainder.
    Register remainder = ToRegister(instr->temp());
    if (kArchVariant != kMips64r6) {
      __ mfhi(remainder);
    } else {
      __ dmod(remainder, dividend, divisor);
    }
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder,
                 Operand(zero_reg));
  }
}


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  DCHECK(addend.is(ToDoubleRegister(instr->result())));

  __ Madd_d(addend, addend, multiplier, multiplicand, double_scratch0());
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  Register scratch = result.is(dividend) ? scratch0() : dividend;
  DCHECK(!result.is(dividend) || !scratch.is(dividend));

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Move(result, dividend);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  uint16_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ dsra(result, dividend, shift);
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  // Dividend can be the same register as result so save the value of it
  // for checking overflow.
  __ Move(scratch, dividend);

  __ Dsubu(result, zero_reg, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
  }

  __ Xor(scratch, scratch, result);
  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, result, Operand(kMaxInt));
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ dsra(result, result, shift);
    return;
  }

  Label no_overflow, done;
  __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
  __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE);
  __ Branch(&done);
  __ bind(&no_overflow);
  __ dsra(result, result, shift);
  __ bind(&done);
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ Dsubu(result, zero_reg, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
            dividend, Operand(zero_reg));
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Dsubu(result, zero_reg, result);
  __ jmp(&done);
  __ bind(&needs_adjustment);
  __ Daddu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ Dsubu(result, zero_reg, result);
  __ Dsubu(result, result, Operand(1));
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Ddiv(result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
                 Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
                 Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  // We performed a truncating division. Correct the result if necessary.
  Label done;
  Register remainder = scratch0();
  if (kArchVariant != kMips64r6) {
    __ mfhi(remainder);
  } else {
    __ dmod(remainder, dividend, divisor);
  }
  __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
  __ Xor(remainder, remainder, Operand(divisor));
  __ Branch(&done, ge, remainder, Operand(zero_reg));
  __ Dsubu(result, result, Operand(1));
  __ bind(&done);
}
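
// Illustrative note (not emitted code): truncating and flooring division only
// differ for inexact negative quotients. E.g. -7 / 2 truncates to -3 but
// floors to -4; the tail above detects this case (nonzero remainder whose
// sign differs from the divisor's) and subtracts 1 from the quotient.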


void LCodeGen::DoMulS(LMulS* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant is handled separately below.
      // If the constant is negative and left is zero, the result should be -0.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        if (overflow) {
          Label no_overflow;
          __ DsubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
          DeoptimizeIf(al, instr);
          __ bind(&no_overflow);
        } else {
          __ Dsubu(result, zero_reg, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
                       Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        // Nothing to do.
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ dsll(result, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Dsubu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ Dlsa(result, left, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Dsubu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ dsll(scratch, left, shift);
          __ Dsubu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Dsubu(result, zero_reg, result);
        } else {
          // Generate standard code.
          __ li(at, constant);
          __ Dmul(result, left, at);
        }
    }
  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      // hi:lo = left * right.
      __ Dmulh(result, left, right);
      __ dsra32(scratch, result, 0);
      __ sra(at, result, 31);
      __ SmiTag(result);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
    } else {
      __ SmiUntag(result, left);
      __ dmul(result, result, right);
    }

    if (bailout_on_minus_zero) {
      Label done;
      __ Xor(at, left, right);
      __ Branch(&done, ge, at, Operand(zero_reg));
      // Bail out if the result is minus zero.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}
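
// Illustrative note (not emitted code): the strength reduction above uses the
// usual shift-and-add identities. E.g. x * 8 = x << 3; x * 5 = (x << 2) + x,
// which Dlsa emits in one instruction; and x * 7 = (x << 3) - x. A negative
// constant reuses the |constant| sequence and negates the product afterwards.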


void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant is handled separately below.
      // If the constant is negative and left is zero, the result should be -0.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        if (overflow) {
          Label no_overflow;
          __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
          DeoptimizeIf(al, instr);
          __ bind(&no_overflow);
        } else {
          __ Subu(result, zero_reg, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
                       Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        // Nothing to do.
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ sll(result, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ Lsa(result, left, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ sll(scratch, left, shift);
          __ Subu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else {
          // Generate standard code.
          __ li(at, constant);
          __ Mul(result, left, at);
        }
    }

  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      // hi:lo = left * right.
      __ Dmul(result, left, right);
      __ dsra32(scratch, result, 0);
      __ sra(at, result, 31);

      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
    } else {
      __ mul(result, left, right);
    }

    if (bailout_on_minus_zero) {
      Label done;
      __ Xor(at, left, right);
      __ Branch(&done, ge, at, Operand(zero_reg));
      // Bail out if the result is minus zero.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  DCHECK(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot()) {
    right = Operand(EmitLoadRegister(right_op, at));
  } else {
    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ And(result, left, right);
      break;
    case Token::BIT_OR:
      __ Or(result, left, right);
      break;
    case Token::BIT_XOR:
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ Nor(result, zero_reg, left);
      } else {
        __ Xor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());

  if (right_op->IsRegister()) {
    // No need to mask the right operand on MIPS, it is built into the variable
    // shift instructions.
    switch (instr->op()) {
      case Token::ROR:
        __ Ror(result, left, Operand(ToRegister(right_op)));
        break;
      case Token::SAR:
        __ srav(result, left, ToRegister(right_op));
        break;
      case Token::SHR:
        __ srlv(result, left, ToRegister(right_op));
        if (instr->can_deopt()) {
          // TODO(yy): (-1) >>> 0. anything else?
          DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result,
                       Operand(zero_reg));
          DeoptimizeIf(gt, instr, Deoptimizer::kNegativeValue, result,
                       Operand(kMaxInt));
        }
        break;
      case Token::SHL:
        __ sllv(result, left, ToRegister(right_op));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ Ror(result, left, Operand(shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sra(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ srl(result, left, shift_count);
        } else {
          if (instr->can_deopt()) {
            __ And(at, left, Operand(0x80000000));
            DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at,
                         Operand(zero_reg));
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi()) {
            __ dsll(result, left, shift_count);
          } else {
            __ sll(result, left, shift_count);
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
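
// Illustrative note (not emitted code): SHR needs the deopt checks because
// JavaScript's >>> produces an unsigned result. E.g. (-1) >>> 0 is 4294967295,
// which does not fit in the int32 representation this instruction produces,
// so any result outside [0, kMaxInt] must bail out.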
1640
1641
1642void LCodeGen::DoSubS(LSubS* instr) {
1643 LOperand* left = instr->left();
1644 LOperand* right = instr->right();
1645 LOperand* result = instr->result();
1646 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1647
1648 if (!can_overflow) {
1649 DCHECK(right->IsRegister() || right->IsConstantOperand());
1650 __ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right));
1651 } else { // can_overflow.
Ben Murdochda12d292016-06-02 14:46:10 +01001652 Register scratch = scratch0();
1653 Label no_overflow_label;
1654 DCHECK(right->IsRegister() || right->IsConstantOperand());
1655 __ DsubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
1656 &no_overflow_label, scratch);
1657 DeoptimizeIf(al, instr);
1658 __ bind(&no_overflow_label);
1659 }
1660}
1661
1662
1663void LCodeGen::DoSubI(LSubI* instr) {
1664 LOperand* left = instr->left();
1665 LOperand* right = instr->right();
1666 LOperand* result = instr->result();
1667 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1668
1669 if (!can_overflow) {
1670 DCHECK(right->IsRegister() || right->IsConstantOperand());
1671 __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
1672 } else { // can_overflow.
1673 Register scratch = scratch0();
1674 Label no_overflow_label;
1675 DCHECK(right->IsRegister() || right->IsConstantOperand());
1676 __ SubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
1677 &no_overflow_label, scratch);
1678 DeoptimizeIf(al, instr);
1679 __ bind(&no_overflow_label);
1680 }
1681}
1682
1683
1684void LCodeGen::DoConstantI(LConstantI* instr) {
1685 __ li(ToRegister(instr->result()), Operand(instr->value()));
1686}
1687
1688
1689void LCodeGen::DoConstantS(LConstantS* instr) {
1690 __ li(ToRegister(instr->result()), Operand(instr->value()));
1691}
1692
1693
1694void LCodeGen::DoConstantD(LConstantD* instr) {
1695 DCHECK(instr->result()->IsDoubleRegister());
1696 DoubleRegister result = ToDoubleRegister(instr->result());
1697 double v = instr->value();
1698 __ Move(result, v);
1699}
1700
1701
1702void LCodeGen::DoConstantE(LConstantE* instr) {
1703 __ li(ToRegister(instr->result()), Operand(instr->value()));
1704}
1705
1706
1707void LCodeGen::DoConstantT(LConstantT* instr) {
1708 Handle<Object> object = instr->value(isolate());
1709 AllowDeferredHandleDereference smi_check;
1710 __ li(ToRegister(instr->result()), object);
1711}
1712
1713
1714MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1715 LOperand* index,
1716 String::Encoding encoding) {
1717 if (index->IsConstantOperand()) {
1718 int offset = ToInteger32(LConstantOperand::cast(index));
1719 if (encoding == String::TWO_BYTE_ENCODING) {
1720 offset *= kUC16Size;
1721 }
1722 STATIC_ASSERT(kCharSize == 1);
1723 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1724 }
1725 Register scratch = scratch0();
1726 DCHECK(!scratch.is(string));
1727 DCHECK(!scratch.is(ToRegister(index)));
1728 if (encoding == String::ONE_BYTE_ENCODING) {
1729 __ Daddu(scratch, string, ToRegister(index));
1730 } else {
1731 STATIC_ASSERT(kUC16Size == 2);
1732 __ dsll(scratch, ToRegister(index), 1);
1733 __ Daddu(scratch, string, scratch);
1734 }
1735 return FieldMemOperand(scratch, SeqString::kHeaderSize);
1736}
1737
1738
1739void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1740 String::Encoding encoding = instr->hydrogen()->encoding();
1741 Register string = ToRegister(instr->string());
1742 Register result = ToRegister(instr->result());
1743
1744 if (FLAG_debug_code) {
1745 Register scratch = scratch0();
1746 __ ld(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1747 __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1748
1749 __ And(scratch, scratch,
1750 Operand(kStringRepresentationMask | kStringEncodingMask));
1751 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1752 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1753 __ Dsubu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
1754 ? one_byte_seq_type : two_byte_seq_type));
1755 __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
1756 }
1757
1758 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1759 if (encoding == String::ONE_BYTE_ENCODING) {
1760 __ lbu(result, operand);
1761 } else {
1762 __ lhu(result, operand);
1763 }
1764}
1765
1766
1767void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1768 String::Encoding encoding = instr->hydrogen()->encoding();
1769 Register string = ToRegister(instr->string());
1770 Register value = ToRegister(instr->value());
1771
1772 if (FLAG_debug_code) {
1773 Register scratch = scratch0();
1774 Register index = ToRegister(instr->index());
1775 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1776 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1777 int encoding_mask =
1778 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1779 ? one_byte_seq_type : two_byte_seq_type;
1780 __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
1781 }
1782
1783 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1784 if (encoding == String::ONE_BYTE_ENCODING) {
1785 __ sb(value, operand);
1786 } else {
1787 __ sh(value, operand);
1788 }
1789}
1790
1791
1792void LCodeGen::DoAddE(LAddE* instr) {
1793 LOperand* result = instr->result();
1794 LOperand* left = instr->left();
1795 LOperand* right = instr->right();
1796
1797 DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
1798 DCHECK(right->IsRegister() || right->IsConstantOperand());
1799 __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
1800}
1801
1802
1803void LCodeGen::DoAddS(LAddS* instr) {
1804 LOperand* left = instr->left();
1805 LOperand* right = instr->right();
1806 LOperand* result = instr->result();
1807 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1808
1809 if (!can_overflow) {
1810 DCHECK(right->IsRegister() || right->IsConstantOperand());
1811 __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
1812 } else { // can_overflow.
1813 Label no_overflow_label;
1814 Register scratch = scratch1();
1815 DCHECK(right->IsRegister() || right->IsConstantOperand());
1816 __ DaddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
1817 &no_overflow_label, scratch);
1818 DeoptimizeIf(al, instr);
1819 __ bind(&no_overflow_label);
1820 }
1821}
1822
1823
1824void LCodeGen::DoAddI(LAddI* instr) {
1825 LOperand* left = instr->left();
1826 LOperand* right = instr->right();
1827 LOperand* result = instr->result();
1828 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1829
1830 if (!can_overflow) {
1831 DCHECK(right->IsRegister() || right->IsConstantOperand());
1832 __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
1833 } else { // can_overflow.
1834 Label no_overflow_label;
1835 Register scratch = scratch1();
1836 DCHECK(right->IsRegister() || right->IsConstantOperand());
1837 __ AddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
1838 &no_overflow_label, scratch);
1839 DeoptimizeIf(al, instr);
1840 __ bind(&no_overflow_label);
1841 }
1842}
1843
1844
1845void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1846 LOperand* left = instr->left();
1847 LOperand* right = instr->right();
1848 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1849 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
1850 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1851 Register left_reg = ToRegister(left);
1852 Register right_reg = EmitLoadRegister(right, scratch0());
1853 Register result_reg = ToRegister(instr->result());
1854 Label return_right, done;
1855 Register scratch = scratch1();
1856 __ Slt(scratch, left_reg, Operand(right_reg));
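// Branchless select: Slt leaves 1 in scratch when left < right, else 0.
// For kMathMax (ge), Movz keeps left when scratch == 0 (left >= right)
// and Movn writes right when scratch != 0; kMathMin mirrors this.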
1857 if (condition == ge) {
1858 __ Movz(result_reg, left_reg, scratch);
1859 __ Movn(result_reg, right_reg, scratch);
1860 } else {
1861 DCHECK(condition == le);
1862 __ Movn(result_reg, left_reg, scratch);
1863 __ Movz(result_reg, right_reg, scratch);
1864 }
1865 } else {
1866 DCHECK(instr->hydrogen()->representation().IsDouble());
1867 FPURegister left_reg = ToDoubleRegister(left);
1868 FPURegister right_reg = ToDoubleRegister(right);
1869 FPURegister result_reg = ToDoubleRegister(instr->result());
1870 Label check_nan_left, check_zero, return_left, return_right, done;
1871 __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
1872 __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
1873 __ Branch(&return_right);
1874
1875 __ bind(&check_zero);
1876 // left == right != 0.
1877 __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
1878 // At this point, both left and right are either 0 or -0.
1879 if (operation == HMathMinMax::kMathMin) {
1880 // The algorithm is: -((-L) + (-R)), which in case of L and R being
1881 // different registers is most efficiently expressed as -((-L) - R).
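// Worked example: for L = +0 and R = -0 this computes
// -((-0) + (+0)) = -(+0) = -0, which is the IEEE-754 minimum of the two.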
1882 __ neg_d(left_reg, left_reg);
1883 if (left_reg.is(right_reg)) {
1884 __ add_d(result_reg, left_reg, right_reg);
1885 } else {
1886 __ sub_d(result_reg, left_reg, right_reg);
1887 }
1888 __ neg_d(result_reg, result_reg);
1889 } else {
1890 __ add_d(result_reg, left_reg, right_reg);
1891 }
1892 __ Branch(&done);
1893
1894 __ bind(&check_nan_left);
1895 // left == NaN.
1896 __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
1897 __ bind(&return_right);
1898 if (!right_reg.is(result_reg)) {
1899 __ mov_d(result_reg, right_reg);
1900 }
1901 __ Branch(&done);
1902
1903 __ bind(&return_left);
1904 if (!left_reg.is(result_reg)) {
1905 __ mov_d(result_reg, left_reg);
1906 }
1907 __ bind(&done);
1908 }
1909}
1910
1911
1912void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1913 DoubleRegister left = ToDoubleRegister(instr->left());
1914 DoubleRegister right = ToDoubleRegister(instr->right());
1915 DoubleRegister result = ToDoubleRegister(instr->result());
1916 switch (instr->op()) {
1917 case Token::ADD:
1918 __ add_d(result, left, right);
1919 break;
1920 case Token::SUB:
1921 __ sub_d(result, left, right);
1922 break;
1923 case Token::MUL:
1924 __ mul_d(result, left, right);
1925 break;
1926 case Token::DIV:
1927 __ div_d(result, left, right);
1928 break;
1929 case Token::MOD: {
1930 // Save a0-a3 on the stack.
1931 RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
1932 __ MultiPush(saved_regs);
1933
1934 __ PrepareCallCFunction(0, 2, scratch0());
1935 __ MovToFloatParameters(left, right);
1936 __ CallCFunction(
1937 ExternalReference::mod_two_doubles_operation(isolate()),
1938 0, 2);
1939 // Move the result into the double result register.
1940 __ MovFromFloatResult(result);
1941
1942 // Restore saved registers.
1943 __ MultiPop(saved_regs);
1944 break;
1945 }
1946 default:
1947 UNREACHABLE();
1948 break;
1949 }
1950}
1951
1952
1953void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1954 DCHECK(ToRegister(instr->context()).is(cp));
1955 DCHECK(ToRegister(instr->left()).is(a1));
1956 DCHECK(ToRegister(instr->right()).is(a0));
1957 DCHECK(ToRegister(instr->result()).is(v0));
1958
1959 Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
1960 CallCode(code, RelocInfo::CODE_TARGET, instr);
1961 // Other architectures use a nop here, to signal that there is no inlined
1962 // patchable code. MIPS does not need the nop, since our marker
1963 // instruction (andi zero_reg) will never be used in normal code.
1964}
1965
1966
1967template<class InstrType>
1968void LCodeGen::EmitBranch(InstrType instr,
1969 Condition condition,
1970 Register src1,
1971 const Operand& src2) {
1972 int left_block = instr->TrueDestination(chunk_);
1973 int right_block = instr->FalseDestination(chunk_);
1974
1975 int next_block = GetNextEmittedBlock();
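// Emit as few branches as possible: when one successor is the block that
// is emitted next anyway, branch only for the other successor (negating
// the condition if needed) and fall through to the adjacent one.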
1976 if (right_block == left_block || condition == al) {
1977 EmitGoto(left_block);
1978 } else if (left_block == next_block) {
1979 __ Branch(chunk_->GetAssemblyLabel(right_block),
1980 NegateCondition(condition), src1, src2);
1981 } else if (right_block == next_block) {
1982 __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
1983 } else {
1984 __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
1985 __ Branch(chunk_->GetAssemblyLabel(right_block));
1986 }
1987}
1988
1989
1990template<class InstrType>
1991void LCodeGen::EmitBranchF(InstrType instr,
1992 Condition condition,
1993 FPURegister src1,
1994 FPURegister src2) {
1995 int right_block = instr->FalseDestination(chunk_);
1996 int left_block = instr->TrueDestination(chunk_);
1997
1998 int next_block = GetNextEmittedBlock();
1999 if (right_block == left_block) {
2000 EmitGoto(left_block);
2001 } else if (left_block == next_block) {
2002 __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
2003 NegateFpuCondition(condition), src1, src2);
2004 } else if (right_block == next_block) {
2005 __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
2006 condition, src1, src2);
2007 } else {
2008 __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
2009 condition, src1, src2);
2010 __ Branch(chunk_->GetAssemblyLabel(right_block));
2011 }
2012}
2013
2014
2015template <class InstrType>
2016void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition,
2017 Register src1, const Operand& src2) {
2018 int true_block = instr->TrueDestination(chunk_);
2019 __ Branch(chunk_->GetAssemblyLabel(true_block), condition, src1, src2);
2020}
2021
2022
2023template <class InstrType>
2024void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition,
2025 Register src1, const Operand& src2) {
2026 int false_block = instr->FalseDestination(chunk_);
2027 __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
2028}
2029
2030
2031template<class InstrType>
2032void LCodeGen::EmitFalseBranchF(InstrType instr,
2033 Condition condition,
2034 FPURegister src1,
2035 FPURegister src2) {
2036 int false_block = instr->FalseDestination(chunk_);
2037 __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
2038 condition, src1, src2);
2039}
2040
2041
2042void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2043 __ stop("LDebugBreak");
2044}
2045
2046
2047void LCodeGen::DoBranch(LBranch* instr) {
2048 Representation r = instr->hydrogen()->value()->representation();
2049 if (r.IsInteger32() || r.IsSmi()) {
2050 DCHECK(!info()->IsStub());
2051 Register reg = ToRegister(instr->value());
2052 EmitBranch(instr, ne, reg, Operand(zero_reg));
2053 } else if (r.IsDouble()) {
2054 DCHECK(!info()->IsStub());
2055 DoubleRegister reg = ToDoubleRegister(instr->value());
2056 // Test the double value. Zero and NaN are false.
2057 EmitBranchF(instr, ogl, reg, kDoubleRegZero);
2058 } else {
2059 DCHECK(r.IsTagged());
2060 Register reg = ToRegister(instr->value());
2061 HType type = instr->hydrogen()->value()->type();
2062 if (type.IsBoolean()) {
2063 DCHECK(!info()->IsStub());
2064 __ LoadRoot(at, Heap::kTrueValueRootIndex);
2065 EmitBranch(instr, eq, reg, Operand(at));
2066 } else if (type.IsSmi()) {
2067 DCHECK(!info()->IsStub());
2068 EmitBranch(instr, ne, reg, Operand(zero_reg));
2069 } else if (type.IsJSArray()) {
2070 DCHECK(!info()->IsStub());
2071 EmitBranch(instr, al, zero_reg, Operand(zero_reg));
2072 } else if (type.IsHeapNumber()) {
2073 DCHECK(!info()->IsStub());
2074 DoubleRegister dbl_scratch = double_scratch0();
2075 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2076 // Test the double value. Zero and NaN are false.
2077 EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
2078 } else if (type.IsString()) {
2079 DCHECK(!info()->IsStub());
2080 __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
2081 EmitBranch(instr, ne, at, Operand(zero_reg));
2082 } else {
2083 ToBooleanICStub::Types expected =
2084 instr->hydrogen()->expected_input_types();
2085 // Avoid deopts in the case where we've never executed this path before.
2086 if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
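// The checks below are emitted only for the value types the ToBoolean IC
// has actually observed; a type seen here for the first time falls through
// to the deopt at the end, letting the type feedback be collected again.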
2087
2088 if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
2089 // undefined -> false.
2090 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
2091 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2092 }
2093 if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
2094 // Boolean -> its value.
2095 __ LoadRoot(at, Heap::kTrueValueRootIndex);
2096 __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
2097 __ LoadRoot(at, Heap::kFalseValueRootIndex);
2098 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2099 }
2100 if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
2101 // 'null' -> false.
2102 __ LoadRoot(at, Heap::kNullValueRootIndex);
2103 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2104 }
2105
2106 if (expected.Contains(ToBooleanICStub::SMI)) {
2107 // Smis: 0 -> false, all other -> true.
2108 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
2109 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2110 } else if (expected.NeedsMap()) {
2111 // If we need a map later and have a Smi -> deopt.
2112 __ SmiTst(reg, at);
2113 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
2114 }
2115
2116 const Register map = scratch0();
2117 if (expected.NeedsMap()) {
2118 __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2119 if (expected.CanBeUndetectable()) {
2120 // Undetectable -> false.
2121 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
2122 __ And(at, at, Operand(1 << Map::kIsUndetectable));
2123 __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
2124 }
2125 }
2126
2127 if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
2128 // spec object -> true.
2129 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
2130 __ Branch(instr->TrueLabel(chunk_),
2131 ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
2132 }
2133
2134 if (expected.Contains(ToBooleanICStub::STRING)) {
2135 // String value -> false iff empty.
2136 Label not_string;
2137 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
2138 __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
2139 __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
2140 __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
2141 __ Branch(instr->FalseLabel(chunk_));
2142 __ bind(&not_string);
2143 }
2144
2145 if (expected.Contains(ToBooleanICStub::SYMBOL)) {
2146 // Symbol value -> true.
2147 const Register scratch = scratch1();
2148 __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
2149 __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
2150 }
2151
2152 if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
2153 // SIMD value -> true.
2154 const Register scratch = scratch1();
2155 __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
2156 __ Branch(instr->TrueLabel(chunk_), eq, scratch,
2157 Operand(SIMD128_VALUE_TYPE));
2158 }
2159
2160 if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
2161 // heap number -> false iff +0, -0, or NaN.
2162 DoubleRegister dbl_scratch = double_scratch0();
2163 Label not_heap_number;
2164 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
2165 __ Branch(&not_heap_number, ne, map, Operand(at));
2166 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2167 __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2168 ne, dbl_scratch, kDoubleRegZero);
2169 // Falls through if dbl_scratch == 0.
2170 __ Branch(instr->FalseLabel(chunk_));
2171 __ bind(&not_heap_number);
2172 }
2173
2174 if (!expected.IsGeneric()) {
2175 // We've seen something for the first time -> deopt.
2176 // This can only happen if we are not generic already.
2177 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg,
2178 Operand(zero_reg));
2179 }
2180 }
2181 }
2182}
2183
2184
2185void LCodeGen::EmitGoto(int block) {
2186 if (!IsNextEmittedBlock(block)) {
2187 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2188 }
2189}
2190
2191
2192void LCodeGen::DoGoto(LGoto* instr) {
2193 EmitGoto(instr->block_id());
2194}
2195
2196
2197Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2198 Condition cond = kNoCondition;
2199 switch (op) {
2200 case Token::EQ:
2201 case Token::EQ_STRICT:
2202 cond = eq;
2203 break;
2204 case Token::NE:
2205 case Token::NE_STRICT:
2206 cond = ne;
2207 break;
2208 case Token::LT:
2209 cond = is_unsigned ? lo : lt;
2210 break;
2211 case Token::GT:
2212 cond = is_unsigned ? hi : gt;
2213 break;
2214 case Token::LTE:
2215 cond = is_unsigned ? ls : le;
2216 break;
2217 case Token::GTE:
2218 cond = is_unsigned ? hs : ge;
2219 break;
2220 case Token::IN:
2221 case Token::INSTANCEOF:
2222 default:
2223 UNREACHABLE();
2224 }
2225 return cond;
2226}
2227
2228
2229void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2230 LOperand* left = instr->left();
2231 LOperand* right = instr->right();
2232 bool is_unsigned =
2233 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2234 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2235 Condition cond = TokenToCondition(instr->op(), is_unsigned);
2236
2237 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2238 // We can statically evaluate the comparison.
2239 double left_val = ToDouble(LConstantOperand::cast(left));
2240 double right_val = ToDouble(LConstantOperand::cast(right));
2241 int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
2242 ? instr->TrueDestination(chunk_)
2243 : instr->FalseDestination(chunk_);
2244 EmitGoto(next_block);
2245 } else {
2246 if (instr->is_double()) {
2247 // Compare left and right as doubles and load the
2248 // resulting flags into the normal status register.
2249 FPURegister left_reg = ToDoubleRegister(left);
2250 FPURegister right_reg = ToDoubleRegister(right);
2251
2252 // If a NaN is involved, i.e. the result is unordered,
2253 // jump to false block label.
2254 __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
2255 left_reg, right_reg);
2256
2257 EmitBranchF(instr, cond, left_reg, right_reg);
2258 } else {
2259 Register cmp_left;
2260 Operand cmp_right = Operand((int64_t)0);
2261 if (right->IsConstantOperand()) {
2262 int32_t value = ToInteger32(LConstantOperand::cast(right));
2263 if (instr->hydrogen_value()->representation().IsSmi()) {
2264 cmp_left = ToRegister(left);
2265 cmp_right = Operand(Smi::FromInt(value));
2266 } else {
2267 cmp_left = ToRegister(left);
2268 cmp_right = Operand(value);
2269 }
2270 } else if (left->IsConstantOperand()) {
2271 int32_t value = ToInteger32(LConstantOperand::cast(left));
2272 if (instr->hydrogen_value()->representation().IsSmi()) {
2273 cmp_left = ToRegister(right);
2274 cmp_right = Operand(Smi::FromInt(value));
2275 } else {
2276 cmp_left = ToRegister(right);
2277 cmp_right = Operand(value);
2278 }
2279 // We commuted the operands, so commute the condition.
2280 cond = CommuteCondition(cond);
2281 } else {
2282 cmp_left = ToRegister(left);
2283 cmp_right = Operand(ToRegister(right));
2284 }
2285
2286 EmitBranch(instr, cond, cmp_left, cmp_right);
2287 }
2288 }
2289}
2290
2291
2292void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2293 Register left = ToRegister(instr->left());
2294 Register right = ToRegister(instr->right());
2295
2296 EmitBranch(instr, eq, left, Operand(right));
2297}
2298
2299
2300void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2301 if (instr->hydrogen()->representation().IsTagged()) {
2302 Register input_reg = ToRegister(instr->object());
2303 __ li(at, Operand(factory()->the_hole_value()));
2304 EmitBranch(instr, eq, input_reg, Operand(at));
2305 return;
2306 }
2307
2308 DoubleRegister input_reg = ToDoubleRegister(instr->object());
2309 EmitFalseBranchF(instr, eq, input_reg, input_reg);
2310
2311 Register scratch = scratch0();
2312 __ FmoveHigh(scratch, input_reg);
2313 EmitBranch(instr, eq, scratch,
2314 Operand(static_cast<int32_t>(kHoleNanUpper32)));
2315}
2316
2317
2318Condition LCodeGen::EmitIsString(Register input,
2319 Register temp1,
2320 Label* is_not_string,
2321 SmiCheck check_needed = INLINE_SMI_CHECK) {
2322 if (check_needed == INLINE_SMI_CHECK) {
2323 __ JumpIfSmi(input, is_not_string);
2324 }
2325 __ GetObjectType(input, temp1, temp1);
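// GetObjectType leaves the instance type in temp1. All string types sort
// below FIRST_NONSTRING_TYPE, so the caller tests temp1 with the 'lt'
// condition returned below against FIRST_NONSTRING_TYPE.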
2326
2327 return lt;
2328}
2329
2330
2331void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2332 Register reg = ToRegister(instr->value());
2333 Register temp1 = ToRegister(instr->temp());
2334
2335 SmiCheck check_needed =
2336 instr->hydrogen()->value()->type().IsHeapObject()
2337 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2338 Condition true_cond =
2339 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2340
2341 EmitBranch(instr, true_cond, temp1,
2342 Operand(FIRST_NONSTRING_TYPE));
2343}
2344
2345
2346void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2347 Register input_reg = EmitLoadRegister(instr->value(), at);
2348 __ And(at, input_reg, kSmiTagMask);
2349 EmitBranch(instr, eq, at, Operand(zero_reg));
2350}
2351
2352
2353void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2354 Register input = ToRegister(instr->value());
2355 Register temp = ToRegister(instr->temp());
2356
2357 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2358 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2359 }
2360 __ ld(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2361 __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2362 __ And(at, temp, Operand(1 << Map::kIsUndetectable));
2363 EmitBranch(instr, ne, at, Operand(zero_reg));
2364}
2365
2366
2367static Condition ComputeCompareCondition(Token::Value op) {
2368 switch (op) {
2369 case Token::EQ_STRICT:
2370 case Token::EQ:
2371 return eq;
2372 case Token::LT:
2373 return lt;
2374 case Token::GT:
2375 return gt;
2376 case Token::LTE:
2377 return le;
2378 case Token::GTE:
2379 return ge;
2380 default:
2381 UNREACHABLE();
2382 return kNoCondition;
2383 }
2384}
2385
2386
2387void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2388 DCHECK(ToRegister(instr->context()).is(cp));
2389 DCHECK(ToRegister(instr->left()).is(a1));
2390 DCHECK(ToRegister(instr->right()).is(a0));
2391
2392 Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
2393 CallCode(code, RelocInfo::CODE_TARGET, instr);
2394 __ LoadRoot(at, Heap::kTrueValueRootIndex);
2395 EmitBranch(instr, eq, v0, Operand(at));
2396}
2397
2398
2399static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2400 InstanceType from = instr->from();
2401 InstanceType to = instr->to();
2402 if (from == FIRST_TYPE) return to;
2403 DCHECK(from == to || to == LAST_TYPE);
2404 return from;
2405}
2406
2407
2408static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2409 InstanceType from = instr->from();
2410 InstanceType to = instr->to();
2411 if (from == to) return eq;
2412 if (to == LAST_TYPE) return hs;
2413 if (from == FIRST_TYPE) return ls;
2414 UNREACHABLE();
2415 return eq;
2416}
2417
2418
2419void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2420 Register scratch = scratch0();
2421 Register input = ToRegister(instr->value());
2422
2423 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2424 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2425 }
2426
2427 __ GetObjectType(input, scratch, scratch);
2428 EmitBranch(instr,
2429 BranchCondition(instr->hydrogen()),
2430 scratch,
2431 Operand(TestType(instr->hydrogen())));
2432}
2433
2434
2435void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2436 Register input = ToRegister(instr->value());
2437 Register result = ToRegister(instr->result());
2438
2439 __ AssertString(input);
2440
2441 __ lwu(result, FieldMemOperand(input, String::kHashFieldOffset));
2442 __ IndexFromHash(result, result);
2443}
2444
2445
2446void LCodeGen::DoHasCachedArrayIndexAndBranch(
2447 LHasCachedArrayIndexAndBranch* instr) {
2448 Register input = ToRegister(instr->value());
2449 Register scratch = scratch0();
2450
2451 __ lwu(scratch,
2452 FieldMemOperand(input, String::kHashFieldOffset));
2453 __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
2454 EmitBranch(instr, eq, at, Operand(zero_reg));
2455}
2456
2457
2458// Branches to a label or falls through with the answer in flags. Trashes
2459// the temp registers, but not the input.
2460void LCodeGen::EmitClassOfTest(Label* is_true,
2461 Label* is_false,
2462 Handle<String> class_name,
2463 Register input,
2464 Register temp,
2465 Register temp2) {
2466 DCHECK(!input.is(temp));
2467 DCHECK(!input.is(temp2));
2468 DCHECK(!temp.is(temp2));
2469
2470 __ JumpIfSmi(input, is_false);
2471
2472 __ GetObjectType(input, temp, temp2);
2473 STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
2474 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2475 __ Branch(is_true, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
2476 } else {
2477 __ Branch(is_false, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
2478 }
2479
2480 // Now we know the object is not a function: its instance type is below
 // FIRST_FUNCTION_TYPE.
2481 // Check if the constructor in the map is a function.
2482 Register instance_type = scratch1();
2483 DCHECK(!instance_type.is(temp));
2484 __ GetMapConstructor(temp, temp, temp2, instance_type);
2485
2486 // Objects with a non-function constructor have class 'Object'.
2487 if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2488 __ Branch(is_true, ne, instance_type, Operand(JS_FUNCTION_TYPE));
2489 } else {
2490 __ Branch(is_false, ne, instance_type, Operand(JS_FUNCTION_TYPE));
2491 }
2492
2493 // temp now contains the constructor function. Grab the
2494 // instance class name from there.
2495 __ ld(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2496 __ ld(temp, FieldMemOperand(temp,
2497 SharedFunctionInfo::kInstanceClassNameOffset));
2498 // The class name we are testing against is internalized since it's a literal.
2499 // The name in the constructor is internalized because of the way the context
2500 // is booted. This routine isn't expected to work for random API-created
2501 // classes and it doesn't have to because you can't access it with natives
2502 // syntax. Since both sides are internalized it is sufficient to use an
2503 // identity comparison.
2504
2505 // End with the address of this class_name instance in temp register.
2506 // On MIPS, the caller must do the comparison with Handle<String> class_name.
2507}
2508
2509
2510void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2511 Register input = ToRegister(instr->value());
2512 Register temp = scratch0();
2513 Register temp2 = ToRegister(instr->temp());
2514 Handle<String> class_name = instr->hydrogen()->class_name();
2515
2516 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2517 class_name, input, temp, temp2);
2518
2519 EmitBranch(instr, eq, temp, Operand(class_name));
2520}
2521
2522
2523void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2524 Register reg = ToRegister(instr->value());
2525 Register temp = ToRegister(instr->temp());
2526
2527 __ ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2528 EmitBranch(instr, eq, temp, Operand(instr->map()));
2529}
2530
2531
2532void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2533 DCHECK(ToRegister(instr->context()).is(cp));
2534 Label true_label, done;
2535 DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
2536 DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
2537 DCHECK(ToRegister(instr->result()).is(v0));
2538
2539 InstanceOfStub stub(isolate());
2540 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2541}
2542
2543
2544void LCodeGen::DoHasInPrototypeChainAndBranch(
2545 LHasInPrototypeChainAndBranch* instr) {
2546 Register const object = ToRegister(instr->object());
2547 Register const object_map = scratch0();
2548 Register const object_instance_type = scratch1();
2549 Register const object_prototype = object_map;
2550 Register const prototype = ToRegister(instr->prototype());
2551
2552 // The {object} must be a spec object. It's sufficient to know that {object}
2553 // is not a smi, since all other non-spec objects have {null} prototypes and
2554 // will be ruled out below.
2555 if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
2556 __ SmiTst(object, at);
2557 EmitFalseBranch(instr, eq, at, Operand(zero_reg));
2558 }
2559
2560 // Loop through the {object}s prototype chain looking for the {prototype}.
2561 __ ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
2562 Label loop;
2563 __ bind(&loop);
2564
2565 // Deoptimize if the object needs to be access checked.
2566 __ lbu(object_instance_type,
2567 FieldMemOperand(object_map, Map::kBitFieldOffset));
2568 __ And(object_instance_type, object_instance_type,
2569 Operand(1 << Map::kIsAccessCheckNeeded));
2570 DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, object_instance_type,
2571 Operand(zero_reg));
2572 __ lbu(object_instance_type,
2573 FieldMemOperand(object_map, Map::kInstanceTypeOffset));
2574 DeoptimizeIf(eq, instr, Deoptimizer::kProxy, object_instance_type,
2575 Operand(JS_PROXY_TYPE));
2576
2577 __ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
2578 EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
2579 __ LoadRoot(at, Heap::kNullValueRootIndex);
2580 EmitFalseBranch(instr, eq, object_prototype, Operand(at));
2581 __ Branch(&loop, USE_DELAY_SLOT);
2582 __ ld(object_map, FieldMemOperand(object_prototype,
2583 HeapObject::kMapOffset)); // In delay slot.
2584}
2585
2586
2587void LCodeGen::DoCmpT(LCmpT* instr) {
2588 DCHECK(ToRegister(instr->context()).is(cp));
2589 Token::Value op = instr->op();
2590
2591 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2592 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2593 // On MIPS there is no need for a "no inlined smi code" marker (nop).
2594
2595 Condition condition = ComputeCompareCondition(op);
2596 // A minor optimization that relies on LoadRoot always emitting one
2597 // instruction.
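// The first LoadRoot below lands in the branch delay slot, so the 'true'
// value is materialized whether or not the branch is taken; the
// fall-through path then overwrites it with 'false'. The DCHECK verifies
// the load really was a single instruction and thus fit in the slot.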
2598 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
2599 Label done, check;
2600 __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
2601 __ bind(&check);
2602 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2603 DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
2604 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2605 __ bind(&done);
2606}
2607
2608
2609void LCodeGen::DoReturn(LReturn* instr) {
2610 if (FLAG_trace && info()->IsOptimizing()) {
2611 // Push the return value on the stack as the parameter.
2612 // Runtime::TraceExit returns its parameter in v0. We're leaving the code
2613 // managed by the register allocator and tearing down the frame, so it's
2614 // safe to write to the context register.
2615 __ push(v0);
2616 __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2617 __ CallRuntime(Runtime::kTraceExit);
2618 }
2619 if (info()->saves_caller_doubles()) {
2620 RestoreCallerDoubles();
2621 }
2622 if (NeedsEagerFrame()) {
2623 __ mov(sp, fp);
2624 __ Pop(ra, fp);
2625 }
2626 if (instr->has_constant_parameter_count()) {
2627 int parameter_count = ToInteger32(instr->constant_parameter_count());
2628 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
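// The '+ 1' drops the receiver along with the parameters.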
2629 if (sp_delta != 0) {
2630 __ Daddu(sp, sp, Operand(sp_delta));
2631 }
2632 } else {
2633 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2634 Register reg = ToRegister(instr->parameter_count());
2635 // The argument count parameter is a smi.
2636 __ SmiUntag(reg);
2637 __ Dlsa(sp, sp, reg, kPointerSizeLog2);
2638 }
2639
2640 __ Jump(ra);
2641}
2642
2643
2644template <class T>
2645void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2646 Register vector_register = ToRegister(instr->temp_vector());
2647 Register slot_register = LoadWithVectorDescriptor::SlotRegister();
2648 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
2649 DCHECK(slot_register.is(a0));
2650
2651 AllowDeferredHandleDereference vector_structure_check;
2652 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2653 __ li(vector_register, vector);
2654 // No need to allocate this register.
2655 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2656 int index = vector->GetIndex(slot);
2657 __ li(slot_register, Operand(Smi::FromInt(index)));
2658}
2659
2660
2661template <class T>
2662void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
2663 Register vector_register = ToRegister(instr->temp_vector());
2664 Register slot_register = ToRegister(instr->temp_slot());
2665
2666 AllowDeferredHandleDereference vector_structure_check;
2667 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2668 __ li(vector_register, vector);
2669 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2670 int index = vector->GetIndex(slot);
2671 __ li(slot_register, Operand(Smi::FromInt(index)));
2672}
2673
2674
2675void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2676 DCHECK(ToRegister(instr->context()).is(cp));
2677 DCHECK(ToRegister(instr->global_object())
2678 .is(LoadDescriptor::ReceiverRegister()));
2679 DCHECK(ToRegister(instr->result()).is(v0));
2680
2681 __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
2682 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2683 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
2684 isolate(), instr->typeof_mode(), PREMONOMORPHIC)
2685 .code();
2686 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2687}
2688
2689
2690void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2691 Register context = ToRegister(instr->context());
2692 Register result = ToRegister(instr->result());
2693
2694 __ ld(result, ContextMemOperand(context, instr->slot_index()));
2695 if (instr->hydrogen()->RequiresHoleCheck()) {
2696 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2697
2698 if (instr->hydrogen()->DeoptimizesOnHole()) {
2699 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
2700 } else {
2701 Label is_not_hole;
2702 __ Branch(&is_not_hole, ne, result, Operand(at));
2703 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2704 __ bind(&is_not_hole);
2705 }
2706 }
2707}
2708
2709
2710void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2711 Register context = ToRegister(instr->context());
2712 Register value = ToRegister(instr->value());
2713 Register scratch = scratch0();
2714 MemOperand target = ContextMemOperand(context, instr->slot_index());
2715
2716 Label skip_assignment;
2717
2718 if (instr->hydrogen()->RequiresHoleCheck()) {
2719 __ ld(scratch, target);
2720 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2721
2722 if (instr->hydrogen()->DeoptimizesOnHole()) {
2723 DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at));
2724 } else {
2725 __ Branch(&skip_assignment, ne, scratch, Operand(at));
2726 }
2727 }
2728
2729 __ sd(value, target);
2730 if (instr->hydrogen()->NeedsWriteBarrier()) {
2731 SmiCheck check_needed =
2732 instr->hydrogen()->value()->type().IsHeapObject()
2733 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2734 __ RecordWriteContextSlot(context,
2735 target.offset(),
2736 value,
2737 scratch0(),
2738 GetRAState(),
2739 kSaveFPRegs,
2740 EMIT_REMEMBERED_SET,
2741 check_needed);
2742 }
2743
2744 __ bind(&skip_assignment);
2745}
2746
2747
2748void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2749 HObjectAccess access = instr->hydrogen()->access();
2750 int offset = access.offset();
2751 Register object = ToRegister(instr->object());
2752 if (access.IsExternalMemory()) {
2753 Register result = ToRegister(instr->result());
2754 MemOperand operand = MemOperand(object, offset);
2755 __ Load(result, operand, access.representation());
2756 return;
2757 }
2758
2759 if (instr->hydrogen()->representation().IsDouble()) {
2760 DoubleRegister result = ToDoubleRegister(instr->result());
2761 __ ldc1(result, FieldMemOperand(object, offset));
2762 return;
2763 }
2764
2765 Register result = ToRegister(instr->result());
2766 if (!access.IsInobject()) {
2767 __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2768 object = result;
2769 }
2770
2771 Representation representation = access.representation();
2772 if (representation.IsSmi() && SmiValuesAre32Bits() &&
2773 instr->hydrogen()->representation().IsInteger32()) {
2774 if (FLAG_debug_code) {
2775 // Verify this is really an Smi.
2776 Register scratch = scratch0();
2777 __ Load(scratch, FieldMemOperand(object, offset), representation);
2778 __ AssertSmi(scratch);
2779 }
2780
2781 // Read int value directly from upper half of the smi.
2782 STATIC_ASSERT(kSmiTag == 0);
2783 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
2784 offset = SmiWordOffset(offset);
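// On a little-endian target the smi payload occupies the upper word, so
// SmiWordOffset advances the load address by half a pointer and the plain
// 32-bit load below then reads the untagged integer directly.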
2785 representation = Representation::Integer32();
2786 }
2787 __ Load(result, FieldMemOperand(object, offset), representation);
2788}
2789
2790
2791void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2792 DCHECK(ToRegister(instr->context()).is(cp));
2793 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
2794 DCHECK(ToRegister(instr->result()).is(v0));
2795
2796 // Name is always in a2.
2797 __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
2798 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
2799 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
2800 isolate(), NOT_INSIDE_TYPEOF,
2801 instr->hydrogen()->initialization_state())
2802 .code();
2803 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2804}
2805
2806
2807void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2808 Register scratch = scratch0();
2809 Register function = ToRegister(instr->function());
2810 Register result = ToRegister(instr->result());
2811
2812 // Get the prototype or initial map from the function.
2813 __ ld(result,
2814 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2815
2816 // Check that the function has a prototype or an initial map.
2817 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2818 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
2819
2820 // If the function does not have an initial map, we're done.
2821 Label done;
2822 __ GetObjectType(result, scratch, scratch);
2823 __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
2824
2825 // Get the prototype from the initial map.
2826 __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
2827
2828 // All done.
2829 __ bind(&done);
2830}
2831
2832
2833void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2834 Register result = ToRegister(instr->result());
2835 __ LoadRoot(result, instr->index());
2836}
2837
2838
2839void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2840 Register arguments = ToRegister(instr->arguments());
2841 Register result = ToRegister(instr->result());
2842 // There are two words between the frame pointer and the last argument.
2843 // Subtracting the index from the length accounts for one of them; add one
 // more for the other.
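// E.g. with a constant length of 2, the last argument (index 1) is read
// from (2 - 1 + 1) * kPointerSize bytes above 'arguments', skipping the
// two intervening words mentioned above.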
2844 if (instr->length()->IsConstantOperand()) {
2845 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
2846 if (instr->index()->IsConstantOperand()) {
2847 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2848 int index = (const_length - const_index) + 1;
2849 __ ld(result, MemOperand(arguments, index * kPointerSize));
2850 } else {
2851 Register index = ToRegister(instr->index());
2852 __ li(at, Operand(const_length + 1));
2853 __ Dsubu(result, at, index);
2854 __ Dlsa(at, arguments, result, kPointerSizeLog2);
2855 __ ld(result, MemOperand(at));
2856 }
2857 } else if (instr->index()->IsConstantOperand()) {
2858 Register length = ToRegister(instr->length());
2859 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2860 int loc = const_index - 1;
2861 if (loc != 0) {
2862 __ Dsubu(result, length, Operand(loc));
2863 __ Dlsa(at, arguments, result, kPointerSizeLog2);
2864 __ ld(result, MemOperand(at));
2865 } else {
2866 __ Dlsa(at, arguments, length, kPointerSizeLog2);
2867 __ ld(result, MemOperand(at));
2868 }
2869 } else {
2870 Register length = ToRegister(instr->length());
2871 Register index = ToRegister(instr->index());
2872 __ Dsubu(result, length, index);
2873 __ Daddu(result, result, 1);
2874 __ Dlsa(at, arguments, result, kPointerSizeLog2);
2875 __ ld(result, MemOperand(at));
2876 }
2877}
2878
2879
2880void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
2881 Register external_pointer = ToRegister(instr->elements());
2882 Register key = no_reg;
2883 ElementsKind elements_kind = instr->elements_kind();
2884 bool key_is_constant = instr->key()->IsConstantOperand();
2885 int constant_key = 0;
2886 if (key_is_constant) {
2887 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2888 if (constant_key & 0xF0000000) {
2889 Abort(kArrayIndexConstantValueTooBig);
2890 }
2891 } else {
2892 key = ToRegister(instr->key());
2893 }
2894 int element_size_shift = ElementsKindToShiftSize(elements_kind);
2895 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
2896 ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
2897 : element_size_shift;
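// A smi key carries the index shifted left by 32, so shift_size goes
// negative and the key is shifted right instead: e.g. for FLOAT64_ELEMENTS
// shift_size is 3 - 32 = -29, and dsra(key, 29) yields index * 8 directly.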
2898 int base_offset = instr->base_offset();
2899
2900 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
2901 FPURegister result = ToDoubleRegister(instr->result());
2902 if (key_is_constant) {
2903 __ Daddu(scratch0(), external_pointer,
2904 constant_key << element_size_shift);
2905 } else {
2906 if (shift_size < 0) {
2907 if (shift_size == -32) {
2908 __ dsra32(scratch0(), key, 0);
2909 } else {
2910 __ dsra(scratch0(), key, -shift_size);
2911 }
2912 } else {
2913 __ dsll(scratch0(), key, shift_size);
2914 }
2915 __ Daddu(scratch0(), scratch0(), external_pointer);
2916 }
2917 if (elements_kind == FLOAT32_ELEMENTS) {
2918 __ lwc1(result, MemOperand(scratch0(), base_offset));
2919 __ cvt_d_s(result, result);
2920 } else { // i.e. elements_kind == FLOAT64_ELEMENTS
2921 __ ldc1(result, MemOperand(scratch0(), base_offset));
2922 }
2923 } else {
2924 Register result = ToRegister(instr->result());
2925 MemOperand mem_operand = PrepareKeyedOperand(
2926 key, external_pointer, key_is_constant, constant_key,
2927 element_size_shift, shift_size, base_offset);
2928 switch (elements_kind) {
2929 case INT8_ELEMENTS:
2930 __ lb(result, mem_operand);
2931 break;
2932 case UINT8_ELEMENTS:
2933 case UINT8_CLAMPED_ELEMENTS:
2934 __ lbu(result, mem_operand);
2935 break;
2936 case INT16_ELEMENTS:
2937 __ lh(result, mem_operand);
2938 break;
2939 case UINT16_ELEMENTS:
2940 __ lhu(result, mem_operand);
2941 break;
2942 case INT32_ELEMENTS:
2943 __ lw(result, mem_operand);
2944 break;
2945 case UINT32_ELEMENTS:
2946 __ lw(result, mem_operand);
2947 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
2948 DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue,
2949 result, Operand(0x80000000));
2950 }
2951 break;
2952 case FLOAT32_ELEMENTS:
2953 case FLOAT64_ELEMENTS:
2954 case FAST_DOUBLE_ELEMENTS:
2955 case FAST_ELEMENTS:
2956 case FAST_SMI_ELEMENTS:
2957 case FAST_HOLEY_DOUBLE_ELEMENTS:
2958 case FAST_HOLEY_ELEMENTS:
2959 case FAST_HOLEY_SMI_ELEMENTS:
2960 case DICTIONARY_ELEMENTS:
2961 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
2962 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
2963 case FAST_STRING_WRAPPER_ELEMENTS:
2964 case SLOW_STRING_WRAPPER_ELEMENTS:
2965 case NO_ELEMENTS:
2966 UNREACHABLE();
2967 break;
2968 }
2969 }
2970}
2971
2972
2973void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
2974 Register elements = ToRegister(instr->elements());
2975 bool key_is_constant = instr->key()->IsConstantOperand();
2976 Register key = no_reg;
2977 DoubleRegister result = ToDoubleRegister(instr->result());
2978 Register scratch = scratch0();
2979
2980 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
2981
2982 int base_offset = instr->base_offset();
2983 if (key_is_constant) {
2984 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2985 if (constant_key & 0xF0000000) {
2986 Abort(kArrayIndexConstantValueTooBig);
2987 }
2988 base_offset += constant_key * kDoubleSize;
2989 }
2990 __ Daddu(scratch, elements, Operand(base_offset));
2991
2992 if (!key_is_constant) {
2993 key = ToRegister(instr->key());
2994 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
2995 ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
2996 : element_size_shift;
2997 if (shift_size > 0) {
2998 __ dsll(at, key, shift_size);
2999 } else if (shift_size == -32) {
3000 __ dsra32(at, key, 0);
3001 } else {
3002 __ dsra(at, key, -shift_size);
3003 }
3004 __ Daddu(scratch, scratch, at);
3005 }
3006
3007 __ ldc1(result, MemOperand(scratch));
3008
3009 if (instr->hydrogen()->RequiresHoleCheck()) {
3010 __ FmoveHigh(scratch, result);
3011 DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch,
3012 Operand(static_cast<int32_t>(kHoleNanUpper32)));
3013 }
3014}
3015
3016
3017void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3018 HLoadKeyed* hinstr = instr->hydrogen();
3019 Register elements = ToRegister(instr->elements());
3020 Register result = ToRegister(instr->result());
3021 Register scratch = scratch0();
3022 Register store_base = scratch;
3023 int offset = instr->base_offset();
3024
3025 if (instr->key()->IsConstantOperand()) {
3026 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3027 offset += ToInteger32(const_operand) * kPointerSize;
3028 store_base = elements;
3029 } else {
3030 Register key = ToRegister(instr->key());
3031 // Even though the HLoadKeyed instruction forces the input
3032 // representation for the key to be an integer, the input gets replaced
3033 // during bounds check elimination with the index argument to the bounds
3034 // check, which can be tagged, so that case must be handled here, too.
3035 if (instr->hydrogen()->key()->representation().IsSmi()) {
3036 __ SmiScale(scratch, key, kPointerSizeLog2);
3037 __ daddu(scratch, elements, scratch);
3038 } else {
3039 __ Dlsa(scratch, elements, key, kPointerSizeLog2);
3040 }
3041 }
3042
3043 Representation representation = hinstr->representation();
3044 if (representation.IsInteger32() && SmiValuesAre32Bits() &&
3045 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3046 DCHECK(!hinstr->RequiresHoleCheck());
3047 if (FLAG_debug_code) {
3048 Register temp = scratch1();
3049 __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
3050 __ AssertSmi(temp);
3051 }
3052
3053 // Read int value directly from upper half of the smi.
3054 STATIC_ASSERT(kSmiTag == 0);
3055 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3056 offset = SmiWordOffset(offset);
3057 }
3058
3059 __ Load(result, MemOperand(store_base, offset), representation);
3060
3061 // Check for the hole value.
3062 if (hinstr->RequiresHoleCheck()) {
3063 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3064 __ SmiTst(result, scratch);
3065 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch,
3066 Operand(zero_reg));
3067 } else {
3068 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3069 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch));
3070 }
3071 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
3072 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
3073 Label done;
3074 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3075 __ Branch(&done, ne, result, Operand(scratch));
3076 if (info()->IsStub()) {
3077 // A stub can safely convert the hole to undefined only if the array
3078 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
3079 // it needs to bail out.
3080 __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
3081 // The comparison only needs LS bits of value, which is a smi.
3082 __ ld(result, FieldMemOperand(result, Cell::kValueOffset));
3083 DeoptimizeIf(ne, instr, Deoptimizer::kHole, result,
3084 Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
3085 }
3086 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3087 __ bind(&done);
3088 }
3089}
3090
3091
3092void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3093 if (instr->is_fixed_typed_array()) {
3094 DoLoadKeyedExternalArray(instr);
3095 } else if (instr->hydrogen()->representation().IsDouble()) {
3096 DoLoadKeyedFixedDoubleArray(instr);
3097 } else {
3098 DoLoadKeyedFixedArray(instr);
3099 }
3100}
3101
3102
3103MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3104 Register base,
3105 bool key_is_constant,
3106 int constant_key,
3107 int element_size,
3108 int shift_size,
3109 int base_offset) {
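// Effectively returns MemOperand(base + (key << shift_size) + base_offset),
// where a negative shift_size means an arithmetic right shift that untags a
// smi key, and a constant key is folded into the displacement.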
3110 if (key_is_constant) {
3111 return MemOperand(base, (constant_key << element_size) + base_offset);
3112 }
3113
3114 if (base_offset == 0) {
3115 if (shift_size >= 0) {
3116 __ dsll(scratch0(), key, shift_size);
3117 __ Daddu(scratch0(), base, scratch0());
3118 return MemOperand(scratch0());
3119 } else {
3120 if (shift_size == -32) {
3121 __ dsra32(scratch0(), key, 0);
3122 } else {
3123 __ dsra(scratch0(), key, -shift_size);
3124 }
3125 __ Daddu(scratch0(), base, scratch0());
3126 return MemOperand(scratch0());
3127 }
3128 }
3129
3130 if (shift_size >= 0) {
3131 __ dsll(scratch0(), key, shift_size);
3132 __ Daddu(scratch0(), base, scratch0());
3133 return MemOperand(scratch0(), base_offset);
3134 } else {
3135 if (shift_size == -32) {
3136 __ dsra32(scratch0(), key, 0);
3137 } else {
3138 __ dsra(scratch0(), key, -shift_size);
3139 }
3140 __ Daddu(scratch0(), base, scratch0());
3141 return MemOperand(scratch0(), base_offset);
3142 }
3143}
3144
3145
3146void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3147 DCHECK(ToRegister(instr->context()).is(cp));
3148 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3149 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3150
3151 if (instr->hydrogen()->HasVectorAndSlot()) {
3152 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3153 }
3154
3155 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
3156 isolate(), instr->hydrogen()->initialization_state())
3157 .code();
3158 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3159}
3160
3161
3162void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3163 Register scratch = scratch0();
3164 Register temp = scratch1();
3165 Register result = ToRegister(instr->result());
3166
3167 if (instr->hydrogen()->from_inlined()) {
3168 __ Dsubu(result, sp, 2 * kPointerSize);
3169 } else if (instr->hydrogen()->arguments_adaptor()) {
3170 // Check if the calling frame is an arguments adaptor frame.
3171 Label done, adapted;
3172 __ ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3173 __ ld(result,
3174 MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
3175 __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3176
3177 // Result is the frame pointer of this frame if not adapted, or of the
3178 // real frame below the adaptor frame if adapted.
3179 __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
3180 __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
3181 } else {
3182 __ mov(result, fp);
3183 }
3184}
3185
3186
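// The elements register below holds the frame pointer produced by
// DoArgumentsElements: when it still equals fp there is no adaptor frame
// and the argument count is simply the static parameter count.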
3187void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3188 Register elem = ToRegister(instr->elements());
3189 Register result = ToRegister(instr->result());
3190
3191 Label done;
3192
3193 // If there is no arguments adaptor frame, the number of arguments is fixed.
3194 __ Daddu(result, zero_reg, Operand(scope()->num_parameters()));
3195 __ Branch(&done, eq, fp, Operand(elem));
3196
3197 // Arguments adaptor frame present. Get argument length from there.
3198 __ ld(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3199 __ ld(result,
3200 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3201 __ SmiUntag(result);
3202
3203 // Argument length is in result register.
3204 __ bind(&done);
3205}
3206
3207
3208void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3209 Register receiver = ToRegister(instr->receiver());
3210 Register function = ToRegister(instr->function());
3211 Register result = ToRegister(instr->result());
3212 Register scratch = scratch0();
3213
3214 // If the receiver is null or undefined, we have to pass the global
3215 // object as a receiver to normal functions. Values have to be
3216 // passed unchanged to builtins and strict-mode functions.
3217 Label global_object, result_in_receiver;
3218
3219 if (!instr->hydrogen()->known_function()) {
3220 // Do not transform the receiver to object for strict mode functions.
3221 __ ld(scratch,
3222 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3223
3224 // Do not transform the receiver to object for builtins.
3225 int32_t strict_mode_function_mask =
3226 1 << SharedFunctionInfo::kStrictModeBitWithinByte;
3227 int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
3228
3229 __ lbu(at,
3230 FieldMemOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset));
3231 __ And(at, at, Operand(strict_mode_function_mask));
3232 __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
3233 __ lbu(at,
3234 FieldMemOperand(scratch, SharedFunctionInfo::kNativeByteOffset));
3235 __ And(at, at, Operand(native_mask));
3236 __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
3237 }
3238
3239 // Normal function. Replace undefined or null with global receiver.
3240 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3241 __ Branch(&global_object, eq, receiver, Operand(scratch));
3242 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3243 __ Branch(&global_object, eq, receiver, Operand(scratch));
3244
3245 // Deoptimize if the receiver is not a JS object.
3246 __ SmiTst(receiver, scratch);
3247 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg));
3248
3249 __ GetObjectType(receiver, scratch, scratch);
3250 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
3251 Operand(FIRST_JS_RECEIVER_TYPE));
3252 __ Branch(&result_in_receiver);
3253
3254 __ bind(&global_object);
3255 __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset));
3256 __ ld(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
3257 __ ld(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
3258
3259 if (result.is(receiver)) {
3260 __ bind(&result_in_receiver);
3261 } else {
3262 Label result_ok;
3263 __ Branch(&result_ok);
3264 __ bind(&result_in_receiver);
3265 __ mov(result, receiver);
3266 __ bind(&result_ok);
3267 }
3268}
3269
3270
3271void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3272 Register receiver = ToRegister(instr->receiver());
3273 Register function = ToRegister(instr->function());
3274 Register length = ToRegister(instr->length());
3275 Register elements = ToRegister(instr->elements());
3276 Register scratch = scratch0();
3277 DCHECK(receiver.is(a0)); // Used for parameter count.
3278 DCHECK(function.is(a1)); // Required by InvokeFunction.
3279 DCHECK(ToRegister(instr->result()).is(v0));
3280
3281 // Copy the arguments to this function possibly from the
3282 // adaptor frame below it.
3283 const uint32_t kArgumentsLimit = 1 * KB;
3284 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length,
3285 Operand(kArgumentsLimit));
3286
3287 // Push the receiver and use the register to keep the original
3288 // number of arguments.
3289 __ push(receiver);
3290 __ Move(receiver, length);
3291 // The arguments start one pointer size past the elements pointer.
3292 __ Daddu(elements, elements, Operand(1 * kPointerSize));
3293
3294 // Loop through the arguments pushing them onto the execution
3295 // stack.
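 // Arguments are pushed starting from the last one, so the first argument
 // ends up on top of the stack; the address scale in scratch is
 // recomputed in each branch delay slot.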
3296 Label invoke, loop;
3297 // length is a small non-negative integer, due to the test above.
3298 __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
3299 __ dsll(scratch, length, kPointerSizeLog2);
3300 __ bind(&loop);
3301 __ Daddu(scratch, elements, scratch);
3302 __ ld(scratch, MemOperand(scratch));
3303 __ push(scratch);
3304 __ Dsubu(length, length, Operand(1));
3305 __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
3306 __ dsll(scratch, length, kPointerSizeLog2);
3307
3308 __ bind(&invoke);
3309
3310 InvokeFlag flag = CALL_FUNCTION;
3311 if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
3312 DCHECK(!info()->saves_caller_doubles());
3313 // TODO(ishell): drop current frame before pushing arguments to the stack.
3314 flag = JUMP_FUNCTION;
3315 ParameterCount actual(a0);
3316 // It is safe to use t0, t1 and t2 as scratch registers here given that
3317 // we are not going to return to caller function anyway.
3318 PrepareForTailCall(actual, t0, t1, t2);
3319 }
3320
3321 DCHECK(instr->HasPointerMap());
3322 LPointerMap* pointers = instr->pointer_map();
3323 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
3324 // The number of arguments is stored in receiver which is a0, as expected
3325 // by InvokeFunction.
3326 ParameterCount actual(receiver);
3327 __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
3328}
3329
3330
3331void LCodeGen::DoPushArgument(LPushArgument* instr) {
3332 LOperand* argument = instr->value();
3333 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3334 Abort(kDoPushArgumentNotImplementedForDoubleType);
3335 } else {
3336 Register argument_reg = EmitLoadRegister(argument, at);
3337 __ push(argument_reg);
3338 }
3339}
3340
3341
3342void LCodeGen::DoDrop(LDrop* instr) {
3343 __ Drop(instr->count());
3344}
3345
3346
3347void LCodeGen::DoThisFunction(LThisFunction* instr) {
3348 Register result = ToRegister(instr->result());
3349 __ ld(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3350}
3351
3352
3353void LCodeGen::DoContext(LContext* instr) {
3354 // If there is a non-return use, the context must be moved to a register.
3355 Register result = ToRegister(instr->result());
3356 if (info()->IsOptimizing()) {
3357 __ ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3358 } else {
3359 // If there is no frame, the context must be in cp.
3360 DCHECK(result.is(cp));
3361 }
3362}
3363
3364
3365void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3366 DCHECK(ToRegister(instr->context()).is(cp));
3367 __ li(scratch0(), instr->hydrogen()->pairs());
3368 __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3369 __ Push(scratch0(), scratch1());
3370 CallRuntime(Runtime::kDeclareGlobals, instr);
3371}
3372
3373void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3374 int formal_parameter_count, int arity,
3375 bool is_tail_call, LInstruction* instr) {
3376 bool dont_adapt_arguments =
3377 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3378 bool can_invoke_directly =
3379 dont_adapt_arguments || formal_parameter_count == arity;
3380
3381 Register function_reg = a1;
3382 LPointerMap* pointers = instr->pointer_map();
3383
3384 if (can_invoke_directly) {
3385 // Change context.
3386 __ ld(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
3387
3388 // Always initialize new target and number of actual arguments.
3389 __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
3390 __ li(a0, Operand(arity));
3391
3392 bool is_self_call = function.is_identical_to(info()->closure());
3393
3394 // Invoke function.
3395 if (is_self_call) {
3396 Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
3397 if (is_tail_call) {
3398 __ Jump(self, RelocInfo::CODE_TARGET);
3399 } else {
3400 __ Call(self, RelocInfo::CODE_TARGET);
3401 }
3402 } else {
3403 __ ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
3404 if (is_tail_call) {
3405 __ Jump(at);
3406 } else {
3407 __ Call(at);
3408 }
3409 }
3410
3411 if (!is_tail_call) {
3412 // Set up deoptimization.
3413 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3414 }
3415 } else {
3416 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3417 ParameterCount actual(arity);
3418 ParameterCount expected(formal_parameter_count);
3419 InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
3420 __ InvokeFunction(function_reg, expected, actual, flag, generator);
3421 }
3422}
3423
3424
3425void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3426 DCHECK(instr->context() != NULL);
3427 DCHECK(ToRegister(instr->context()).is(cp));
3428 Register input = ToRegister(instr->value());
3429 Register result = ToRegister(instr->result());
3430 Register scratch = scratch0();
3431
3432 // Deoptimize if not a heap number.
3433 __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3434 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3435 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at));
3436
3437 Label done;
3438 Register exponent = scratch0();
3439 scratch = no_reg;
3440 __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3441 // Check the sign of the argument. If the argument is positive, just
3442 // return it.
3443 __ Move(result, input);
3444 __ And(at, exponent, Operand(HeapNumber::kSignMask));
3445 __ Branch(&done, eq, at, Operand(zero_reg));
3446
3447 // Input is negative. Reverse its sign.
3448 // Preserve the value of all registers.
3449 {
3450 PushSafepointRegistersScope scope(this);
3451
3452 // Registers were saved at the safepoint, so we can use
3453 // many scratch registers.
3454 Register tmp1 = input.is(a1) ? a0 : a1;
3455 Register tmp2 = input.is(a2) ? a0 : a2;
3456 Register tmp3 = input.is(a3) ? a0 : a3;
3457 Register tmp4 = input.is(a4) ? a0 : a4;
3458
3459 // exponent: floating point exponent value.
3460
3461 Label allocated, slow;
3462 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3463 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3464 __ Branch(&allocated);
3465
3466 // Slow case: Call the runtime system to do the number allocation.
3467 __ bind(&slow);
3468
3469 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3470 instr->context());
3471 // Set the pointer to the new heap number in tmp.
3472 if (!tmp1.is(v0))
3473 __ mov(tmp1, v0);
3474 // Restore input_reg after call to runtime.
3475 __ LoadFromSafepointRegisterSlot(input, input);
3476 __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3477
3478 __ bind(&allocated);
3479 // exponent: floating point exponent value.
3480 // tmp1: allocated heap number.
3481 __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
3482 __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3483 __ lwu(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3484 __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3485
3486 __ StoreToSafepointRegisterSlot(tmp1, result);
3487 }
3488
3489 __ bind(&done);
3490}
3491
3492
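// Both abs helpers below rely on a MIPS branch delay slot: the mov that
// copies input to result executes whether or not the branch is taken, so
// the non-negative case falls through with the value already in place.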
3493void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3494 Register input = ToRegister(instr->value());
3495 Register result = ToRegister(instr->result());
3496 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
3497 Label done;
3498 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
3499 __ mov(result, input);
3500 __ subu(result, zero_reg, input);
3501 // Overflow if result is still negative, i.e. 0x80000000.
3502 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
3503 __ bind(&done);
3504}
3505
3506
3507void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
3508 Register input = ToRegister(instr->value());
3509 Register result = ToRegister(instr->result());
3510 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
3511 Label done;
3512 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
3513 __ mov(result, input);
3514 __ dsubu(result, zero_reg, input);
3515 // Overflow if result is still negative, i.e. 0x80000000 00000000.
3516 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
3517 __ bind(&done);
3518}
3519
3520
3521void LCodeGen::DoMathAbs(LMathAbs* instr) {
3522 // Class for deferred case.
3523 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3524 public:
3525 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3526 : LDeferredCode(codegen), instr_(instr) { }
3527 void Generate() override {
3528 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3529 }
3530 LInstruction* instr() override { return instr_; }
3531
3532 private:
3533 LMathAbs* instr_;
3534 };
3535
3536 Representation r = instr->hydrogen()->value()->representation();
3537 if (r.IsDouble()) {
3538 FPURegister input = ToDoubleRegister(instr->value());
3539 FPURegister result = ToDoubleRegister(instr->result());
3540 __ abs_d(result, input);
3541 } else if (r.IsInteger32()) {
3542 EmitIntegerMathAbs(instr);
3543 } else if (r.IsSmi()) {
3544 EmitSmiMathAbs(instr);
3545 } else {
3546 // Representation is tagged.
3547 DeferredMathAbsTaggedHeapNumber* deferred =
3548 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3549 Register input = ToRegister(instr->value());
3550 // Smi check.
3551 __ JumpIfNotSmi(input, deferred->entry());
3552 // If smi, handle it directly.
3553 EmitSmiMathAbs(instr);
3554 __ bind(deferred->exit());
3555 }
3556}
3557
3558
3559void LCodeGen::DoMathFloor(LMathFloor* instr) {
3560 DoubleRegister input = ToDoubleRegister(instr->value());
3561 Register result = ToRegister(instr->result());
3562 Register scratch1 = scratch0();
3563 Register except_flag = ToRegister(instr->temp());
3564
3565 __ EmitFPUTruncate(kRoundToMinusInf,
3566 result,
3567 input,
3568 scratch1,
3569 double_scratch0(),
3570 except_flag);
3571
3572 // Deopt if the operation did not succeed.
3573 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
3574 Operand(zero_reg));
3575
3576 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3577 // Test for -0.
3578 Label done;
3579 __ Branch(&done, ne, result, Operand(zero_reg));
3580 __ mfhc1(scratch1, input); // Get exponent/sign bits.
3581 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
3582 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
3583 Operand(zero_reg));
3584 __ bind(&done);
3585 }
3586}
3587
3588
3589void LCodeGen::DoMathRound(LMathRound* instr) {
3590 DoubleRegister input = ToDoubleRegister(instr->value());
3591 Register result = ToRegister(instr->result());
3592 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3593 Register scratch = scratch0();
3594 Label done, check_sign_on_zero;
3595
3596 // Extract exponent bits.
3597 __ mfhc1(result, input);
3598 __ Ext(scratch,
3599 result,
3600 HeapNumber::kExponentShift,
3601 HeapNumber::kExponentBits);
3602
3603 // If the number is in ]-0.5, +0.5[, the result is +/- 0.
3604 Label skip1;
3605 __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
3606 __ mov(result, zero_reg);
3607 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3608 __ Branch(&check_sign_on_zero);
3609 } else {
3610 __ Branch(&done);
3611 }
3612 __ bind(&skip1);
3613
3614 // The following conversion will not work with numbers
3615 // outside of ]-2^32, 2^32[.
3616 DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
3617 Operand(HeapNumber::kExponentBias + 32));
3618
3619 // Save the original sign for later comparison.
3620 __ And(scratch, result, Operand(HeapNumber::kSignMask));
3621
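 // Rounding is implemented as floor(input + 0.5): add 0.5 here, then
 // truncate toward minus infinity further below.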
3622 __ Move(double_scratch0(), 0.5);
3623 __ add_d(double_scratch0(), input, double_scratch0());
3624
3625 // Check sign of the result: if the sign changed, the input
3626 // value was in [-0.5, 0[ and the result should be -0.
3627 __ mfhc1(result, double_scratch0());
3628 // mfhc1 sign-extends, clear the upper bits.
3629 __ dsll32(result, result, 0);
3630 __ dsrl32(result, result, 0);
3631 __ Xor(result, result, Operand(scratch));
3632 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3633 // ARM uses 'mi' here, which is 'lt'
3634 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
3635 } else {
3636 Label skip2;
3637 // ARM uses 'mi' here, which is 'lt'
3638 // Negating it results in 'ge'
3639 __ Branch(&skip2, ge, result, Operand(zero_reg));
3640 __ mov(result, zero_reg);
3641 __ Branch(&done);
3642 __ bind(&skip2);
3643 }
3644
3645 Register except_flag = scratch;
3646 __ EmitFPUTruncate(kRoundToMinusInf,
3647 result,
3648 double_scratch0(),
3649 at,
3650 double_scratch1,
3651 except_flag);
3652
3653 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
3654 Operand(zero_reg));
3655
3656 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3657 // Test for -0.
3658 __ Branch(&done, ne, result, Operand(zero_reg));
3659 __ bind(&check_sign_on_zero);
3660 __ mfhc1(scratch, input); // Get exponent/sign bits.
3661 __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
3662 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch,
3663 Operand(zero_reg));
3664 }
3665 __ bind(&done);
3666}
3667
3668
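// Math.fround: the narrow-then-widen conversion pair rounds the double to
// the nearest value representable as a float32.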
3669void LCodeGen::DoMathFround(LMathFround* instr) {
3670 DoubleRegister input = ToDoubleRegister(instr->value());
3671 DoubleRegister result = ToDoubleRegister(instr->result());
3672 __ cvt_s_d(result, input);
3673 __ cvt_d_s(result, result);
3674}
3675
3676
3677void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3678 DoubleRegister input = ToDoubleRegister(instr->value());
3679 DoubleRegister result = ToDoubleRegister(instr->result());
3680 __ sqrt_d(result, input);
3681}
3682
3683
3684void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3685 DoubleRegister input = ToDoubleRegister(instr->value());
3686 DoubleRegister result = ToDoubleRegister(instr->result());
3687 DoubleRegister temp = ToDoubleRegister(instr->temp());
3688
3689 DCHECK(!input.is(result));
3690
3691 // Note that according to ECMA-262 15.8.2.13:
3692 // Math.pow(-Infinity, 0.5) == Infinity
3693 // Math.sqrt(-Infinity) == NaN
3694 Label done;
3695 __ Move(temp, static_cast<double>(-V8_INFINITY));
3696 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
3697 // Set up Infinity in the delay slot.
3698 // result is overwritten if the branch is not taken.
3699 __ neg_d(result, temp);
3700
3701 // Add +0 to convert -0 to +0.
3702 __ add_d(result, input, kDoubleRegZero);
3703 __ sqrt_d(result, result);
3704 __ bind(&done);
3705}
3706
3707
3708void LCodeGen::DoPower(LPower* instr) {
3709 Representation exponent_type = instr->hydrogen()->right()->representation();
3710 // Having marked this as a call, we can use any registers.
3711 // Just make sure that the input/output registers are the expected ones.
3712 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3713 DCHECK(!instr->right()->IsDoubleRegister() ||
3714 ToDoubleRegister(instr->right()).is(f4));
3715 DCHECK(!instr->right()->IsRegister() ||
3716 ToRegister(instr->right()).is(tagged_exponent));
3717 DCHECK(ToDoubleRegister(instr->left()).is(f2));
3718 DCHECK(ToDoubleRegister(instr->result()).is(f0));
3719
3720 if (exponent_type.IsSmi()) {
3721 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3722 __ CallStub(&stub);
3723 } else if (exponent_type.IsTagged()) {
3724 Label no_deopt;
3725 __ JumpIfSmi(tagged_exponent, &no_deopt);
3726 DCHECK(!a7.is(tagged_exponent));
3727 __ lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3728 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3729 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, a7, Operand(at));
3730 __ bind(&no_deopt);
3731 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3732 __ CallStub(&stub);
3733 } else if (exponent_type.IsInteger32()) {
3734 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3735 __ CallStub(&stub);
3736 } else {
3737 DCHECK(exponent_type.IsDouble());
3738 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3739 __ CallStub(&stub);
3740 }
3741}
3742
3743
3744void LCodeGen::DoMathExp(LMathExp* instr) {
3745 DoubleRegister input = ToDoubleRegister(instr->value());
3746 DoubleRegister result = ToDoubleRegister(instr->result());
3747 DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3748 DoubleRegister double_scratch2 = double_scratch0();
3749 Register temp1 = ToRegister(instr->temp1());
3750 Register temp2 = ToRegister(instr->temp2());
3751
3752 MathExpGenerator::EmitMathExp(
3753 masm(), input, result, double_scratch1, double_scratch2,
3754 temp1, temp2, scratch0());
3755}
3756
3757
3758void LCodeGen::DoMathLog(LMathLog* instr) {
3759 __ PrepareCallCFunction(0, 1, scratch0());
3760 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3761 __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
3762 0, 1);
3763 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3764}
3765
3766
3767void LCodeGen::DoMathClz32(LMathClz32* instr) {
3768 Register input = ToRegister(instr->value());
3769 Register result = ToRegister(instr->result());
3770 __ Clz(result, input);
3771}
3772
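// Tail-call preparation: if the caller sits on top of an arguments
// adaptor frame, that frame is dropped along with the current one so the
// callee reuses this stack space; the frame surgery itself is delegated
// to the masm-level PrepareForTailCall at the end.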
3773void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
3774 Register scratch1, Register scratch2,
3775 Register scratch3) {
3776#if DEBUG
3777 if (actual.is_reg()) {
3778 DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
3779 } else {
3780 DCHECK(!AreAliased(scratch1, scratch2, scratch3));
3781 }
3782#endif
3783 if (FLAG_code_comments) {
3784 if (actual.is_reg()) {
3785 Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
3786 } else {
3787 Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
3788 }
3789 }
3790
3791 // Check if next frame is an arguments adaptor frame.
3792 Register caller_args_count_reg = scratch1;
3793 Label no_arguments_adaptor, formal_parameter_count_loaded;
3794 __ ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3795 __ ld(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
3796 __ Branch(&no_arguments_adaptor, ne, scratch3,
3797 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3798
3799 // Drop current frame and load arguments count from arguments adaptor frame.
3800 __ mov(fp, scratch2);
3801 __ ld(caller_args_count_reg,
3802 MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
3803 __ SmiUntag(caller_args_count_reg);
3804 __ Branch(&formal_parameter_count_loaded);
3805
3806 __ bind(&no_arguments_adaptor);
3807 // Load the caller's formal parameter count.
3808 __ li(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
3809
3810 __ bind(&formal_parameter_count_loaded);
3811 __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
3812
3813 Comment(";;; }");
3814}
3815
3816void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3817 HInvokeFunction* hinstr = instr->hydrogen();
3818 DCHECK(ToRegister(instr->context()).is(cp));
3819 DCHECK(ToRegister(instr->function()).is(a1));
3820 DCHECK(instr->HasPointerMap());
3821
3822 bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
3823
3824 if (is_tail_call) {
3825 DCHECK(!info()->saves_caller_doubles());
3826 ParameterCount actual(instr->arity());
3827 // It is safe to use t0, t1 and t2 as scratch registers here given that
3828 // we are not going to return to caller function anyway.
3829 PrepareForTailCall(actual, t0, t1, t2);
3830 }
3831
3832 Handle<JSFunction> known_function = hinstr->known_function();
3833 if (known_function.is_null()) {
3834 LPointerMap* pointers = instr->pointer_map();
3835 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3836 ParameterCount actual(instr->arity());
3837 InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
3838 __ InvokeFunction(a1, no_reg, actual, flag, generator);
3839 } else {
3840 CallKnownFunction(known_function, hinstr->formal_parameter_count(),
3841 instr->arity(), is_tail_call, instr);
3842 }
3843}
3844
3845
3846void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3847 DCHECK(ToRegister(instr->result()).is(v0));
3848
3849 if (instr->hydrogen()->IsTailCall()) {
3850 if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
3851
3852 if (instr->target()->IsConstantOperand()) {
3853 LConstantOperand* target = LConstantOperand::cast(instr->target());
3854 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3855 __ Jump(code, RelocInfo::CODE_TARGET);
3856 } else {
3857 DCHECK(instr->target()->IsRegister());
3858 Register target = ToRegister(instr->target());
3859 __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3860 __ Jump(target);
3861 }
3862 } else {
3863 LPointerMap* pointers = instr->pointer_map();
3864 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3865
3866 if (instr->target()->IsConstantOperand()) {
3867 LConstantOperand* target = LConstantOperand::cast(instr->target());
3868 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3869 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3870 __ Call(code, RelocInfo::CODE_TARGET);
3871 } else {
3872 DCHECK(instr->target()->IsRegister());
3873 Register target = ToRegister(instr->target());
3874 generator.BeforeCall(__ CallSize(target));
3875 __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3876 __ Call(target);
3877 }
3878 generator.AfterCall();
3879 }
3880}
3881
3882
3883void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3884 DCHECK(ToRegister(instr->context()).is(cp));
3885 DCHECK(ToRegister(instr->constructor()).is(a1));
3886 DCHECK(ToRegister(instr->result()).is(v0));
3887
3888 __ li(a0, Operand(instr->arity()));
3889 if (instr->arity() == 1) {
3890 // We only need the allocation site for the case we have a length argument.
3891 // The case may bail out to the runtime, which will determine the correct
3892 // elements kind with the site.
3893 __ li(a2, instr->hydrogen()->site());
3894 } else {
3895 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
3896 }
3897 ElementsKind kind = instr->hydrogen()->elements_kind();
3898 AllocationSiteOverrideMode override_mode =
3899 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
3900 ? DISABLE_ALLOCATION_SITES
3901 : DONT_OVERRIDE;
3902
3903 if (instr->arity() == 0) {
3904 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
3905 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3906 } else if (instr->arity() == 1) {
3907 Label done;
3908 if (IsFastPackedElementsKind(kind)) {
3909 Label packed_case;
3910 // The single argument is the length: zero keeps the packed kind,
3911 // while a nonzero length introduces holes, so inspect it first.
3912 __ ld(a5, MemOperand(sp, 0));
3913 __ Branch(&packed_case, eq, a5, Operand(zero_reg));
3914
3915 ElementsKind holey_kind = GetHoleyElementsKind(kind);
3916 ArraySingleArgumentConstructorStub stub(isolate(),
3917 holey_kind,
3918 override_mode);
3919 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3920 __ jmp(&done);
3921 __ bind(&packed_case);
3922 }
3923
3924 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
3925 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3926 __ bind(&done);
3927 } else {
3928 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
3929 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3930 }
3931}
3932
3933
3934void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3935 CallRuntime(instr->function(), instr->arity(), instr);
3936}
3937
3938
3939void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3940 Register function = ToRegister(instr->function());
3941 Register code_object = ToRegister(instr->code_object());
3942 __ Daddu(code_object, code_object,
3943 Operand(Code::kHeaderSize - kHeapObjectTag));
3944 __ sd(code_object,
3945 FieldMemOperand(function, JSFunction::kCodeEntryOffset));
3946}
3947
3948
3949void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3950 Register result = ToRegister(instr->result());
3951 Register base = ToRegister(instr->base_object());
3952 if (instr->offset()->IsConstantOperand()) {
3953 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
3954 __ Daddu(result, base, Operand(ToInteger32(offset)));
3955 } else {
3956 Register offset = ToRegister(instr->offset());
3957 __ Daddu(result, base, offset);
3958 }
3959}
3960
3961
3962void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3963 Representation representation = instr->representation();
3964
3965 Register object = ToRegister(instr->object());
3966 Register scratch2 = scratch1();
3967 Register scratch1 = scratch0();
3968
3969 HObjectAccess access = instr->hydrogen()->access();
3970 int offset = access.offset();
3971 if (access.IsExternalMemory()) {
3972 Register value = ToRegister(instr->value());
3973 MemOperand operand = MemOperand(object, offset);
3974 __ Store(value, operand, representation);
3975 return;
3976 }
3977
3978 __ AssertNotSmi(object);
3979
3980 DCHECK(!representation.IsSmi() ||
3981 !instr->value()->IsConstantOperand() ||
3982 IsSmi(LConstantOperand::cast(instr->value())));
3983 if (!FLAG_unbox_double_fields && representation.IsDouble()) {
3984 DCHECK(access.IsInobject());
3985 DCHECK(!instr->hydrogen()->has_transition());
3986 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
3987 DoubleRegister value = ToDoubleRegister(instr->value());
3988 __ sdc1(value, FieldMemOperand(object, offset));
3989 return;
3990 }
3991
3992 if (instr->hydrogen()->has_transition()) {
3993 Handle<Map> transition = instr->hydrogen()->transition_map();
3994 AddDeprecationDependency(transition);
3995 __ li(scratch1, Operand(transition));
3996 __ sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
3997 if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
3998 Register temp = ToRegister(instr->temp());
3999 // Update the write barrier for the map field.
4000 __ RecordWriteForMap(object,
4001 scratch1,
4002 temp,
4003 GetRAState(),
4004 kSaveFPRegs);
4005 }
4006 }
4007
4008 // Do the store.
4009 Register destination = object;
4010 if (!access.IsInobject()) {
4011 destination = scratch1;
4012 __ ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
4013 }
4014
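 // With 32-bit smis an int32 value can be stored directly into the
 // payload half of an already-initialized smi field; the tag half is
 // already zero, so no explicit smi-tagging is required.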
4015 if (representation.IsSmi() && SmiValuesAre32Bits() &&
4016 instr->hydrogen()->value()->representation().IsInteger32()) {
4017 DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4018 if (FLAG_debug_code) {
4019 __ Load(scratch2, FieldMemOperand(destination, offset), representation);
4020 __ AssertSmi(scratch2);
4021 }
4022 // Store int value directly to upper half of the smi.
4023 offset = SmiWordOffset(offset);
4024 representation = Representation::Integer32();
4025 }
4026 MemOperand operand = FieldMemOperand(destination, offset);
4027
4028 if (FLAG_unbox_double_fields && representation.IsDouble()) {
4029 DCHECK(access.IsInobject());
4030 DoubleRegister value = ToDoubleRegister(instr->value());
4031 __ sdc1(value, operand);
4032 } else {
4033 DCHECK(instr->value()->IsRegister());
4034 Register value = ToRegister(instr->value());
4035 __ Store(value, operand, representation);
4036 }
4037
4038 if (instr->hydrogen()->NeedsWriteBarrier()) {
4039 // Update the write barrier for the object for in-object properties.
4040 Register value = ToRegister(instr->value());
4041 __ RecordWriteField(destination,
4042 offset,
4043 value,
4044 scratch2,
4045 GetRAState(),
4046 kSaveFPRegs,
4047 EMIT_REMEMBERED_SET,
4048 instr->hydrogen()->SmiCheckForWriteBarrier(),
4049 instr->hydrogen()->PointersToHereCheckForValue());
4050 }
4051}
4052
4053
4054void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4055 DCHECK(ToRegister(instr->context()).is(cp));
4056 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4057 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4058
4059 if (instr->hydrogen()->HasVectorAndSlot()) {
4060 EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
4061 }
4062
4063 __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
4064 Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
4065 isolate(), instr->language_mode(),
4066 instr->hydrogen()->initialization_state()).code();
4067 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4068}
4069
4070
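// When the index is a constant operand, the comparison operands are
// swapped and the condition commuted so that the register operand is
// always on the left of the compare.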
4071void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4072 Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
4073 Operand operand((int64_t)0);
4074 Register reg;
4075 if (instr->index()->IsConstantOperand()) {
4076 operand = ToOperand(instr->index());
4077 reg = ToRegister(instr->length());
4078 cc = CommuteCondition(cc);
4079 } else {
4080 reg = ToRegister(instr->index());
4081 operand = ToOperand(instr->length());
4082 }
4083 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4084 Label done;
4085 __ Branch(&done, NegateCondition(cc), reg, operand);
4086 __ stop("eliminated bounds check failed");
4087 __ bind(&done);
4088 } else {
4089 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand);
4090 }
4091}
4092
4093
4094void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4095 Register external_pointer = ToRegister(instr->elements());
4096 Register key = no_reg;
4097 ElementsKind elements_kind = instr->elements_kind();
4098 bool key_is_constant = instr->key()->IsConstantOperand();
4099 int constant_key = 0;
4100 if (key_is_constant) {
4101 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4102 if (constant_key & 0xF0000000) {
4103 Abort(kArrayIndexConstantValueTooBig);
4104 }
4105 } else {
4106 key = ToRegister(instr->key());
4107 }
4108 int element_size_shift = ElementsKindToShiftSize(elements_kind);
4109 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4110 ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
4111 : element_size_shift;
4112 int base_offset = instr->base_offset();
4113
4114 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
4115 Register address = scratch0();
4116 FPURegister value(ToDoubleRegister(instr->value()));
4117 if (key_is_constant) {
4118 if (constant_key != 0) {
4119 __ Daddu(address, external_pointer,
4120 Operand(constant_key << element_size_shift));
4121 } else {
4122 address = external_pointer;
4123 }
4124 } else {
4125 if (shift_size < 0) {
4126 if (shift_size == -32) {
4127 __ dsra32(address, key, 0);
4128 } else {
4129 __ dsra(address, key, -shift_size);
4130 }
4131 } else {
4132 __ dsll(address, key, shift_size);
4133 }
4134 __ Daddu(address, external_pointer, address);
4135 }
4136
4137 if (elements_kind == FLOAT32_ELEMENTS) {
4138 __ cvt_s_d(double_scratch0(), value);
4139 __ swc1(double_scratch0(), MemOperand(address, base_offset));
4140 } else { // Storing doubles, not floats.
4141 __ sdc1(value, MemOperand(address, base_offset));
4142 }
4143 } else {
4144 Register value(ToRegister(instr->value()));
4145 MemOperand mem_operand = PrepareKeyedOperand(
4146 key, external_pointer, key_is_constant, constant_key,
4147 element_size_shift, shift_size,
4148 base_offset);
4149 switch (elements_kind) {
4150 case UINT8_ELEMENTS:
4151 case UINT8_CLAMPED_ELEMENTS:
4152 case INT8_ELEMENTS:
4153 __ sb(value, mem_operand);
4154 break;
4155 case INT16_ELEMENTS:
4156 case UINT16_ELEMENTS:
4157 __ sh(value, mem_operand);
4158 break;
4159 case INT32_ELEMENTS:
4160 case UINT32_ELEMENTS:
4161 __ sw(value, mem_operand);
4162 break;
4163 case FLOAT32_ELEMENTS:
4164 case FLOAT64_ELEMENTS:
4165 case FAST_DOUBLE_ELEMENTS:
4166 case FAST_ELEMENTS:
4167 case FAST_SMI_ELEMENTS:
4168 case FAST_HOLEY_DOUBLE_ELEMENTS:
4169 case FAST_HOLEY_ELEMENTS:
4170 case FAST_HOLEY_SMI_ELEMENTS:
4171 case DICTIONARY_ELEMENTS:
4172 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
4173 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
4174 case FAST_STRING_WRAPPER_ELEMENTS:
4175 case SLOW_STRING_WRAPPER_ELEMENTS:
4176 case NO_ELEMENTS:
4177 UNREACHABLE();
4178 break;
4179 }
4180 }
4181}
4182
4183
4184void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4185 DoubleRegister value = ToDoubleRegister(instr->value());
4186 Register elements = ToRegister(instr->elements());
4187 Register scratch = scratch0();
4188 DoubleRegister double_scratch = double_scratch0();
4189 bool key_is_constant = instr->key()->IsConstantOperand();
4190 int base_offset = instr->base_offset();
4191 Label not_nan, done;
4192
4193 // Calculate the effective address of the slot in the array to store the
4194 // double value.
4195 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4196 if (key_is_constant) {
4197 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4198 if (constant_key & 0xF0000000) {
4199 Abort(kArrayIndexConstantValueTooBig);
4200 }
4201 __ Daddu(scratch, elements,
4202 Operand((constant_key << element_size_shift) + base_offset));
4203 } else {
4204 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4205 ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
4206 : element_size_shift;
4207 __ Daddu(scratch, elements, Operand(base_offset));
4208 DCHECK((shift_size == 3) || (shift_size == -29));
4209 if (shift_size == 3) {
4210 __ dsll(at, ToRegister(instr->key()), 3);
4211 } else if (shift_size == -29) {
4212 __ dsra(at, ToRegister(instr->key()), 29);
4213 }
4214 __ Daddu(scratch, scratch, at);
4215 }
4216
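 // If the value may be a NaN, store a canonicalized NaN bit pattern
 // instead, since fast double arrays reserve one NaN encoding for the
 // hole.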
4217 if (instr->NeedsCanonicalization()) {
4218 __ FPUCanonicalizeNaN(double_scratch, value);
4219 __ sdc1(double_scratch, MemOperand(scratch, 0));
4220 } else {
4221 __ sdc1(value, MemOperand(scratch, 0));
4222 }
4223}
4224
4225
4226void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4227 Register value = ToRegister(instr->value());
4228 Register elements = ToRegister(instr->elements());
4229 Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
4230 : no_reg;
4231 Register scratch = scratch0();
4232 Register store_base = scratch;
4233 int offset = instr->base_offset();
4234
4235 // Do the store.
4236 if (instr->key()->IsConstantOperand()) {
4237 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4238 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4239 offset += ToInteger32(const_operand) * kPointerSize;
4240 store_base = elements;
4241 } else {
4242 // Even though the HStoreKeyed instruction forces the input
4243 // representation for the key to be an integer, the input gets replaced
4244 // during bounds check elimination with the index argument to the bounds
4245 // check, which can be tagged, so that case must be handled here, too.
4246 if (instr->hydrogen()->key()->representation().IsSmi()) {
4247 __ SmiScale(scratch, key, kPointerSizeLog2);
4248 __ daddu(store_base, elements, scratch);
4249 } else {
4250 __ Dlsa(store_base, elements, key, kPointerSizeLog2);
4251 }
4252 }
4253
4254 Representation representation = instr->hydrogen()->value()->representation();
4255 if (representation.IsInteger32() && SmiValuesAre32Bits()) {
4256 DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4257 DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
4258 if (FLAG_debug_code) {
4259 Register temp = scratch1();
4260 __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
4261 __ AssertSmi(temp);
4262 }
4263
4264 // Store int value directly to upper half of the smi.
4265 STATIC_ASSERT(kSmiTag == 0);
4266 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
4267 offset = SmiWordOffset(offset);
4268 representation = Representation::Integer32();
4269 }
4270
4271 __ Store(value, MemOperand(store_base, offset), representation);
4272
4273 if (instr->hydrogen()->NeedsWriteBarrier()) {
4274 SmiCheck check_needed =
4275 instr->hydrogen()->value()->type().IsHeapObject()
4276 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4277 // Compute address of modified element and store it into key register.
4278 __ Daddu(key, store_base, Operand(offset));
4279 __ RecordWrite(elements,
4280 key,
4281 value,
4282 GetRAState(),
4283 kSaveFPRegs,
4284 EMIT_REMEMBERED_SET,
4285 check_needed,
4286 instr->hydrogen()->PointersToHereCheckForValue());
4287 }
4288}
4289
4290
4291void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4292 // Dispatch by representation: typed (external) array, fast double, or fast tagged array.
4293 if (instr->is_fixed_typed_array()) {
4294 DoStoreKeyedExternalArray(instr);
4295 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4296 DoStoreKeyedFixedDoubleArray(instr);
4297 } else {
4298 DoStoreKeyedFixedArray(instr);
4299 }
4300}
4301
4302
4303void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4304 DCHECK(ToRegister(instr->context()).is(cp));
4305 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4306 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4307 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4308
4309 if (instr->hydrogen()->HasVectorAndSlot()) {
4310 EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
4311 }
4312
4313 Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
4314 isolate(), instr->language_mode(),
4315 instr->hydrogen()->initialization_state()).code();
4316 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4317}
4318
4319
4320void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
4321 class DeferredMaybeGrowElements final : public LDeferredCode {
4322 public:
4323 DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
4324 : LDeferredCode(codegen), instr_(instr) {}
4325 void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
4326 LInstruction* instr() override { return instr_; }
4327
4328 private:
4329 LMaybeGrowElements* instr_;
4330 };
4331
4332 Register result = v0;
4333 DeferredMaybeGrowElements* deferred =
4334 new (zone()) DeferredMaybeGrowElements(this, instr);
4335 LOperand* key = instr->key();
4336 LOperand* current_capacity = instr->current_capacity();
4337
4338 DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
4339 DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
4340 DCHECK(key->IsConstantOperand() || key->IsRegister());
4341 DCHECK(current_capacity->IsConstantOperand() ||
4342 current_capacity->IsRegister());
4343
4344 if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
4345 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4346 int32_t constant_capacity =
4347 ToInteger32(LConstantOperand::cast(current_capacity));
4348 if (constant_key >= constant_capacity) {
4349 // Deferred case.
4350 __ jmp(deferred->entry());
4351 }
4352 } else if (key->IsConstantOperand()) {
4353 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4354 __ Branch(deferred->entry(), le, ToRegister(current_capacity),
4355 Operand(constant_key));
4356 } else if (current_capacity->IsConstantOperand()) {
4357 int32_t constant_capacity =
4358 ToInteger32(LConstantOperand::cast(current_capacity));
4359 __ Branch(deferred->entry(), ge, ToRegister(key),
4360 Operand(constant_capacity));
4361 } else {
4362 __ Branch(deferred->entry(), ge, ToRegister(key),
4363 Operand(ToRegister(current_capacity)));
4364 }
4365
4366 if (instr->elements()->IsRegister()) {
4367 __ mov(result, ToRegister(instr->elements()));
4368 } else {
4369 __ ld(result, ToMemOperand(instr->elements()));
4370 }
4371
4372 __ bind(deferred->exit());
4373}
4374
4375
4376void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
4377 // TODO(3095996): Get rid of this. For now, we need to make the
4378 // result register contain a valid pointer because it is already
4379 // contained in the register pointer map.
4380 Register result = v0;
4381 __ mov(result, zero_reg);
4382
4383 // We have to call a stub.
4384 {
4385 PushSafepointRegistersScope scope(this);
4386 if (instr->object()->IsRegister()) {
4387 __ mov(result, ToRegister(instr->object()));
4388 } else {
4389 __ ld(result, ToMemOperand(instr->object()));
4390 }
4391
4392 LOperand* key = instr->key();
4393 if (key->IsConstantOperand()) {
4394 __ li(a3, Operand(ToSmi(LConstantOperand::cast(key))));
4395 } else {
4396 __ mov(a3, ToRegister(key));
4397 __ SmiTag(a3);
4398 }
4399
4400 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
4401 instr->hydrogen()->kind());
4402 __ mov(a0, result);
4403 __ CallStub(&stub);
4404 RecordSafepointWithLazyDeopt(
4405 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4406 __ StoreToSafepointRegisterSlot(result, result);
4407 }
4408
4409 // Deopt on smi, which means the elements array changed to dictionary mode.
4410 __ SmiTst(result, at);
4411 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
4412}
4413
4414
4415void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4416 Register object_reg = ToRegister(instr->object());
4417 Register scratch = scratch0();
4418
4419 Handle<Map> from_map = instr->original_map();
4420 Handle<Map> to_map = instr->transitioned_map();
4421 ElementsKind from_kind = instr->from_kind();
4422 ElementsKind to_kind = instr->to_kind();
4423
4424 Label not_applicable;
4425 __ ld(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4426 __ Branch(&not_applicable, ne, scratch, Operand(from_map));
4427
4428 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4429 Register new_map_reg = ToRegister(instr->new_map_temp());
4430 __ li(new_map_reg, Operand(to_map));
4431 __ sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4432 // Write barrier.
4433 __ RecordWriteForMap(object_reg,
4434 new_map_reg,
4435 scratch,
4436 GetRAState(),
4437 kDontSaveFPRegs);
4438 } else {
4439 DCHECK(object_reg.is(a0));
4440 DCHECK(ToRegister(instr->context()).is(cp));
4441 PushSafepointRegistersScope scope(this);
4442 __ li(a1, Operand(to_map));
4443 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4444 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4445 __ CallStub(&stub);
4446 RecordSafepointWithRegisters(
4447 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
4448 }
4449 __ bind(&not_applicable);
4450}
4451
4452
4453void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4454 Register object = ToRegister(instr->object());
4455 Register temp = ToRegister(instr->temp());
4456 Label no_memento_found;
4457 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4458 DeoptimizeIf(al, instr, Deoptimizer::kMementoFound);
4459 __ bind(&no_memento_found);
4460}
4461
4462
4463void LCodeGen::DoStringAdd(LStringAdd* instr) {
4464 DCHECK(ToRegister(instr->context()).is(cp));
4465 DCHECK(ToRegister(instr->left()).is(a1));
4466 DCHECK(ToRegister(instr->right()).is(a0));
4467 StringAddStub stub(isolate(),
4468 instr->hydrogen()->flags(),
4469 instr->hydrogen()->pretenure_flag());
4470 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4471}
4472
4473
4474void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4475 class DeferredStringCharCodeAt final : public LDeferredCode {
4476 public:
4477 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4478 : LDeferredCode(codegen), instr_(instr) { }
4479 void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
4480 LInstruction* instr() override { return instr_; }
4481
4482 private:
4483 LStringCharCodeAt* instr_;
4484 };
4485
4486 DeferredStringCharCodeAt* deferred =
4487 new(zone()) DeferredStringCharCodeAt(this, instr);
4488 StringCharLoadGenerator::Generate(masm(),
4489 ToRegister(instr->string()),
4490 ToRegister(instr->index()),
4491 ToRegister(instr->result()),
4492 deferred->entry());
4493 __ bind(deferred->exit());
4494}
4495
4496
4497void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4498 Register string = ToRegister(instr->string());
4499 Register result = ToRegister(instr->result());
4500 Register scratch = scratch0();
4501
4502 // TODO(3095996): Get rid of this. For now, we need to make the
4503 // result register contain a valid pointer because it is already
4504 // contained in the register pointer map.
4505 __ mov(result, zero_reg);
4506
4507 PushSafepointRegistersScope scope(this);
4508 __ push(string);
4509 // Push the index as a smi. This is safe because of the checks in
4510 // DoStringCharCodeAt above.
4511 if (instr->index()->IsConstantOperand()) {
4512 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4513 __ Daddu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
4514 __ push(scratch);
4515 } else {
4516 Register index = ToRegister(instr->index());
4517 __ SmiTag(index);
4518 __ push(index);
4519 }
4520 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
4521 instr->context());
4522 __ AssertSmi(v0);
4523 __ SmiUntag(v0);
4524 __ StoreToSafepointRegisterSlot(v0, result);
4525}
4526
4527
4528void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4529 class DeferredStringCharFromCode final : public LDeferredCode {
4530 public:
4531 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4532 : LDeferredCode(codegen), instr_(instr) { }
4533 void Generate() override {
4534 codegen()->DoDeferredStringCharFromCode(instr_);
4535 }
4536 LInstruction* instr() override { return instr_; }
4537
4538 private:
4539 LStringCharFromCode* instr_;
4540 };
4541
4542 DeferredStringCharFromCode* deferred =
4543 new(zone()) DeferredStringCharFromCode(this, instr);
4544
4545 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4546 Register char_code = ToRegister(instr->char_code());
4547 Register result = ToRegister(instr->result());
4548 Register scratch = scratch0();
4549 DCHECK(!char_code.is(result));
4550
4551 __ Branch(deferred->entry(), hi,
4552 char_code, Operand(String::kMaxOneByteCharCode));
4553 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4554 __ Dlsa(result, result, char_code, kPointerSizeLog2);
4555 __ ld(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4556 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4557 __ Branch(deferred->entry(), eq, result, Operand(scratch));
4558 __ bind(deferred->exit());
4559}
4560
4561
4562void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4563 Register char_code = ToRegister(instr->char_code());
4564 Register result = ToRegister(instr->result());
4565
4566 // TODO(3095996): Get rid of this. For now, we need to make the
4567 // result register contain a valid pointer because it is already
4568 // contained in the register pointer map.
4569 __ mov(result, zero_reg);
4570
4571 PushSafepointRegistersScope scope(this);
4572 __ SmiTag(char_code);
4573 __ push(char_code);
4574 CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
4575 instr->context());
4576 __ StoreToSafepointRegisterSlot(v0, result);
4577}
4578
4579
4580void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4581 LOperand* input = instr->value();
4582 DCHECK(input->IsRegister() || input->IsStackSlot());
4583 LOperand* output = instr->result();
4584 DCHECK(output->IsDoubleRegister());
4585 FPURegister single_scratch = double_scratch0().low();
4586 if (input->IsStackSlot()) {
4587 Register scratch = scratch0();
4588 __ ld(scratch, ToMemOperand(input));
4589 __ mtc1(scratch, single_scratch);
4590 } else {
4591 __ mtc1(ToRegister(input), single_scratch);
4592 }
4593 __ cvt_d_w(ToDoubleRegister(output), single_scratch);
4594}
4595
4596
4597void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4598 LOperand* input = instr->value();
4599 LOperand* output = instr->result();
4600
4601 FPURegister dbl_scratch = double_scratch0();
4602 __ mtc1(ToRegister(input), dbl_scratch);
4603 __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch);
4604}
4605
4606
4607void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4608 class DeferredNumberTagU final : public LDeferredCode {
4609 public:
4610 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4611 : LDeferredCode(codegen), instr_(instr) { }
4612 void Generate() override {
4613 codegen()->DoDeferredNumberTagIU(instr_,
4614 instr_->value(),
4615 instr_->temp1(),
4616 instr_->temp2(),
4617 UNSIGNED_INT32);
4618 }
4619 LInstruction* instr() override { return instr_; }
4620
4621 private:
4622 LNumberTagU* instr_;
4623 };
4624
4625 Register input = ToRegister(instr->value());
4626 Register result = ToRegister(instr->result());
4627
4628 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4629 __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
4630 __ SmiTag(result, input);
4631 __ bind(deferred->exit());
4632}
4633
4634
void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp1,
                                     LOperand* temp2,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register src = ToRegister(value);
  Register dst = ToRegister(instr->result());
  Register tmp1 = scratch0();
  Register tmp2 = ToRegister(temp1);
  Register tmp3 = ToRegister(temp2);
  DoubleRegister dbl_scratch = double_scratch0();

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    if (dst.is(src)) {
      __ SmiUntag(src, dst);
      __ Xor(src, src, Operand(0x80000000));
    }
    __ mtc1(src, dbl_scratch);
    __ cvt_d_w(dbl_scratch, dbl_scratch);
  } else {
    __ mtc1(src, dbl_scratch);
    __ Cvt_d_uw(dbl_scratch, dbl_scratch);
  }

  if (FLAG_inline_new) {
    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, TAG_RESULT);
    __ Branch(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ mov(dst, zero_reg);
    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagI and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(v0, dst);
  }

  // Done. Store the double in dbl_scratch into the value field of the
  // allocated heap number.
  __ bind(&done);
  __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}

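// Boxing a double. The HeapNumber is allocated with DONT_TAG_RESULT so the
// sdc1 below can store through the untagged address; the pointer is tagged
// afterwards by adding kHeapObjectTag.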
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD final : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    // We want the untagged address first for performance.
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
                          DONT_TAG_RESULT);
  } else {
    __ Branch(deferred->entry());
  }
  __ bind(deferred->exit());
  __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
  // Now that we have finished with the object's real address, tag it.
  __ Daddu(reg, reg, kHeapObjectTag);
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, zero_reg);

  PushSafepointRegistersScope scope(this);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ Dsubu(v0, v0, kHeapObjectTag);
  __ StoreToSafepointRegisterSlot(v0, reg);
}

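// Smi tagging with overflow checks. On this port a Smi keeps its 32-bit
// payload in the upper word of the 64-bit register. A uint32 input with the
// sign bit set has no Smi representation, hence the deopt on bit 31; the
// signed case relies on SmiTagCheckOverflow, whose overflow indicator in
// 'at' is negative exactly when tagging overflowed (hence the lt deopt).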
void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    __ And(at, input, Operand(0x80000000));
    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
  }
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    __ SmiTagCheckOverflow(output, input, at);
    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
  } else {
    __ SmiTag(output, input);
  }
}

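// Smi untagging. With kHeapObjectTag == 1, the low bit distinguishes Smis
// (bit clear) from heap-object pointers (bit set), so needs_check reduces to
// a single And plus a deopt when the bit is set.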
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, the value of scratch won't be zero.
    __ And(scratch, input, Operand(kHeapObjectTag));
    __ SmiUntag(result, input);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg));
  } else {
    __ SmiUntag(result, input);
  }
}

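// Tagged -> double conversion behind NumberUntagD. Three kinds of input are
// accepted: Smis (untag, then convert with cvt_d_w), HeapNumbers (load the
// value field directly), and, when permitted, undefined (converted to the
// canonical NaN). Anything else deoptimizes.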
void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                DoubleRegister result_reg,
                                NumberUntagDMode mode) {
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();
  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();

  Register scratch = scratch0();
  Label convert, load_smi, done;
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
    // Heap number map check.
    __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    if (can_convert_undefined_to_nan) {
      __ Branch(&convert, ne, scratch, Operand(at));
    } else {
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch,
                   Operand(at));
    }
    // Load heap number.
    __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
      __ mfc1(at, result_reg);
      __ Branch(&done, ne, at, Operand(zero_reg));
      __ mfhc1(scratch, result_reg);  // Get exponent/sign bits.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch,
                   Operand(HeapNumber::kSignMask));
    }
    __ Branch(&done);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined (and the hole) to NaN.
      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
                   Operand(at));
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ Branch(&done);
    }
  } else {
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
    __ SmiUntag(scratch, input_reg);
  }
  // Smi to double register conversion.
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ mtc1(scratch, result_reg);
  __ cvt_d_w(result_reg, result_reg);
  __ bind(&done);
}

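// Deferred tagged -> int32 conversion for non-Smi inputs. Truncating
// conversions (the JS bitwise operators) truncate HeapNumbers modulo 2^32
// and map the oddballs undefined/false to 0 and true to 1; non-truncating
// conversions deopt on NaN, lost precision, or a -0 result.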
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  DoubleRegister double_scratch = double_scratch0();
  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());

  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input is a tagged HeapObject.
  // Heap number map check.
  __ ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  // This 'at' value and the scratch1 map value are used for tests in both
  // clauses of the if below.

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label no_heap_number, check_bools, check_false;
    // Check HeapNumber map.
    __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
    __ mov(scratch2, input_reg);  // In delay slot.
    __ TruncateHeapNumberToI(input_reg, scratch2);
    __ Branch(&done);

    // Check for oddballs. Undefined/False is converted to zero and True to
    // one for truncating conversions.
    __ bind(&no_heap_number);
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(&check_bools, ne, input_reg, Operand(at));
    DCHECK(ToRegister(instr->result()).is(input_reg));
    __ Branch(USE_DELAY_SLOT, &done);
    __ mov(input_reg, zero_reg);  // In delay slot.

    __ bind(&check_bools);
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(&check_false, ne, scratch2, Operand(at));
    __ Branch(USE_DELAY_SLOT, &done);
    __ li(input_reg, Operand(1));  // In delay slot.

    __ bind(&check_false);
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean,
                 scratch2, Operand(at));
    __ Branch(USE_DELAY_SLOT, &done);
    __ mov(input_reg, zero_reg);  // In delay slot.
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1,
                 Operand(at));

    // Load the double value.
    __ ldc1(double_scratch,
            FieldMemOperand(input_reg, HeapNumber::kValueOffset));

    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       input_reg,
                       double_scratch,
                       scratch1,
                       double_scratch2,
                       except_flag,
                       kCheckForInexactConversion);

    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
                 Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Branch(&done, ne, input_reg, Operand(zero_reg));

      __ mfhc1(scratch1, double_scratch);  // Get exponent/sign bits.
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
                   Operand(zero_reg));
    }
  }
  __ bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI final : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  DCHECK(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    // Let the deferred code handle the HeapObject case.
    __ JumpIfNotSmi(input_reg, deferred->entry());

    // Smi to int32 conversion.
    __ SmiUntag(input_reg);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DoubleRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(instr, input_reg, result_reg, mode);
}

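// Double -> int32 conversion with bailouts. When not truncating,
// EmitFPUTruncate rounds toward minus infinity and sets except_flag to a
// non-zero value on an inexact or invalid conversion, which deopts; a zero
// result additionally checks the sign bit of the double's high word
// (HeapNumber::kSignMask) to deopt on -0 when required.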
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    Register except_flag = LCodeGen::scratch1();

    __ EmitFPUTruncate(kRoundToMinusInf,
                       result_reg,
                       double_input,
                       scratch1,
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
                 Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ mfhc1(scratch1, double_input);  // Get exponent/sign bits.
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = LCodeGen::scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    Register except_flag = LCodeGen::scratch1();

    __ EmitFPUTruncate(kRoundToMinusInf,
                       result_reg,
                       double_input,
                       scratch1,
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
                 Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ mfhc1(scratch1, double_input);  // Get exponent/sign bits.
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
  __ SmiTag(result_reg, result_reg);
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ SmiTst(ToRegister(input), at);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg));
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    __ SmiTst(ToRegister(input), at);
    DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
  }
}


void LCodeGen::DoCheckArrayBufferNotNeutered(
    LCheckArrayBufferNotNeutered* instr) {
  Register view = ToRegister(instr->view());
  Register scratch = scratch0();

  __ ld(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
  __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
  __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
  DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, at, Operand(zero_reg));
}

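// Instance-type checks come in two shapes: an inclusive [first, last]
// interval test on the map's instance type, or a mask-and-tag test. A
// power-of-two mask needs only a single And and a comparison against zero.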
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ GetObjectType(input, scratch, scratch);

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
                   Operand(first));
    } else {
      DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch,
                   Operand(first));
      // Omit the upper-bound check when the interval ends at the last
      // possible type.
      if (last != LAST_TYPE) {
        DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch,
                     Operand(last));
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ And(at, scratch, mask);
      DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
                   at, Operand(zero_reg));
    } else {
      __ And(scratch, scratch, Operand(mask));
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
                   Operand(tag));
    }
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ li(at, Operand(cell));
    __ ld(at, FieldMemOperand(at, Cell::kValueOffset));
    DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at));
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object));
  }
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    __ mov(cp, zero_reg);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(v0, scratch0());
  }
  __ SmiTst(scratch0(), at);
  DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at,
               Operand(zero_reg));
}

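// Map checks. Stability checks emit no code at all, only compilation
// dependencies on the maps staying stable. Otherwise the object's map is
// compared against each allowed map; when a migration target exists, a
// mismatch falls into deferred code that calls Runtime::kTryMigrateInstance
// and then loops back to re-check (the deferred exit is the check label).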
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps final : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    void Generate() override {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    LInstruction* instr() override { return instr_; }

   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  Register map_reg = scratch0();
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register reg = ToRegister(input);
  __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
  }
  Handle<Map> map = maps->at(maps->size() - 1).handle();
  // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ Branch(deferred->entry(), ne, map_reg, Operand(map));
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map));
  }

  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);

  // Check for heap number.
  __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
               Operand(factory()->undefined_value()));
  __ mov(result_reg, zero_reg);
  __ jmp(&done);

  // Heap number case.
  __ bind(&heap_number);
  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
                                             HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
  __ jmp(&done);

  __ bind(&is_smi);
  __ ClampUint8(result_reg, scratch);

  __ bind(&done);
}


void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ FmoveHigh(result_reg, value_reg);
  } else {
    __ FmoveLow(result_reg, value_reg);
  }
}


void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  DoubleRegister result_reg = ToDoubleRegister(instr->result());
  __ Move(result_reg, lo_reg, hi_reg);
}

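// Inline bump-pointer allocation with a deferred runtime fallback
// (Runtime::kAllocateInTargetSpace). The fast path is roughly, under the
// flags computed below:
//
//   result = allocation_top;
//   if (result + size > allocation_limit) goto deferred;
//   allocation_top = result + size;
//   result += kHeapObjectTag;  // TAG_OBJECT
//
// MustPrefillWithFiller additionally writes one-pointer filler maps over the
// new object's body so the heap stays iterable until real initialization.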
void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate final : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= Page::kMaxRegularHeapObjectSize);
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ li(scratch, Operand(size - kHeapObjectTag));
    } else {
      __ Dsubu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
    }
    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    Label loop;
    __ bind(&loop);
    __ Dsubu(scratch, scratch, Operand(kPointerSize));
    __ Daddu(at, result, Operand(scratch));
    __ sd(scratch2, MemOperand(at));
    __ Branch(&loop, ge, scratch, Operand(zero_reg));
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size >= 0 && size <= Smi::kMaxValue) {
      __ li(v0, Operand(Smi::FromInt(size)));
      __ Push(v0);
    } else {
      // We should never get here at runtime => abort.
      __ stop("invalid allocation size");
      return;
    }
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ li(v0, Operand(Smi::FromInt(flags)));
  __ Push(v0);

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->value()).is(a3));
  DCHECK(ToRegister(instr->result()).is(v0));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
  __ JumpIfNotSmi(value_register, &do_call);
  __ li(v0, Operand(isolate()->factory()->number_string()));
  __ jmp(&end);
  __ bind(&do_call);
  TypeofStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&end);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Register cmp1 = no_reg;
  Operand cmp2 = Operand(no_reg);

  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal(),
                                                  &cmp1,
                                                  &cmp2);

  DCHECK(cmp1.is_valid());
  DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());

  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition, cmp1, cmp2);
  }
}

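// typeof dispatch. The type literal is a compile-time constant, so each
// string gets a dedicated code sequence. Rather than branching itself, the
// function returns the final condition and comparison operands through
// cmp1/cmp2 so the caller can fuse the test with its own control flow.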
Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name,
                                 Register* cmp1,
                                 Operand* cmp2) {
  // This function utilizes the delay slot heavily. This is used to load
  // values that are always usable without depending on the type of the input
  // register.
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    *cmp1 = input;
    *cmp2 = Operand(at);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    *cmp1 = scratch;
    *cmp2 = Operand(FIRST_NONSTRING_TYPE);
    final_branch_condition = lt;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    *cmp1 = scratch;
    *cmp2 = Operand(SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    *cmp1 = at;
    *cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    // typeof null is "object", not "undefined", so reject null first.
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    __ Branch(USE_DELAY_SLOT, false_label, eq, at, Operand(input));
    // The first instruction of JumpIfSmi is an And - it is safe in the delay
    // slot.
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    __ JumpIfSmi(input, false_label);
    __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ And(scratch, scratch,
           Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    *cmp1 = scratch;
    *cmp2 = Operand(1 << Map::kIsCallable);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label);
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ GetObjectType(input, scratch, scratch1());
    __ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE));
    // Check for callable or undetectable objects => false.
    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ And(at, scratch,
           Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)       \
  } else if (String::Equals(type_name, factory->type##_string())) { \
    __ JumpIfSmi(input, false_label);                                \
    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));    \
    __ LoadRoot(at, Heap::k##Type##MapRootIndex);                    \
    *cmp1 = input;                                                   \
    *cmp2 = Operand(at);                                             \
    final_branch_condition = eq;
  SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
  // clang-format on

  } else {
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
    __ Branch(false_label);
  }

  return final_branch_condition;
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons
  // (the needed return address), even though the implementation of LAZY and
  // EAGER is now identical. When LAZY is eventually completely folded into
  // EAGER, remove the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg,
               Operand(zero_reg));
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

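// Stack checks compare sp against the stack-limit root. At function entry
// the StackCheck builtin is called inline; at backwards branches the call
// (Runtime::kStackGuard) is moved into deferred code so the loop back edge
// stays short.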
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(&done, hs, sp, Operand(at));
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register result = ToRegister(instr->result());
  Register object = ToRegister(instr->object());

  Label use_cache, call_runtime;
  DCHECK(object.is(a0));
  __ CheckEnumCache(&call_runtime);

  __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Branch(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(object);
  CallRuntime(Runtime::kForInEnumerate, instr);
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ ld(result,
        FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ ld(result,
        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg));

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0()));
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object, index);
  __ mov(cp, zero_reg);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(v0, result);
}

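// Field loads for for-in by descriptor index. The Smi index appears to be
// encoded as follows (a reading of the code below, not a spec): the low bit
// flags a mutable-double field, handled by the deferred runtime call; after
// shifting that bit out, a non-negative index selects an in-object field
// and a negative index selects a slot in the out-of-object properties array.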
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

  // The low bit of the index flags a mutable double field; load it through
  // the deferred runtime call.
  __ And(scratch, index, Operand(Smi::FromInt(1)));
  __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
  __ dsra(index, index, 1);

  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
  __ SmiScale(scratch, index, kPointerSizeLog2);  // In delay slot.
  __ Daddu(scratch, object, scratch);
  __ ld(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ Branch(&done);

  __ bind(&out_of_object);
  __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to the negated out-of-object property index plus 1.
  __ Dsubu(scratch, result, scratch);
  __ ld(result, FieldMemOperand(scratch,
                                FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}

#undef __

}  // namespace internal
}  // namespace v8