// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/crankshaft/mips64/lithium-codegen-mips64.h"

#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"

namespace v8 {
namespace internal {


class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetTotalFrameSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered caller double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ sdc1(DoubleRegister::from_code(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered caller double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ ldc1(DoubleRegister::from_code(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    // a1: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // lr: Caller's pc.
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue(StackFrame::STUB);
    } else {
      __ Prologue(info()->GeneratePreagedPrologue());
    }
    frame_is_built_ = true;
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ Dsubu(sp, sp, Operand(slots * kPointerSize));
      __ Push(a0, a1);
      __ Daddu(a0, sp, Operand(slots * kPointerSize));
      __ li(a1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ Dsubu(a0, a0, Operand(kPointerSize));
      __ sd(a1, MemOperand(a0, 2 * kPointerSize));
      __ Branch(&loop, ne, a0, Operand(sp));
      __ Pop(a0, a1);
    } else {
      __ Dsubu(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }
  return !is_aborted();
}


void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info()->scope()->num_heap_slots() > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in a1.
    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ push(a1);
      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else if (slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(a1);
      __ CallRuntime(Runtime::kNewFunctionContext);
    }
    RecordSafepoint(deopt_mode);

    // Context is returned in both v0 and cp. It replaces the context
    // passed to us. It's saved in the stack and kept live in cp.
    __ mov(cp, v0);
    __ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ ld(a0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ sd(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(
              cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, a0, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ Dsubu(sp, sp, Operand(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ PushCommonFrame(scratch0());
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ PopCommonFrame(scratch0());
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }
  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
    Label table_start, call_deopt_entry;

    __ bind(&table_start);
    Label needs_frame;
    Address base = jump_table_[0]->address;
    for (int i = 0; i < jump_table_.length(); i++) {
      Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
      __ bind(&table_entry->label);
      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load the base
      // address and add an immediate offset.
      if (is_int16(entry - base)) {
        if (table_entry->needs_frame) {
          DCHECK(!info()->saves_caller_doubles());
          Comment(";;; call deopt with frame");
          __ PushCommonFrame();
          __ BranchAndLink(&needs_frame, USE_DELAY_SLOT);
          __ li(t9, Operand(entry - base));
        } else {
          __ BranchAndLink(&call_deopt_entry, USE_DELAY_SLOT);
          __ li(t9, Operand(entry - base));
        }

      } else {
        __ li(t9, Operand(entry - base));
        if (table_entry->needs_frame) {
          DCHECK(!info()->saves_caller_doubles());
          Comment(";;; call deopt with frame");
          __ PushCommonFrame();
          __ BranchAndLink(&needs_frame);
        } else {
          __ BranchAndLink(&call_deopt_entry);
        }
      }
    }
    if (needs_frame.is_linked()) {
      __ bind(&needs_frame);
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
      __ push(at);
      DCHECK(info()->IsStub());
    }

    Comment(";;; call deopt");
    __ bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    __ li(at,
          Operand(reinterpret_cast<int64_t>(base), RelocInfo::RUNTIME_ENTRY));
    __ Daddu(t9, t9, Operand(at));
    __ Jump(t9);
  }
  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}
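
// Illustrative note (not part of the original source): each jump-table row
// above only materializes a small offset from `base` when it fits in 16 bits
// (is_int16). For example, with base = 0x40001000 and entry = 0x40001020 the
// row loads t9 = 0x20, and the shared tail at call_deopt_entry computes
// base + t9 = 0x40001020 before jumping, saving a full 64-bit li per entry.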


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::from_code(index);
}


DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
  return DoubleRegister::from_code(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      AllowDeferredHandleDereference get_number;
      DCHECK(literal->IsNumber());
      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ li(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ ld(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                FloatRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(literal->IsNumber());
      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
      __ mtc1(at, flt_scratch);
      __ cvt_d_w(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot()) {
    MemOperand mem_op = ToMemOperand(op);
    __ ldc1(dbl_scratch, mem_op);
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  // return ToRepresentation(op, Representation::Integer32());
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();
}


int64_t LCodeGen::ToRepresentation_donotuse(LConstantOperand* op,
                                            const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<int64_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(static_cast<int64_t>(0));
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(static_cast<int64_t>(0));
}


static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}
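
// Illustrative note (not part of the original source): without an eager frame
// the "slots" are incoming arguments addressed off sp, and slot indices are
// negative. For index == -1 the formula yields -(-1 + 1) * kPointerSize == 0,
// i.e. MemOperand(sp, 0); index == -2 maps to sp + kPointerSize, and so on.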


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, FrameSlotToFPOffset(op->index()));
  } else {
    // Without an eager frame, retrieve the parameter relative to the
    // stack pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    // return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
    return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kIntSize);
  } else {
    // Without an eager frame, retrieve the parameter relative to the
    // stack pointer.
    // return MemOperand(
    //    sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
    return MemOperand(
        sp, ArgumentsOffsetWithoutFrame(op->index()) + kIntSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ ld(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type,
                            Register src1, const Operand& src2) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ Push(a1, scratch);
    __ li(scratch, Operand(count));
    __ lw(a1, MemOperand(scratch));
    __ Subu(a1, a1, Operand(1));
    __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
    __ li(a1, Operand(FLAG_deopt_every_n_times));
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label skip;
    if (condition != al) {
      __ Branch(&skip, NegateCondition(condition), src1, src2);
    }
    __ stop("trap_on_deopt");
    __ bind(&skip);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
  } else {
    Deoptimizer::JumpTableEntry* table_entry =
        new (zone()) Deoptimizer::JumpTableEntry(
            entry, deopt_info, bailout_type, !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry->IsEquivalentTo(*jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ Branch(&jump_table_.last()->label, condition, src1, src2);
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Register src1, const Operand& src2) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2);
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;

  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
    // Note: The code below even works when right contains kMinInt.
    __ dsubu(dividend, zero_reg, dividend);
    __ And(dividend, dividend, Operand(mask));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                   Operand(zero_reg));
    }
    __ Branch(USE_DELAY_SLOT, &done);
    __ dsubu(dividend, zero_reg, dividend);
  }

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, Operand(mask));
  __ bind(&done);
}
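
// Illustrative note (not part of the original source): for divisor == -8 the
// mask above is -(-8 + 1) == 7. A dividend of -13 is negated to 13, masked to
// 13 & 7 == 5, and negated back in the delay slot to -5, matching JS
// semantics where the sign of x % y follows the dividend (-13 % -8 == -5).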


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ Dmul(result, result, Operand(Abs(divisor)));
  __ Dsubu(result, dividend, Operand(result));

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
    __ bind(&remainder_not_zero);
  }
}
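
// Illustrative note (not part of the original source): the three instructions
// above implement the identity n % d == n - trunc(n / |d|) * |d|, whose
// result takes the sign of n. For n == 7, d == -3: trunc(7 / 3) == 2, and
// 7 - 2 * 3 == 1, matching 7 % -3 == 1 in JS.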


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  const Register left_reg = ToRegister(instr->left());
  const Register right_reg = ToRegister(instr->right());
  const Register result_reg = ToRegister(instr->result());

  // div runs in the background while we check for special cases.
  __ Dmod(result_reg, left_reg, right_reg);

  Label done;
  // Check for x % 0; we have to deopt in this case because we can't return a
  // NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg,
                 Operand(zero_reg));
  }

  // Check for kMinInt % -1, div will return kMinInt, which is not what we
  // want. We have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1));
    } else {
      __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
      __ Branch(USE_DELAY_SLOT, &done);
      __ mov(result_reg, zero_reg);
    }
    __ bind(&no_overflow_possible);
  }

  // If we care about -0, test if the dividend is <0 and the result is 0.
  __ Branch(&done, ge, left_reg, Operand(zero_reg));

  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg,
                 Operand(zero_reg));
  }
  __ bind(&done);
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt));
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ And(at, dividend, Operand(mask));
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg));
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ Dsubu(result, zero_reg, dividend);
    return;
  }
  uint16_t shift = WhichPowerOf2Abs(divisor);
  if (shift == 0) {
    __ Move(result, dividend);
  } else if (shift == 1) {
    __ dsrl32(result, dividend, 31);
    __ Daddu(result, dividend, Operand(result));
  } else {
    __ dsra32(result, dividend, 31);
    __ dsrl32(result, result, 32 - shift);
    __ Daddu(result, dividend, Operand(result));
  }
  if (shift > 0) __ dsra(result, result, shift);
  if (divisor < 0) __ Dsubu(result, zero_reg, result);
}
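
// Illustrative note (not part of the original source): the shift sequence
// above rounds toward zero by adding a sign-derived bias of (2^shift - 1)
// before the arithmetic shift. For shift == 1 and dividend == -3, the sign
// bit yields a bias of 1, so (-3 + 1) >> 1 == -1 rather than -2, matching
// truncating division.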


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ Dmul(scratch0(), result, Operand(divisor));
    __ Dsubu(scratch0(), scratch0(), dividend);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(),
                 Operand(zero_reg));
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Div(result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
                 Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
                 Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Calculate remainder.
    Register remainder = ToRegister(instr->temp());
    if (kArchVariant != kMips64r6) {
      __ mfhi(remainder);
    } else {
      __ dmod(remainder, dividend, divisor);
    }
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder,
                 Operand(zero_reg));
  }
}


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  DCHECK(addend.is(ToDoubleRegister(instr->result())));

  __ Madd_d(addend, addend, multiplier, multiplicand, double_scratch0());
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  Register scratch = result.is(dividend) ? scratch0() : dividend;
  DCHECK(!result.is(dividend) || !scratch.is(dividend));

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Move(result, dividend);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  uint16_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ dsra(result, dividend, shift);
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  // The dividend can be the same register as the result, so save its value
  // for the overflow check.
  __ Move(scratch, dividend);

  __ Dsubu(result, zero_reg, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
  }

  __ Xor(scratch, scratch, result);
  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, result, Operand(kMaxInt));
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ dsra(result, result, shift);
    return;
  }

  Label no_overflow, done;
  __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
  __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE);
  __ Branch(&done);
  __ bind(&no_overflow);
  __ dsra(result, result, shift);
  __ bind(&done);
}
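
// Illustrative note (not part of the original source): for a negative
// power-of-two divisor the code negates first and then relies on dsra, which
// floors. For dividend == 5 and divisor == -4: negate to -5, then
// -5 >> 2 == -2, which is exactly floor(5 / -4) == floor(-1.25) == -2.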


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ Dsubu(result, zero_reg, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
            dividend, Operand(zero_reg));
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Dsubu(result, zero_reg, result);
  __ jmp(&done);
  __ bind(&needs_adjustment);
  __ Daddu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ Dsubu(result, zero_reg, result);
  __ Dsubu(result, result, Operand(1));
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Ddiv(result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
                 Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
                 Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  // We performed a truncating division. Correct the result if necessary.
  Label done;
  Register remainder = scratch0();
  if (kArchVariant != kMips64r6) {
    __ mfhi(remainder);
  } else {
    __ dmod(remainder, dividend, divisor);
  }
  __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
  __ Xor(remainder, remainder, Operand(divisor));
  __ Branch(&done, ge, remainder, Operand(zero_reg));
  __ Dsubu(result, result, Operand(1));
  __ bind(&done);
}
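
// Illustrative note (not part of the original source): Ddiv truncates toward
// zero, so the tail above subtracts 1 exactly when the remainder is non-zero
// and its sign differs from the divisor's (tested via the Xor). For -7 / 2:
// the truncated quotient is -3 with remainder -1; -1 ^ 2 is negative, so the
// result is corrected to -4 == floor(-3.5).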


void LCodeGen::DoMulS(LMulS* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant is handled separately below.
      // If the constant is negative and left is zero, the result should be -0.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        if (overflow) {
          Label no_overflow;
          __ DsubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
          DeoptimizeIf(al, instr);
          __ bind(&no_overflow);
        } else {
          __ Dsubu(result, zero_reg, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
                       Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        // Nothing to do.
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ dsll(result, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Dsubu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ Dlsa(result, left, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Dsubu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ dsll(scratch, left, shift);
          __ Dsubu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Dsubu(result, zero_reg, result);
        } else {
          // Generate standard code.
          __ li(at, constant);
          __ Dmul(result, left, at);
        }
    }
  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      // hi:lo = left * right.
      __ Dmulh(result, left, right);
      __ dsra32(scratch, result, 0);
      __ sra(at, result, 31);
      __ SmiTag(result);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
    } else {
      __ SmiUntag(result, left);
      __ dmul(result, result, right);
    }

    if (bailout_on_minus_zero) {
      Label done;
      __ Xor(at, left, right);
      __ Branch(&done, ge, at, Operand(zero_reg));
      // Bail out if the result is minus zero.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}
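
// Illustrative note (not part of the original source): the constant cases
// above use shift-based strength reduction. For example, x * 8 becomes
// x << 3; x * 5 (5 - 1 == 4 is a power of two) becomes x + (x << 2) via Dlsa;
// x * 7 (7 + 1 == 8) becomes (x << 3) - x; and a negative constant simply
// negates the shifted result afterwards.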


void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant is handled separately below.
      // If the constant is negative and left is zero, the result should be -0.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        if (overflow) {
          Label no_overflow;
          __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
          DeoptimizeIf(al, instr);
          __ bind(&no_overflow);
        } else {
          __ Subu(result, zero_reg, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
                       Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        // Nothing to do.
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ sll(result, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ Lsa(result, left, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ sll(scratch, left, shift);
          __ Subu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else {
          // Generate standard code.
          __ li(at, constant);
          __ Mul(result, left, at);
        }
    }

  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      // hi:lo = left * right.
      __ Dmul(result, left, right);
      __ dsra32(scratch, result, 0);
      __ sra(at, result, 31);

      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
    } else {
      __ mul(result, left, right);
    }

    if (bailout_on_minus_zero) {
      Label done;
      __ Xor(at, left, right);
      __ Branch(&done, ge, at, Operand(zero_reg));
      // Bail out if the result is minus zero.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  DCHECK(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot()) {
    right = Operand(EmitLoadRegister(right_op, at));
  } else {
    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ And(result, left, right);
      break;
    case Token::BIT_OR:
      __ Or(result, left, right);
      break;
    case Token::BIT_XOR:
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ Nor(result, zero_reg, left);
      } else {
        __ Xor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // the result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());

  if (right_op->IsRegister()) {
    // No need to mask the right operand on MIPS, it is built into the variable
    // shift instructions.
    switch (instr->op()) {
      case Token::ROR:
        __ Ror(result, left, Operand(ToRegister(right_op)));
        break;
      case Token::SAR:
        __ srav(result, left, ToRegister(right_op));
        break;
      case Token::SHR:
        __ srlv(result, left, ToRegister(right_op));
        if (instr->can_deopt()) {
          // TODO(yy): (-1) >>> 0. anything else?
          DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result,
                       Operand(zero_reg));
          DeoptimizeIf(gt, instr, Deoptimizer::kNegativeValue, result,
                       Operand(kMaxInt));
        }
        break;
      case Token::SHL:
        __ sllv(result, left, ToRegister(right_op));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ Ror(result, left, Operand(shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sra(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ srl(result, left, shift_count);
        } else {
          if (instr->can_deopt()) {
            __ And(at, left, Operand(0x80000000));
            DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at,
                         Operand(zero_reg));
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi()) {
            __ dsll(result, left, shift_count);
          } else {
            __ sll(result, left, shift_count);
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
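
// Illustrative note (not part of the original source): the SHR deopts above
// exist because the result of JS >>> is unsigned, while Crankshaft keeps
// int32 values. (-1) >>> 0 is 4294967295, which is not representable as an
// int32, so unless the input is provably non-negative the code must
// deoptimize instead of producing a negative signed result.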


void LCodeGen::DoSubS(LSubS* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right));
  } else {  // can_overflow.
    Register scratch = scratch0();
    Label no_overflow_label;
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ DsubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
                       &no_overflow_label, scratch);
    DeoptimizeIf(al, instr);
    __ bind(&no_overflow_label);
  }
}
1658
1659
1660void LCodeGen::DoSubI(LSubI* instr) {
1661 LOperand* left = instr->left();
1662 LOperand* right = instr->right();
1663 LOperand* result = instr->result();
1664 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1665
1666 if (!can_overflow) {
1667 DCHECK(right->IsRegister() || right->IsConstantOperand());
1668 __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
1669 } else { // can_overflow.
Ben Murdochda12d292016-06-02 14:46:10 +01001670 Register scratch = scratch0();
1671 Label no_overflow_label;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001672 DCHECK(right->IsRegister() || right->IsConstantOperand());
Ben Murdochda12d292016-06-02 14:46:10 +01001673 __ SubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
1674 &no_overflow_label, scratch);
1675 DeoptimizeIf(al, instr);
1676 __ bind(&no_overflow_label);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001677 }
1678}
1679
1680
1681void LCodeGen::DoConstantI(LConstantI* instr) {
1682 __ li(ToRegister(instr->result()), Operand(instr->value()));
1683}
1684
1685
1686void LCodeGen::DoConstantS(LConstantS* instr) {
1687 __ li(ToRegister(instr->result()), Operand(instr->value()));
1688}
1689
1690
1691void LCodeGen::DoConstantD(LConstantD* instr) {
1692 DCHECK(instr->result()->IsDoubleRegister());
1693 DoubleRegister result = ToDoubleRegister(instr->result());
1694 double v = instr->value();
1695 __ Move(result, v);
1696}
1697
1698
1699void LCodeGen::DoConstantE(LConstantE* instr) {
1700 __ li(ToRegister(instr->result()), Operand(instr->value()));
1701}
1702
1703
1704void LCodeGen::DoConstantT(LConstantT* instr) {
1705 Handle<Object> object = instr->value(isolate());
1706 AllowDeferredHandleDereference smi_check;
1707 __ li(ToRegister(instr->result()), object);
1708}
1709
1710
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001711MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1712 LOperand* index,
1713 String::Encoding encoding) {
1714 if (index->IsConstantOperand()) {
1715 int offset = ToInteger32(LConstantOperand::cast(index));
1716 if (encoding == String::TWO_BYTE_ENCODING) {
1717 offset *= kUC16Size;
1718 }
1719 STATIC_ASSERT(kCharSize == 1);
1720 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1721 }
1722 Register scratch = scratch0();
1723 DCHECK(!scratch.is(string));
1724 DCHECK(!scratch.is(ToRegister(index)));
1725 if (encoding == String::ONE_BYTE_ENCODING) {
1726 __ Daddu(scratch, string, ToRegister(index));
1727 } else {
1728 STATIC_ASSERT(kUC16Size == 2);
1729 __ dsll(scratch, ToRegister(index), 1);
1730 __ Daddu(scratch, string, scratch);
1731 }
1732 return FieldMemOperand(scratch, SeqString::kHeaderSize);
1733}
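
// Worked example (illustrative, not in the original source): for a constant
// index the operand folds entirely into the offset. With a two-byte string
// and index 5, offset becomes 5 * kUC16Size == 10, so the returned operand
// is FieldMemOperand(string, SeqString::kHeaderSize + 10). For a register
// index the same address is built in scratch0() with a shift-by-1 (dsll)
// plus add instead.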


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    Register scratch = scratch0();
    __ ld(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
    __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

    __ And(scratch, scratch,
           Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ Dsubu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
                                      ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ lbu(result, operand);
  } else {
    __ lhu(result, operand);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register value = ToRegister(instr->value());

  if (FLAG_debug_code) {
    Register scratch = scratch0();
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
            ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ sb(value, operand);
  } else {
    __ sh(value, operand);
  }
}


void LCodeGen::DoAddE(LAddE* instr) {
  LOperand* result = instr->result();
  LOperand* left = instr->left();
  LOperand* right = instr->right();

  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
  DCHECK(right->IsRegister() || right->IsConstantOperand());
  __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
}


void LCodeGen::DoAddS(LAddS* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
  } else {  // can_overflow.
    Label no_overflow_label;
    Register scratch = scratch1();
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ DaddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
                       &no_overflow_label, scratch);
    DeoptimizeIf(al, instr);
    __ bind(&no_overflow_label);
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
  } else {  // can_overflow.
    Label no_overflow_label;
    Register scratch = scratch1();
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ AddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
                      &no_overflow_label, scratch);
    DeoptimizeIf(al, instr);
    __ bind(&no_overflow_label);
  }
}
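
// Pattern note (illustrative, not in the original source): the four
// overflow-checked add/sub paths above share one shape. The macro (e.g.
// AddBranchNoOvf) performs the arithmetic and branches to no_overflow_label
// when no signed overflow occurred; falling through means overflow, so
// DeoptimizeIf(al, ...) bails out unconditionally. In pseudo-C:
//
//   result = left + right;
//   if (!overflow) goto no_overflow_label;
//   Deoptimize();                 // reached only on overflow
//   no_overflow_label: ...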


void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  Register scratch = scratch1();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
    Register left_reg = ToRegister(left);
    Register right_reg = EmitLoadRegister(right, scratch0());
    Register result_reg = ToRegister(instr->result());
    Label return_right, done;
    __ Slt(scratch, left_reg, Operand(right_reg));
    if (condition == ge) {
      __ Movz(result_reg, left_reg, scratch);
      __ Movn(result_reg, right_reg, scratch);
    } else {
      DCHECK(condition == le);
      __ Movn(result_reg, left_reg, scratch);
      __ Movz(result_reg, right_reg, scratch);
    }
  } else {
    DCHECK(instr->hydrogen()->representation().IsDouble());
    FPURegister left_reg = ToDoubleRegister(left);
    FPURegister right_reg = ToDoubleRegister(right);
    FPURegister result_reg = ToDoubleRegister(instr->result());
    Label nan, done;
    if (operation == HMathMinMax::kMathMax) {
      __ MaxNaNCheck_d(result_reg, left_reg, right_reg, &nan);
    } else {
      DCHECK(operation == HMathMinMax::kMathMin);
      __ MinNaNCheck_d(result_reg, left_reg, right_reg, &nan);
    }
    __ Branch(&done);

    __ bind(&nan);
    __ LoadRoot(scratch, Heap::kNanValueRootIndex);
    __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));

    __ bind(&done);
  }
}
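
// Illustrative sketch (not in the original source) of the integer path:
// Slt materializes the comparison, then a Movn/Movz pair selects a value
// without a branch. For kMathMin (condition == le) the logic is equivalent
// to:
//
//   scratch = (left < right) ? 1 : 0;          // Slt
//   if (scratch != 0) result = left;           // Movn: left < right
//   else              result = right;          // Movz: right <= left
//
// i.e. result = min(left, right), computed branch-free.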


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      __ add_d(result, left, right);
      break;
    case Token::SUB:
      __ sub_d(result, left, right);
      break;
    case Token::MUL:
      __ mul_d(result, left, right);
      break;
    case Token::DIV:
      __ div_d(result, left, right);
      break;
    case Token::MOD: {
      // Save a0-a3 on the stack.
      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
      __ MultiPush(saved_regs);

      __ PrepareCallCFunction(0, 2, scratch0());
      __ MovToFloatParameters(left, right);
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()),
          0, 2);
      // Move the result into the double result register.
      __ MovFromFloatResult(result);

      // Restore the saved registers.
      __ MultiPop(saved_regs);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}
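
// Note (illustrative, not in the original source): unlike ADD/SUB/MUL/DIV,
// Token::MOD has no single FPU instruction, so it calls out to a C helper.
// JavaScript's % on doubles follows fmod-style semantics, where the result
// takes the sign of the dividend, e.g. in JS:
//
//   5 % 3    ==  2
//   -5 % 3   == -2
//   5 % -3   ==  2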


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(a1));
  DCHECK(ToRegister(instr->right()).is(a0));
  DCHECK(ToRegister(instr->result()).is(v0));

  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
  // Other architectures use a nop here to signal that there is no inlined
  // patchable code. MIPS does not need the nop, since our marker instruction
  // (andi zero_reg) never appears in normal code.
}


template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr,
                          Condition condition,
                          Register src1,
                          const Operand& src2) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();
  if (right_block == left_block || condition == al) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ Branch(chunk_->GetAssemblyLabel(right_block),
              NegateCondition(condition), src1, src2);
  } else if (right_block == next_block) {
    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
  } else {
    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}
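
// Illustrative note (not in the original source): EmitBranch exploits block
// layout to avoid redundant jumps. If the true block is emitted next, it
// inverts the condition and branches only to the false block, falling
// through into the true block; if the false block is next, it branches on
// the original condition; only when neither successor is adjacent does it
// emit a conditional branch followed by an unconditional one.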


template<class InstrType>
void LCodeGen::EmitBranchF(InstrType instr,
                           Condition condition,
                           FPURegister src1,
                           FPURegister src2) {
  int right_block = instr->FalseDestination(chunk_);
  int left_block = instr->TrueDestination(chunk_);

  int next_block = GetNextEmittedBlock();
  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
               NegateFpuCondition(condition), src1, src2);
  } else if (right_block == next_block) {
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
               condition, src1, src2);
  } else {
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
               condition, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}


template <class InstrType>
void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition,
                              Register src1, const Operand& src2) {
  int true_block = instr->TrueDestination(chunk_);
  __ Branch(chunk_->GetAssemblyLabel(true_block), condition, src1, src2);
}


template <class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition,
                               Register src1, const Operand& src2) {
  int false_block = instr->FalseDestination(chunk_);
  __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
}


template<class InstrType>
void LCodeGen::EmitFalseBranchF(InstrType instr,
                                Condition condition,
                                FPURegister src1,
                                FPURegister src2) {
  int false_block = instr->FalseDestination(chunk_);
  __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
             condition, src1, src2);
}


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ stop("LDebugBreak");
}


void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32() || r.IsSmi()) {
    DCHECK(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    EmitBranch(instr, ne, reg, Operand(zero_reg));
  } else if (r.IsDouble()) {
    DCHECK(!info()->IsStub());
    DoubleRegister reg = ToDoubleRegister(instr->value());
    // Test the double value. Zero and NaN are false.
    EmitBranchF(instr, ogl, reg, kDoubleRegZero);
  } else {
    DCHECK(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      DCHECK(!info()->IsStub());
      __ LoadRoot(at, Heap::kTrueValueRootIndex);
      EmitBranch(instr, eq, reg, Operand(at));
    } else if (type.IsSmi()) {
      DCHECK(!info()->IsStub());
      EmitBranch(instr, ne, reg, Operand(zero_reg));
    } else if (type.IsJSArray()) {
      DCHECK(!info()->IsStub());
      EmitBranch(instr, al, zero_reg, Operand(zero_reg));
    } else if (type.IsHeapNumber()) {
      DCHECK(!info()->IsStub());
      DoubleRegister dbl_scratch = double_scratch0();
      __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
      // Test the double value. Zero and NaN are false.
      EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
    } else if (type.IsString()) {
      DCHECK(!info()->IsStub());
      __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
      EmitBranch(instr, ne, at, Operand(zero_reg));
    } else {
      ToBooleanICStub::Types expected =
          instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();

      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
        // undefined -> false.
        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
        // Boolean -> its value.
        __ LoadRoot(at, Heap::kTrueValueRootIndex);
        __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
        __ LoadRoot(at, Heap::kFalseValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
        // 'null' -> false.
        __ LoadRoot(at, Heap::kNullValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }

      if (expected.Contains(ToBooleanICStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ SmiTst(reg, at);
        DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
      }

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset));
        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
          __ And(at, at, Operand(1 << Map::kIsUndetectable));
          __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
        }
      }

      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(instr->TrueLabel(chunk_),
                  ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
      }

      if (expected.Contains(ToBooleanICStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
        __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
        __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
        __ Branch(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
        // Symbol value -> true.
        const Register scratch = scratch1();
        __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
      }

      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
        // SIMD value -> true.
        const Register scratch = scratch1();
        __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(instr->TrueLabel(chunk_), eq, scratch,
                  Operand(SIMD128_VALUE_TYPE));
      }

      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        DoubleRegister dbl_scratch = double_scratch0();
        Label not_heap_number;
        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
        __ Branch(&not_heap_number, ne, map, Operand(at));
        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
        __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                   ne, dbl_scratch, kDoubleRegZero);
        // Falls through if dbl_scratch == 0.
        __ Branch(instr->FalseLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg,
                     Operand(zero_reg));
      }
    }
  }
}
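
// Illustrative summary (not in the original source): the dispatch above
// implements JavaScript ToBoolean. The falsy values it tests for are
// exactly those of the language spec:
//
//   undefined, null, false, 0, -0, NaN, ""   // -> false
//   everything else                          // -> true
//
// Each ToBooleanICStub type that type feedback says has been seen gets an
// inline check; a value outside the observed set triggers a deopt so the
// feedback can be widened.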


void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = kNoCondition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = ne;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  bool is_unsigned =
      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
  Condition cond = TokenToCondition(instr->op(), is_unsigned);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
                         ? instr->TrueDestination(chunk_)
                         : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Compare left and right as doubles and load the
      // resulting flags into the normal status register.
      FPURegister left_reg = ToDoubleRegister(left);
      FPURegister right_reg = ToDoubleRegister(right);

      // If a NaN is involved, i.e. the result is unordered,
      // jump to false block label.
      __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
                 left_reg, right_reg);

      EmitBranchF(instr, cond, left_reg, right_reg);
    } else {
      Register cmp_left;
      Operand cmp_right = Operand((int64_t)0);
      if (right->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          cmp_left = ToRegister(left);
          cmp_right = Operand(Smi::FromInt(value));
        } else {
          cmp_left = ToRegister(left);
          cmp_right = Operand(value);
        }
      } else if (left->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          cmp_left = ToRegister(right);
          cmp_right = Operand(Smi::FromInt(value));
        } else {
          cmp_left = ToRegister(right);
          cmp_right = Operand(value);
        }
        // We commuted the operands, so commute the condition.
        cond = CommuteCondition(cond);
      } else {
        cmp_left = ToRegister(left);
        cmp_right = Operand(ToRegister(right));
      }

      EmitBranch(instr, cond, cmp_left, cmp_right);
    }
  }
}
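
// Illustrative note (not in the original source): the unordered pre-check
// above gives NaN the semantics JavaScript requires. Every relational
// comparison involving NaN is false, e.g. in JS:
//
//   1 < NaN     // false
//   NaN <= 1    // false
//   NaN == NaN  // false
//
// so when BranchF detects an unordered result it jumps straight to the
// false block, before the ordered comparison runs.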


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());

  EmitBranch(instr, eq, left, Operand(right));
}


void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ li(at, Operand(factory()->the_hole_value()));
    EmitBranch(instr, eq, input_reg, Operand(at));
    return;
  }

  DoubleRegister input_reg = ToDoubleRegister(instr->object());
  EmitFalseBranchF(instr, eq, input_reg, input_reg);

  Register scratch = scratch0();
  __ FmoveHigh(scratch, input_reg);
  EmitBranch(instr, eq, scratch,
             Operand(static_cast<int32_t>(kHoleNanUpper32)));
}


Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }
  __ GetObjectType(input, temp1, temp1);

  return lt;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->type().IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  Condition true_cond =
      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond, temp1,
             Operand(FIRST_NONSTRING_TYPE));
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Register input_reg = EmitLoadRegister(instr->value(), at);
  __ And(at, input_reg, kSmiTagMask);
  EmitBranch(instr, eq, at, Operand(zero_reg));
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ ld(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
  EmitBranch(instr, ne, at, Operand(zero_reg));
}


static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(a1));
  DCHECK(ToRegister(instr->right()).is(a0));

  Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
  __ LoadRoot(at, Heap::kTrueValueRootIndex);
  EmitBranch(instr, eq, v0, Operand(at));
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  DCHECK(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return hs;
  if (from == FIRST_TYPE) return ls;
  UNREACHABLE();
  return eq;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ GetObjectType(input, scratch, scratch);
  EmitBranch(instr,
             BranchCondition(instr->hydrogen()),
             scratch,
             Operand(TestType(instr->hydrogen())));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ lwu(result, FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ lwu(scratch,
         FieldMemOperand(input, String::kHashFieldOffset));
  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, eq, at, Operand(zero_reg));
}


// Branches to a label or falls through with the answer in flags. Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  DCHECK(!input.is(temp));
  DCHECK(!input.is(temp2));
  DCHECK(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  __ GetObjectType(input, temp, temp2);
  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
    __ Branch(is_true, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
  } else {
    __ Branch(is_false, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  Register instance_type = scratch1();
  DCHECK(!instance_type.is(temp));
  __ GetMapConstructor(temp, temp, temp2, instance_type);

  // Objects with a non-function constructor have class 'Object'.
  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
    __ Branch(is_true, ne, instance_type, Operand(JS_FUNCTION_TYPE));
  } else {
    __ Branch(is_false, ne, instance_type, Operand(JS_FUNCTION_TYPE));
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ ld(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ ld(temp, FieldMemOperand(temp,
                              SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted. This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax. Since both sides are internalized it is sufficient to use an
  // identity comparison.

  // End with the address of this class_name instance in temp register.
  // On MIPS, the caller must do the comparison with Handle<String> class_name.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = scratch0();
  Register temp2 = ToRegister(instr->temp());
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  EmitBranch(instr, eq, temp, Operand(class_name));
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
  EmitBranch(instr, eq, temp, Operand(instr->map()));
}


void LCodeGen::DoHasInPrototypeChainAndBranch(
    LHasInPrototypeChainAndBranch* instr) {
  Register const object = ToRegister(instr->object());
  Register const object_map = scratch0();
  Register const object_instance_type = scratch1();
  Register const object_prototype = object_map;
  Register const prototype = ToRegister(instr->prototype());

  // The {object} must be a spec object. It's sufficient to know that {object}
  // is not a smi, since all other non-spec objects have {null} prototypes and
  // will be ruled out below.
  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
    __ SmiTst(object, at);
    EmitFalseBranch(instr, eq, at, Operand(zero_reg));
  }

  // Loop through the {object}s prototype chain looking for the {prototype}.
  __ ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
  Label loop;
  __ bind(&loop);

  // Deoptimize if the object needs to be access checked.
  __ lbu(object_instance_type,
         FieldMemOperand(object_map, Map::kBitFieldOffset));
  __ And(object_instance_type, object_instance_type,
         Operand(1 << Map::kIsAccessCheckNeeded));
  DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, object_instance_type,
               Operand(zero_reg));
  __ lbu(object_instance_type,
         FieldMemOperand(object_map, Map::kInstanceTypeOffset));
  DeoptimizeIf(eq, instr, Deoptimizer::kProxy, object_instance_type,
               Operand(JS_PROXY_TYPE));

  __ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
  EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
  __ LoadRoot(at, Heap::kNullValueRootIndex);
  EmitFalseBranch(instr, eq, object_prototype, Operand(at));
  __ Branch(&loop, USE_DELAY_SLOT);
  __ ld(object_map, FieldMemOperand(object_prototype,
                                    HeapObject::kMapOffset));  // In delay slot.
}
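
// Illustrative sketch (not in the original source) of the loop above, in
// pseudo-C:
//
//   map = object->map();
//   for (;;) {
//     if (map->needs_access_check()) Deoptimize();          // kAccessCheck
//     if (map->instance_type() == JS_PROXY_TYPE) Deoptimize();
//     current = map->prototype();
//     if (current == prototype) return true_branch;
//     if (current == null)      return false_branch;
//     map = current->map();     // hoisted into the branch delay slot
//   }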


void LCodeGen::DoCmpT(LCmpT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // On MIPS there is no need for a "no inlined smi code" marker (nop).

  Condition condition = ComputeCompareCondition(op);
  // A minor optimization that relies on LoadRoot always emitting one
  // instruction.
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
  Label done, check;
  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
  __ bind(&check);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ bind(&done);
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in v0. Since we're leaving the
    // code managed by the register allocator and tearing down the frame, it's
    // safe to write to the context register.
    __ push(v0);
    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit);
  }
  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  }
  if (NeedsEagerFrame()) {
    __ mov(sp, fp);
    __ Pop(ra, fp);
  }
  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
    if (sp_delta != 0) {
      __ Daddu(sp, sp, Operand(sp_delta));
    }
  } else {
    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi.
    __ SmiUntag(reg);
    __ Dlsa(sp, sp, reg, kPointerSizeLog2);
  }

  __ Jump(ra);
}


template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
  Register vector_register = ToRegister(instr->temp_vector());
  Register slot_register = LoadWithVectorDescriptor::SlotRegister();
  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
  DCHECK(slot_register.is(a0));

  AllowDeferredHandleDereference vector_structure_check;
  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
  __ li(vector_register, vector);
  // No need to allocate this register.
  FeedbackVectorSlot slot = instr->hydrogen()->slot();
  int index = vector->GetIndex(slot);
  __ li(slot_register, Operand(Smi::FromInt(index)));
}


template <class T>
void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
  Register vector_register = ToRegister(instr->temp_vector());
  Register slot_register = ToRegister(instr->temp_slot());

  AllowDeferredHandleDereference vector_structure_check;
  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
  __ li(vector_register, vector);
  FeedbackVectorSlot slot = instr->hydrogen()->slot();
  int index = vector->GetIndex(slot);
  __ li(slot_register, Operand(Smi::FromInt(index)));
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->global_object())
             .is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->result()).is(v0));

  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
                        isolate(), instr->typeof_mode(), PREMONOMORPHIC)
                        .code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());

  __ ld(result, ContextMemOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
    } else {
      Label is_not_hole;
      __ Branch(&is_not_hole, ne, result, Operand(at));
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ bind(&is_not_hole);
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  MemOperand target = ContextMemOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ ld(scratch, target);
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at));
    } else {
      __ Branch(&skip_assignment, ne, scratch, Operand(at));
    }
  }

  __ sd(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch0(),
                              GetRAState(),
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}


void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  Register object = ToRegister(instr->object());
  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    MemOperand operand = MemOperand(object, offset);
    __ Load(result, operand, access.representation());
    return;
  }

  if (instr->hydrogen()->representation().IsDouble()) {
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ ldc1(result, FieldMemOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }

  Representation representation = access.representation();
  if (representation.IsSmi() && SmiValuesAre32Bits() &&
      instr->hydrogen()->representation().IsInteger32()) {
    if (FLAG_debug_code) {
      // Verify this is really an Smi.
      Register scratch = scratch0();
      __ Load(scratch, FieldMemOperand(object, offset), representation);
      __ AssertSmi(scratch);
    }

    // Read int value directly from upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
    offset = SmiWordOffset(offset);
    representation = Representation::Integer32();
  }
  __ Load(result, FieldMemOperand(object, offset), representation);
}
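
// Illustrative note (not in the original source): with 32-bit smis on a
// 64-bit target, a smi field stores its payload in the upper 32 bits of the
// word (kSmiTagSize + kSmiShiftSize == 32). SmiWordOffset(offset) points the
// load at the half-word that holds the payload (offset + 4 on a
// little-endian target, which we assume here), so a plain 32-bit load yields
// the untagged integer and no explicit SmiUntag is needed.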


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->result()).is(v0));

  // Name is always in a2.
  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
                        isolate(), NOT_INSIDE_TYPEOF,
                        instr->hydrogen()->initialization_state())
                        .code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Get the prototype or initial map from the function.
  __ ld(result,
        FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));

  // If the function does not have an initial map, we're done.
  Label done;
  __ GetObjectType(result, scratch, scratch);
  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));

  // Get the prototype from the initial map.
  __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  // There are two words between the frame pointer and the last argument.
  // Subtracting the index from the length accounts for one of them; add one
  // more for the other.
  if (instr->length()->IsConstantOperand()) {
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    if (instr->index()->IsConstantOperand()) {
      int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
      int index = (const_length - const_index) + 1;
      __ ld(result, MemOperand(arguments, index * kPointerSize));
    } else {
      Register index = ToRegister(instr->index());
      __ li(at, Operand(const_length + 1));
      __ Dsubu(result, at, index);
      __ Dlsa(at, arguments, result, kPointerSizeLog2);
      __ ld(result, MemOperand(at));
    }
  } else if (instr->index()->IsConstantOperand()) {
    Register length = ToRegister(instr->length());
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = const_index - 1;
    if (loc != 0) {
      __ Dsubu(result, length, Operand(loc));
      __ Dlsa(at, arguments, result, kPointerSizeLog2);
      __ ld(result, MemOperand(at));
    } else {
      __ Dlsa(at, arguments, length, kPointerSizeLog2);
      __ ld(result, MemOperand(at));
    }
  } else {
    Register length = ToRegister(instr->length());
    Register index = ToRegister(instr->index());
    __ Dsubu(result, length, index);
    __ Daddu(result, result, 1);
    __ Dlsa(at, arguments, result, kPointerSizeLog2);
    __ ld(result, MemOperand(at));
  }
}
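
// Worked example (illustrative, not in the original source): on the
// fully-constant path with length == 3 and index == 0, the element offset is
// (3 - 0) + 1 == 4 words, i.e. the load reads
// MemOperand(arguments, 4 * kPointerSize), skipping the two words between
// the frame pointer and the last argument. The register paths compute the
// same "length - index + 1" word index and scale it with Dlsa by
// kPointerSizeLog2.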


void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
      : element_size_shift;
  int base_offset = instr->base_offset();

  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
    FPURegister result = ToDoubleRegister(instr->result());
    if (key_is_constant) {
      __ Daddu(scratch0(), external_pointer,
               constant_key << element_size_shift);
    } else {
      if (shift_size < 0) {
        if (shift_size == -32) {
          __ dsra32(scratch0(), key, 0);
        } else {
          __ dsra(scratch0(), key, -shift_size);
        }
      } else {
        __ dsll(scratch0(), key, shift_size);
      }
      __ Daddu(scratch0(), scratch0(), external_pointer);
    }
    if (elements_kind == FLOAT32_ELEMENTS) {
      __ lwc1(result, MemOperand(scratch0(), base_offset));
      __ cvt_d_s(result, result);
    } else {  // i.e. elements_kind == FLOAT64_ELEMENTS
      __ ldc1(result, MemOperand(scratch0(), base_offset));
    }
  } else {
    Register result = ToRegister(instr->result());
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size, base_offset);
    switch (elements_kind) {
      case INT8_ELEMENTS:
        __ lb(result, mem_operand);
        break;
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ lbu(result, mem_operand);
        break;
      case INT16_ELEMENTS:
        __ lh(result, mem_operand);
        break;
      case UINT16_ELEMENTS:
        __ lhu(result, mem_operand);
        break;
      case INT32_ELEMENTS:
        __ lw(result, mem_operand);
        break;
      case UINT32_ELEMENTS:
        __ lw(result, mem_operand);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue,
                       result, Operand(0x80000000));
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
      case FAST_STRING_WRAPPER_ELEMENTS:
      case SLOW_STRING_WRAPPER_ELEMENTS:
      case NO_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  DoubleRegister result = ToDoubleRegister(instr->result());
  Register scratch = scratch0();

  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);

  int base_offset = instr->base_offset();
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    base_offset += constant_key * kDoubleSize;
  }
  __ Daddu(scratch, elements, Operand(base_offset));

  if (!key_is_constant) {
    key = ToRegister(instr->key());
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
        : element_size_shift;
    if (shift_size > 0) {
      __ dsll(at, key, shift_size);
    } else if (shift_size == -32) {
      __ dsra32(at, key, 0);
    } else {
      __ dsra(at, key, -shift_size);
    }
    __ Daddu(scratch, scratch, at);
  }

  __ ldc1(result, MemOperand(scratch));

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ FmoveHigh(scratch, result);
    DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch,
                 Operand(static_cast<int32_t>(kHoleNanUpper32)));
  }
}
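
// Illustrative note (not in the original source): in a FAST_DOUBLE_ELEMENTS
// backing store, "the hole" is encoded as a particular NaN bit pattern whose
// upper 32 bits equal kHoleNanUpper32. An ordinary FPU load cannot
// distinguish it from a regular NaN, so the check above moves the high word
// into an integer register (FmoveHigh) and compares it against
// kHoleNanUpper32, deoptimizing when a hole was read.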


void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  HLoadKeyed* hinstr = instr->hydrogen();
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = instr->base_offset();

  if (instr->key()->IsConstantOperand()) {
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    Register key = ToRegister(instr->key());
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ SmiScale(scratch, key, kPointerSizeLog2);
      __ daddu(scratch, elements, scratch);
    } else {
      __ Dlsa(scratch, elements, key, kPointerSizeLog2);
    }
  }

  Representation representation = hinstr->representation();
  if (representation.IsInteger32() && SmiValuesAre32Bits() &&
      hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
    DCHECK(!hinstr->RequiresHoleCheck());
    if (FLAG_debug_code) {
      Register temp = scratch1();
      __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
      __ AssertSmi(temp);
    }

    // Read int value directly from upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
    offset = SmiWordOffset(offset);
  }

  __ Load(result, MemOperand(store_base, offset), representation);

  // Check for the hole value.
  if (hinstr->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      __ SmiTst(result, scratch);
      DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch,
                   Operand(zero_reg));
    } else {
      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
      DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch));
    }
  } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
    DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
    Label done;
    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
    __ Branch(&done, ne, result, Operand(scratch));
    if (info()->IsStub()) {
      // A stub can safely convert the hole to undefined only if the array
      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
      // it needs to bail out.
      __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
      // The comparison only needs LS bits of value, which is a smi.
      __ ld(result, FieldMemOperand(result, Cell::kValueOffset));
      DeoptimizeIf(ne, instr, Deoptimizer::kHole, result,
                   Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
    }
    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
    __ bind(&done);
  }
}


void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_fixed_typed_array()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}


MemOperand LCodeGen::PrepareKeyedOperand(Register key,
                                         Register base,
                                         bool key_is_constant,
                                         int constant_key,
                                         int element_size,
                                         int shift_size,
                                         int base_offset) {
  if (key_is_constant) {
    return MemOperand(base, (constant_key << element_size) + base_offset);
  }

  if (base_offset == 0) {
    if (shift_size >= 0) {
      __ dsll(scratch0(), key, shift_size);
      __ Daddu(scratch0(), base, scratch0());
      return MemOperand(scratch0());
    } else {
      if (shift_size == -32) {
        __ dsra32(scratch0(), key, 0);
      } else {
        __ dsra(scratch0(), key, -shift_size);
      }
      __ Daddu(scratch0(), base, scratch0());
      return MemOperand(scratch0());
    }
  }

  if (shift_size >= 0) {
    __ dsll(scratch0(), key, shift_size);
    __ Daddu(scratch0(), base, scratch0());
    return MemOperand(scratch0(), base_offset);
  } else {
    if (shift_size == -32) {
      __ dsra32(scratch0(), key, 0);
    } else {
      __ dsra(scratch0(), key, -shift_size);
    }
    __ Daddu(scratch0(), base, scratch0());
    return MemOperand(scratch0(), base_offset);
  }
}
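
// Worked example (illustrative, not in the original source): a negative
// shift_size arises for smi keys with 32-bit smis, where
// shift_size = element_size - (kSmiTagSize + kSmiShiftSize)
//            = element_size - 32.
// For 8-byte elements (element_size == 3) a smi key holding value k is
// k << 32, and dsra by 29 yields (k << 32) >> 29 == k * 8, i.e. the element
// offset, untagging and scaling in one shift. shift_size == -32 is the
// element_size == 0 case, handled with dsra32.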
3105
3106
3107void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3108 DCHECK(ToRegister(instr->context()).is(cp));
3109 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3110 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3111
3112 if (instr->hydrogen()->HasVectorAndSlot()) {
3113 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3114 }
3115
3116 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
Ben Murdoch097c5b22016-05-18 11:27:45 +01003117 isolate(), instr->hydrogen()->initialization_state())
3118 .code();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003119 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3120}
3121
3122
3123void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3124 Register scratch = scratch0();
3125 Register temp = scratch1();
3126 Register result = ToRegister(instr->result());
3127
3128 if (instr->hydrogen()->from_inlined()) {
3129 __ Dsubu(result, sp, 2 * kPointerSize);
Ben Murdochda12d292016-06-02 14:46:10 +01003130 } else if (instr->hydrogen()->arguments_adaptor()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003131 // Check if the calling frame is an arguments adaptor frame.
3132 Label done, adapted;
3133 __ ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
Ben Murdochda12d292016-06-02 14:46:10 +01003134 __ ld(result,
3135 MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003136 __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3137
    // Result is the frame pointer of this frame if not adapted, and of the
    // real frame below the adaptor frame if adapted.
    __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
    __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
  } else {
    __ mov(result, fp);
  }
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If there is no arguments adaptor frame, the number of arguments is fixed.
  __ Daddu(result, zero_reg, Operand(scope()->num_parameters()));
  __ Branch(&done, eq, fp, Operand(elem));

  // Arguments adaptor frame present. Get argument length from there.
  __ ld(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ld(result,
        MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, result_in_receiver;

  if (!instr->hydrogen()->known_function()) {
    // Do not transform the receiver to object for strict mode functions.
    __ ld(scratch,
          FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));

    // Do not transform the receiver to object for builtins.
    int32_t strict_mode_function_mask =
        1 << SharedFunctionInfo::kStrictModeBitWithinByte;
    int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;

    __ lbu(at,
           FieldMemOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset));
    __ And(at, at, Operand(strict_mode_function_mask));
    __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
    __ lbu(at,
           FieldMemOperand(scratch, SharedFunctionInfo::kNativeByteOffset));
    __ And(at, at, Operand(native_mask));
    __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
  }

  // Normal function. Replace undefined or null with global receiver.
  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));

  // Deoptimize if the receiver is not a JS object.
  __ SmiTst(receiver, scratch);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg));

  __ GetObjectType(receiver, scratch, scratch);
  DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
               Operand(FIRST_JS_RECEIVER_TYPE));
  __ Branch(&result_in_receiver);

  __ bind(&global_object);
  __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ ld(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
  __ ld(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));

  if (result.is(receiver)) {
    __ bind(&result_in_receiver);
  } else {
    Label result_ok;
    __ Branch(&result_ok);
    __ bind(&result_in_receiver);
    __ mov(result, receiver);
    __ bind(&result_ok);
  }
}


void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  DCHECK(receiver.is(a0));  // Used for parameter count.
  DCHECK(function.is(a1));  // Required by InvokeFunction.
  DCHECK(ToRegister(instr->result()).is(v0));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
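  // (The limit presumably keeps the argument copy's stack usage bounded;
  // anything larger deoptimizes.)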
  DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length,
               Operand(kArgumentsLimit));

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ push(receiver);
  __ Move(receiver, length);
  // The arguments start one pointer size past elements.
  __ Daddu(elements, elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
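  // Both branches below use USE_DELAY_SLOT, so the dsll following each
  // branch executes in the branch delay slot either way, precomputing the
  // byte offset of the argument to push next.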
  // length is a small non-negative integer, due to the test above.
  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
  __ dsll(scratch, length, kPointerSizeLog2);
  __ bind(&loop);
  __ Daddu(scratch, elements, scratch);
  __ ld(scratch, MemOperand(scratch));
  __ push(scratch);
  __ Dsubu(length, length, Operand(1));
  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
  __ dsll(scratch, length, kPointerSizeLog2);

  __ bind(&invoke);

  InvokeFlag flag = CALL_FUNCTION;
  if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
    DCHECK(!info()->saves_caller_doubles());
    // TODO(ishell): drop current frame before pushing arguments to the stack.
    flag = JUMP_FUNCTION;
    ParameterCount actual(a0);
    // It is safe to use t0, t1 and t2 as scratch registers here given that
    // we are not going to return to caller function anyway.
    PrepareForTailCall(actual, t0, t1, t2);
  }

  DCHECK(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver which is a0, as expected
  // by InvokeFunction.
  ParameterCount actual(receiver);
  __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort(kDoPushArgumentNotImplementedForDoubleType);
  } else {
    Register argument_reg = EmitLoadRegister(argument, at);
    __ push(argument_reg);
  }
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ ld(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoContext(LContext* instr) {
  // If there is a non-return use, the context must be moved to a register.
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in cp.
    DCHECK(result.is(cp));
  }
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  __ li(scratch0(), instr->hydrogen()->pairs());
  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
  __ Push(scratch0(), scratch1());
  CallRuntime(Runtime::kDeclareGlobals, instr);
}

void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count, int arity,
                                 bool is_tail_call, LInstruction* instr) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  Register function_reg = a1;
  LPointerMap* pointers = instr->pointer_map();

  if (can_invoke_directly) {
    // Change context.
    __ ld(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));

    // Always initialize new target and number of actual arguments.
    __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
    __ li(a0, Operand(arity));

    bool is_self_call = function.is_identical_to(info()->closure());
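    // (A self-call targets the code object being generated right now, via
    // the assembler's own CodeObject() handle; the reference is presumably
    // patched to this code's final address during relocation.)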

    // Invoke function.
    if (is_self_call) {
      Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
      if (is_tail_call) {
        __ Jump(self, RelocInfo::CODE_TARGET);
      } else {
        __ Call(self, RelocInfo::CODE_TARGET);
      }
    } else {
      __ ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
      if (is_tail_call) {
        __ Jump(at);
      } else {
        __ Call(at);
      }
    }

    if (!is_tail_call) {
      // Set up deoptimization.
      RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
    }
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount actual(arity);
    ParameterCount expected(formal_parameter_count);
    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
    __ InvokeFunction(function_reg, expected, actual, flag, generator);
  }
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  DCHECK(instr->context() != NULL);
  DCHECK(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at));

  Label done;
  Register exponent = scratch0();
  scratch = no_reg;
  __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ Move(result, input);
  __ And(at, exponent, Operand(HeapNumber::kSignMask));
  __ Branch(&done, eq, at, Operand(zero_reg));

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers.
    Register tmp1 = input.is(a1) ? a0 : a1;
    Register tmp2 = input.is(a2) ? a0 : a2;
    Register tmp3 = input.is(a3) ? a0 : a3;
    Register tmp4 = input.is(a4) ? a0 : a4;

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ Branch(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);

    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                            instr->context());
    // Set the pointer to the new heap number in tmp.
    if (!tmp1.is(v0))
      __ mov(tmp1, v0);
    // Restore input_reg after call to runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.
    __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
    __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ lwu(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}


void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  Label done;
  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
  __ mov(result, input);
  __ subu(result, zero_reg, input);
  // Overflow if result is still negative, i.e. 0x80000000.
  DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
  __ bind(&done);
}


void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  Label done;
  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
  __ mov(result, input);
  __ dsubu(result, zero_reg, input);
  // Overflow if result is still negative, i.e. 0x80000000 00000000.
  DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
  __ bind(&done);
}


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LMathAbs* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    FPURegister input = ToDoubleRegister(instr->value());
    FPURegister result = ToDoubleRegister(instr->result());
    __ abs_d(result, input);
  } else if (r.IsInteger32()) {
    EmitIntegerMathAbs(instr);
  } else if (r.IsSmi()) {
    EmitSmiMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitSmiMathAbs(instr);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LMathFloor* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch1 = scratch0();
  Register except_flag = ToRegister(instr->temp());

  __ EmitFPUTruncate(kRoundToMinusInf,
                     result,
                     input,
                     scratch1,
                     double_scratch0(),
                     except_flag);

  // Deopt if the operation did not succeed.
  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
               Operand(zero_reg));

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    Label done;
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ mfhc1(scratch1, input);  // Get exponent/sign bits.
    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
                 Operand(zero_reg));
    __ bind(&done);
  }
}


void LCodeGen::DoMathRound(LMathRound* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
  Register scratch = scratch0();
  Label done, check_sign_on_zero;

  // Extract exponent bits.
  __ mfhc1(result, input);
  __ Ext(scratch,
         result,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
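  // (A biased exponent of at most kExponentBias - 2 means |input| < 0.5;
  // e.g. 0.25 = 1.0 * 2^-2 has biased exponent kExponentBias - 2.)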
  Label skip1;
  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
  __ mov(result, zero_reg);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Branch(&check_sign_on_zero);
  } else {
    __ Branch(&done);
  }
  __ bind(&skip1);

  // The following conversion will not work with numbers
  // outside of ]-2^32, 2^32[.
  DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
               Operand(HeapNumber::kExponentBias + 32));

  // Save the original sign for later comparison.
  __ And(scratch, result, Operand(HeapNumber::kSignMask));

  __ Move(double_scratch0(), 0.5);
  __ add_d(double_scratch0(), input, double_scratch0());

  // Check sign of the result: if the sign changed, the input
  // value was in [-0.5, 0[ and the result should be -0.
  __ mfhc1(result, double_scratch0());
  // mfhc1 sign-extends, clear the upper bits.
  __ dsll32(result, result, 0);
  __ dsrl32(result, result, 0);
  __ Xor(result, result, Operand(scratch));
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // ARM uses 'mi' here, which is 'lt'
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
  } else {
    Label skip2;
    // ARM uses 'mi' here, which is 'lt'
    // Negating it results in 'ge'
    __ Branch(&skip2, ge, result, Operand(zero_reg));
    __ mov(result, zero_reg);
    __ Branch(&done);
    __ bind(&skip2);
  }

  Register except_flag = scratch;
  __ EmitFPUTruncate(kRoundToMinusInf,
                     result,
                     double_scratch0(),
                     at,
                     double_scratch1,
                     except_flag);

  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
               Operand(zero_reg));

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ bind(&check_sign_on_zero);
    __ mfhc1(scratch, input);  // Get exponent/sign bits.
    __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch,
                 Operand(zero_reg));
  }
  __ bind(&done);
}


void LCodeGen::DoMathFround(LMathFround* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
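  // Rounding to single precision and back implements Math.fround's
  // double -> float32 -> double semantics in two instructions.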
  __ cvt_s_d(result, input);
  __ cvt_d_s(result, result);
}


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ sqrt_d(result, input);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister temp = ToDoubleRegister(instr->temp());

  DCHECK(!input.is(result));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done;
  __ Move(temp, static_cast<double>(-V8_INFINITY));
  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
  // Set up Infinity in the delay slot.
  // result is overwritten if the branch is not taken.
  __ neg_d(result, temp);

  // Add +0 to convert -0 to +0.
  __ add_d(result, input, kDoubleRegZero);
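  // (Under the default rounding mode -0 + +0 is +0, so this maps -0 to +0
  // while leaving every other input unchanged.)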
  __ sqrt_d(result, result);
  __ bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(f4));
  DCHECK(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(tagged_exponent));
  DCHECK(ToDoubleRegister(instr->left()).is(f2));
  DCHECK(ToDoubleRegister(instr->result()).is(f0));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt);
    DCHECK(!a7.is(tagged_exponent));
    __ ld(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, a7, Operand(at));
    __ bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoMathExp(LMathExp* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
  DoubleRegister double_scratch2 = double_scratch0();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(
      masm(), input, result, double_scratch1, double_scratch2,
      temp1, temp2, scratch0());
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
                   0, 1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ Clz(result, input);
}

void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
                                  Register scratch1, Register scratch2,
                                  Register scratch3) {
#if DEBUG
  if (actual.is_reg()) {
    DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
  } else {
    DCHECK(!AreAliased(scratch1, scratch2, scratch3));
  }
#endif
  if (FLAG_code_comments) {
    if (actual.is_reg()) {
      Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
    } else {
      Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
    }
  }

  // Check if next frame is an arguments adaptor frame.
  Register caller_args_count_reg = scratch1;
  Label no_arguments_adaptor, formal_parameter_count_loaded;
  __ ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ld(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
  __ Branch(&no_arguments_adaptor, ne, scratch3,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

  // Drop current frame and load arguments count from arguments adaptor frame.
  __ mov(fp, scratch2);
  __ ld(caller_args_count_reg,
        MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);
  __ Branch(&formal_parameter_count_loaded);

  __ bind(&no_arguments_adaptor);
  // Load caller's formal parameter count.
  __ li(caller_args_count_reg, Operand(info()->literal()->parameter_count()));

  __ bind(&formal_parameter_count_loaded);
  __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);

  Comment(";;; }");
}

void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  HInvokeFunction* hinstr = instr->hydrogen();
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(a1));
  DCHECK(instr->HasPointerMap());

  bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;

  if (is_tail_call) {
    DCHECK(!info()->saves_caller_doubles());
    ParameterCount actual(instr->arity());
    // It is safe to use t0, t1 and t2 as scratch registers here given that
    // we are not going to return to caller function anyway.
    PrepareForTailCall(actual, t0, t1, t2);
  }

  Handle<JSFunction> known_function = hinstr->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount actual(instr->arity());
    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
    __ InvokeFunction(a1, no_reg, actual, flag, generator);
  } else {
    CallKnownFunction(known_function, hinstr->formal_parameter_count(),
                      instr->arity(), is_tail_call, instr);
  }
}


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(ToRegister(instr->result()).is(v0));

  if (instr->hydrogen()->IsTailCall()) {
    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
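    // (Tear down the current frame before jumping so the tail callee reuses
    // this frame's stack space rather than growing the stack.)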

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      __ Jump(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ Jump(target);
    }
  } else {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
      __ Call(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      generator.BeforeCall(__ CallSize(target));
      __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ Call(target);
    }
    generator.AfterCall();
  }
}

void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(a1));
  DCHECK(ToRegister(instr->result()).is(v0));

  __ li(a0, Operand(instr->arity()));
  if (instr->arity() == 1) {
    // We only need the allocation site for the case we have a length argument.
    // The case may bail out to the runtime, which will determine the correct
    // elements kind with the site.
    __ li(a2, instr->hydrogen()->site());
  } else {
    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
  }
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // A non-zero length argument creates holes, so we may need the holey
      // elements kind instead; look at the first argument.
      __ ld(a5, MemOperand(sp, 0));
      __ Branch(&packed_case, eq, a5, Operand(zero_reg));

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      __ jmp(&done);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ Daddu(code_object, code_object,
           Operand(Code::kHeaderSize - kHeapObjectTag));
  __ sd(code_object,
        FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ Daddu(result, base, Operand(ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ Daddu(result, base, offset);
  }
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch2 = scratch1();
  Register scratch1 = scratch0();

  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  if (access.IsExternalMemory()) {
    Register value = ToRegister(instr->value());
    MemOperand operand = MemOperand(object, offset);
    __ Store(value, operand, representation);
    return;
  }

  __ AssertNotSmi(object);

  DCHECK(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ sdc1(value, FieldMemOperand(object, offset));
    return;
  }

  if (instr->hydrogen()->has_transition()) {
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    __ li(scratch1, Operand(transition));
    __ sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object,
                           scratch1,
                           temp,
                           GetRAState(),
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register destination = object;
  if (!access.IsInobject()) {
    destination = scratch1;
    __ ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
  }

  if (representation.IsSmi() && SmiValuesAre32Bits() &&
      instr->hydrogen()->value()->representation().IsInteger32()) {
    DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    if (FLAG_debug_code) {
      __ Load(scratch2, FieldMemOperand(destination, offset), representation);
      __ AssertSmi(scratch2);
    }
    // Store int value directly to upper half of the smi.
    offset = SmiWordOffset(offset);
    representation = Representation::Integer32();
  }
  MemOperand operand = FieldMemOperand(destination, offset);

  if (FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ sdc1(value, operand);
  } else {
    DCHECK(instr->value()->IsRegister());
    Register value = ToRegister(instr->value());
    __ Store(value, operand, representation);
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    // Update the write barrier for the object for in-object properties.
    Register value = ToRegister(instr->value());
    __ RecordWriteField(destination,
                        offset,
                        value,
                        scratch2,
                        GetRAState(),
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        instr->hydrogen()->SmiCheckForWriteBarrier(),
                        instr->hydrogen()->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
  }

  __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
                        isolate(), instr->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
  Operand operand((int64_t)0);
  Register reg;
  if (instr->index()->IsConstantOperand()) {
    operand = ToOperand(instr->index());
    reg = ToRegister(instr->length());
    cc = CommuteCondition(cc);
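    // E.g. the usual unsigned "index >= length" deopt condition becomes
    // "length <= index" once the constant index is the right-hand operand.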
  } else {
    reg = ToRegister(instr->index());
    operand = ToOperand(instr->length());
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
    __ Branch(&done, NegateCondition(cc), reg, operand);
    __ stop("eliminated bounds check failed");
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand);
  }
}


void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
      : element_size_shift;
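  // (kSmiTagSize + kSmiShiftSize is 32 with 32-bit smis, so e.g.
  // FLOAT64_ELEMENTS with a smi key yields shift_size 3 - 32 = -29.)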
  int base_offset = instr->base_offset();

  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
    Register address = scratch0();
    FPURegister value(ToDoubleRegister(instr->value()));
    if (key_is_constant) {
      if (constant_key != 0) {
        __ Daddu(address, external_pointer,
                 Operand(constant_key << element_size_shift));
      } else {
        address = external_pointer;
      }
    } else {
      if (shift_size < 0) {
        if (shift_size == -32) {
          __ dsra32(address, key, 0);
        } else {
          __ dsra(address, key, -shift_size);
        }
      } else {
        __ dsll(address, key, shift_size);
      }
      __ Daddu(address, external_pointer, address);
    }

    if (elements_kind == FLOAT32_ELEMENTS) {
      __ cvt_s_d(double_scratch0(), value);
      __ swc1(double_scratch0(), MemOperand(address, base_offset));
    } else {  // Storing doubles, not floats.
      __ sdc1(value, MemOperand(address, base_offset));
    }
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        base_offset);
    switch (elements_kind) {
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
      case INT8_ELEMENTS:
        __ sb(value, mem_operand);
        break;
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ sh(value, mem_operand);
        break;
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ sw(value, mem_operand);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
      case FAST_STRING_WRAPPER_ELEMENTS:
      case SLOW_STRING_WRAPPER_ELEMENTS:
      case NO_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  DoubleRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  DoubleRegister double_scratch = double_scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int base_offset = instr->base_offset();
  Label not_nan, done;

  // Calculate the effective address of the slot in the array to store the
  // double value.
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    __ Daddu(scratch, elements,
             Operand((constant_key << element_size_shift) + base_offset));
  } else {
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
        : element_size_shift;
    __ Daddu(scratch, elements, Operand(base_offset));
    DCHECK((shift_size == 3) || (shift_size == -29));
    if (shift_size == 3) {
      __ dsll(at, ToRegister(instr->key()), 3);
    } else if (shift_size == -29) {
      __ dsra(at, ToRegister(instr->key()), 29);
    }
    __ Daddu(scratch, scratch, at);
  }

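  // (Canonicalizing NaNs before the store presumably keeps arbitrary NaN
  // payloads from aliasing the bit pattern that marks a hole in a holey
  // double array.)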
  if (instr->NeedsCanonicalization()) {
    __ FPUCanonicalizeNaN(double_scratch, value);
    __ sdc1(double_scratch, MemOperand(scratch, 0));
  } else {
    __ sdc1(value, MemOperand(scratch, 0));
  }
}


void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
                                            : no_reg;
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = instr->base_offset();

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ SmiScale(scratch, key, kPointerSizeLog2);
      __ daddu(store_base, elements, scratch);
    } else {
      __ Dlsa(store_base, elements, key, kPointerSizeLog2);
    }
  }

  Representation representation = instr->hydrogen()->value()->representation();
  if (representation.IsInteger32() && SmiValuesAre32Bits()) {
    DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
    if (FLAG_debug_code) {
      Register temp = scratch1();
      __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
      __ AssertSmi(temp);
    }

    // Store int value directly to upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
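    // (With the smi payload in one 32-bit half of the 64-bit slot, writing
    // the raw int32 at SmiWordOffset(offset) produces a valid smi without
    // a separate tagging step.)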
    offset = SmiWordOffset(offset);
    representation = Representation::Integer32();
  }

  __ Store(value, MemOperand(store_base, offset), representation);

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ Daddu(key, store_base, Operand(offset));
    __ RecordWrite(elements,
                   key,
                   value,
                   GetRAState(),
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed,
                   instr->hydrogen()->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // By cases: external, fast double
  if (instr->is_fixed_typed_array()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
  }

  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
                        isolate(), instr->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
  class DeferredMaybeGrowElements final : public LDeferredCode {
   public:
    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LMaybeGrowElements* instr_;
  };

  Register result = v0;
  DeferredMaybeGrowElements* deferred =
      new (zone()) DeferredMaybeGrowElements(this, instr);
  LOperand* key = instr->key();
  LOperand* current_capacity = instr->current_capacity();

  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
  DCHECK(key->IsConstantOperand() || key->IsRegister());
  DCHECK(current_capacity->IsConstantOperand() ||
         current_capacity->IsRegister());

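  // Grow whenever key >= current_capacity; the four cases below merely fold
  // constant operands into the comparison.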
  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    if (constant_key >= constant_capacity) {
      // Deferred case.
      __ jmp(deferred->entry());
    }
  } else if (key->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    __ Branch(deferred->entry(), le, ToRegister(current_capacity),
              Operand(constant_key));
  } else if (current_capacity->IsConstantOperand()) {
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    __ Branch(deferred->entry(), ge, ToRegister(key),
              Operand(constant_capacity));
  } else {
    __ Branch(deferred->entry(), ge, ToRegister(key),
              Operand(ToRegister(current_capacity)));
  }

  if (instr->elements()->IsRegister()) {
    __ mov(result, ToRegister(instr->elements()));
  } else {
    __ ld(result, ToMemOperand(instr->elements()));
  }

  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = v0;
  __ mov(result, zero_reg);

  // We have to call a stub.
  {
    PushSafepointRegistersScope scope(this);
    if (instr->object()->IsRegister()) {
      __ mov(result, ToRegister(instr->object()));
    } else {
      __ ld(result, ToMemOperand(instr->object()));
    }

    LOperand* key = instr->key();
    if (key->IsConstantOperand()) {
      LConstantOperand* constant_key = LConstantOperand::cast(key);
      int32_t int_key = ToInteger32(constant_key);
      if (Smi::IsValid(int_key)) {
        __ li(a3, Operand(Smi::FromInt(int_key)));
      } else {
        // We should never get here at runtime because there is a smi check on
        // the key before this point.
        __ stop("expected smi");
      }
    } else {
      __ mov(a3, ToRegister(key));
      __ SmiTag(a3);
    }

    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
                               instr->hydrogen()->kind());
    __ mov(a0, result);
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(
        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    __ StoreToSafepointRegisterSlot(result, result);
  }

  // Deopt on smi, which means the elements array changed to dictionary mode.
  __ SmiTst(result, at);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ ld(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ Branch(&not_applicable, ne, scratch, Operand(from_map));

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ li(new_map_reg, Operand(to_map));
    __ sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteForMap(object_reg,
                         new_map_reg,
                         scratch,
                         GetRAState(),
                         kDontSaveFPRegs);
  } else {
    DCHECK(object_reg.is(a0));
    DCHECK(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(this);
    __ li(a1, Operand(to_map));
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
  DeoptimizeIf(al, instr, Deoptimizer::kMementoFound);
  __ bind(&no_memento_found);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(a1));
  DCHECK(ToRegister(instr->right()).is(a0));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt final : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);
  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Daddu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                          instr->context());
  __ AssertSmi(v0);
  __ SmiUntag(v0);
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode final : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  DCHECK(!char_code.is(result));

  __ Branch(deferred->entry(), hi,
            char_code, Operand(String::kMaxOneByteCharCode));
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ Dlsa(result, result, char_code, kPointerSizeLog2);
  __ ld(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(deferred->entry(), eq, result, Operand(scratch));
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
                          instr->context());
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  DCHECK(output->IsDoubleRegister());
  FPURegister single_scratch = double_scratch0().low();
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ ld(scratch, ToMemOperand(input));
    __ mtc1(scratch, single_scratch);
  } else {
    __ mtc1(ToRegister(input), single_scratch);
  }
  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();

  FPURegister dbl_scratch = double_scratch0();
  __ mtc1(ToRegister(input), dbl_scratch);
  __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch);
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU final : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_,
                                       instr_->value(),
                                       instr_->temp1(),
                                       instr_->temp2(),
                                       UNSIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagU* instr_;
  };

  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
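  // (An input above Smi::kMaxValue cannot be represented as a smi and is
  // boxed as a heap number on the deferred path.)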
4599 __ SmiTag(result, input);
4600 __ bind(deferred->exit());
4601}
4602
4603
void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp1,
                                     LOperand* temp2,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register src = ToRegister(value);
  Register dst = ToRegister(instr->result());
  Register tmp1 = scratch0();
  Register tmp2 = ToRegister(temp1);
  Register tmp3 = ToRegister(temp2);
  DoubleRegister dbl_scratch = double_scratch0();

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    if (dst.is(src)) {
      __ SmiUntag(src, dst);
      __ Xor(src, src, Operand(0x80000000));
    }
    __ mtc1(src, dbl_scratch);
    __ cvt_d_w(dbl_scratch, dbl_scratch);
  } else {
    __ mtc1(src, dbl_scratch);
    __ Cvt_d_uw(dbl_scratch, dbl_scratch);
  }

  if (FLAG_inline_new) {
    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
    __ Branch(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ mov(dst, zero_reg);
    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagI and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(v0, dst);
  }

  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}


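// Boxes a double into a HeapNumber: fast inline allocation when
// --inline-new is enabled, deferred runtime allocation otherwise. The store
// of the double sits after the deferred exit so both paths share it.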
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD final : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    // We want the untagged address first for performance.
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
  } else {
    __ Branch(deferred->entry());
  }
  __ bind(deferred->exit());
  __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, zero_reg);

  PushSafepointRegistersScope scope(this);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(v0, reg);
}


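// Smi tagging on 64-bit V8 keeps the 32-bit payload in the upper word of
// the tagged value; assuming the usual mips64 layout (a combined tag-plus-
// shift of 32 bits), the transformations are roughly:
//   tag:   result = value << 32
//   untag: value  = result >> 32   (arithmetic shift)
// A uint32 input with bit 31 set has no Smi representation, hence the
// overflow deopt below.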
void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    __ And(at, input, Operand(0x80000000));
    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
  }
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    __ SmiTagCheckOverflow(output, input, at);
    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
  } else {
    __ SmiTag(output, input);
  }
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, the value of scratch won't be zero.
    __ And(scratch, input, Operand(kHeapObjectTag));
    __ SmiUntag(result, input);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg));
  } else {
    __ SmiUntag(result, input);
  }
}


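// Unboxes a tagged value into a double register. In ANY_TAGGED mode the
// input may be a Smi, a HeapNumber, or (when permitted) undefined, which is
// converted to NaN; minus zero is detected via the sign bit in the high
// word of the double and can optionally deopt. In IS_SMI mode only the
// untag-and-convert path is emitted.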
void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                DoubleRegister result_reg,
                                NumberUntagDMode mode) {
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();
  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();

  Register scratch = scratch0();
  Label convert, load_smi, done;
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
    // Heap number map check.
    __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    if (can_convert_undefined_to_nan) {
      __ Branch(&convert, ne, scratch, Operand(at));
    } else {
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch,
                   Operand(at));
    }
    // Load heap number.
    __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
      __ mfc1(at, result_reg);
      __ Branch(&done, ne, at, Operand(zero_reg));
      __ mfhc1(scratch, result_reg);  // Get exponent/sign bits.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch,
                   Operand(HeapNumber::kSignMask));
    }
    __ Branch(&done);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined (and hole) to NaN.
      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
                   Operand(at));
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ Branch(&done);
    }
  } else {
    __ SmiUntag(scratch, input_reg);
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }
  // Smi to double register conversion.
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ mtc1(scratch, result_reg);
  __ cvt_d_w(result_reg, result_reg);
  __ bind(&done);
}


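// Deferred tagged->int32 conversion for non-Smi inputs. Truncating
// conversions follow the JS bitwise-operator rules: HeapNumbers are
// truncated modulo 2^32, undefined and false become 0, true becomes 1, and
// everything else deopts. Non-truncating conversions round toward zero and
// deopt on inexact results, NaN, or (optionally) minus zero.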
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  DoubleRegister double_scratch = double_scratch0();
  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());

  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input is a tagged HeapObject.
  // Heap number map check.
  __ ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  // This 'at' value and scratch1 map value are used for tests in both clauses
  // of the if.

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label no_heap_number, check_bools, check_false;
    // Check HeapNumber map.
    __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
    __ mov(scratch2, input_reg);  // In delay slot.
    __ TruncateHeapNumberToI(input_reg, scratch2);
    __ Branch(&done);

    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ bind(&no_heap_number);
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(&check_bools, ne, input_reg, Operand(at));
    DCHECK(ToRegister(instr->result()).is(input_reg));
    __ Branch(USE_DELAY_SLOT, &done);
    __ mov(input_reg, zero_reg);  // In delay slot.

    __ bind(&check_bools);
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(&check_false, ne, scratch2, Operand(at));
    __ Branch(USE_DELAY_SLOT, &done);
    __ li(input_reg, Operand(1));  // In delay slot.

    __ bind(&check_false);
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean,
                 scratch2, Operand(at));
    __ Branch(USE_DELAY_SLOT, &done);
    __ mov(input_reg, zero_reg);  // In delay slot.
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1,
                 Operand(at));

    // Load the double value.
    __ ldc1(double_scratch,
            FieldMemOperand(input_reg, HeapNumber::kValueOffset));

    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       input_reg,
                       double_scratch,
                       scratch1,
                       double_scratch2,
                       except_flag,
                       kCheckForInexactConversion);

    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
                 Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Branch(&done, ne, input_reg, Operand(zero_reg));

      __ mfhc1(scratch1, double_scratch);  // Get exponent/sign bits.
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
                   Operand(zero_reg));
    }
  }
  __ bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI final : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  DCHECK(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    // Let the deferred code handle the HeapObject case.
    __ JumpIfNotSmi(input_reg, deferred->entry());

    // Smi to int32 conversion.
    __ SmiUntag(input_reg);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DoubleRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(instr, input_reg, result_reg, mode);
}


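// Double -> int32 conversion. The non-truncating case uses EmitFPUTruncate
// and deopts when the FPU reports an inexact or invalid conversion, i.e.
// whenever the double was not exactly representable as an int32.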
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    Register except_flag = LCodeGen::scratch1();

    __ EmitFPUTruncate(kRoundToMinusInf,
                       result_reg,
                       double_input,
                       scratch1,
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
                 Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ mfhc1(scratch1, double_input);  // Get exponent/sign bits.
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = LCodeGen::scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    Register except_flag = LCodeGen::scratch1();

    __ EmitFPUTruncate(kRoundToMinusInf,
                       result_reg,
                       double_input,
                       scratch1,
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
                 Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ mfhc1(scratch1, double_input);  // Get exponent/sign bits.
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
  __ SmiTag(result_reg, result_reg);
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ SmiTst(ToRegister(input), at);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg));
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    __ SmiTst(ToRegister(input), at);
    DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
  }
}


void LCodeGen::DoCheckArrayBufferNotNeutered(
    LCheckArrayBufferNotNeutered* instr) {
  Register view = ToRegister(instr->view());
  Register scratch = scratch0();

  __ ld(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
  __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
  __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
  DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, at, Operand(zero_reg));
}


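// Instance-type check. Interval checks compare the type against [first,
// last]; mask/tag checks AND the type with a mask and compare against a
// tag, with a single-instruction fast path when the mask is a power of two,
// e.g. for a hypothetical mask 0x10 and tag 0:
//   And(at, scratch, 0x10);   // deopt if at != 0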
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ GetObjectType(input, scratch, scratch);

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
                   Operand(first));
    } else {
      DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch,
                   Operand(first));
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch,
                     Operand(last));
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ And(at, scratch, mask);
      DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
                   at, Operand(zero_reg));
    } else {
      __ And(scratch, scratch, Operand(mask));
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
                   Operand(tag));
    }
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ li(at, Operand(cell));
    __ ld(at, FieldMemOperand(at, Cell::kValueOffset));
    DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at));
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object));
  }
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    __ mov(cp, zero_reg);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(v0, scratch0());
  }
  __ SmiTst(scratch0(), at);
  DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at,
               Operand(zero_reg));
}


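// Map check. Stability checks compile to no code at all; they just register
// a dependency on each map. Otherwise the object's map is compared against
// every candidate, and a mismatch either deopts or, when a migration target
// exists, tries Runtime::kTryMigrateInstance via the deferred path before
// re-checking.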
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps final : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    void Generate() override {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    LInstruction* instr() override { return instr_; }

   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  Register map_reg = scratch0();
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register reg = ToRegister(input);
  __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
  }
  Handle<Map> map = maps->at(maps->size() - 1).handle();
  // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ Branch(deferred->entry(), ne, map_reg, Operand(map));
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map));
  }

  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);

  // Check for heap number.
  __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
               Operand(factory()->undefined_value()));
  __ mov(result_reg, zero_reg);
  __ jmp(&done);

  // Heap number.
  __ bind(&heap_number);
  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
                                             HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
  __ jmp(&done);

  __ bind(&is_smi);
  __ ClampUint8(result_reg, scratch);

  __ bind(&done);
}


void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ FmoveHigh(result_reg, value_reg);
  } else {
    __ FmoveLow(result_reg, value_reg);
  }
}


void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  DoubleRegister result_reg = ToDoubleRegister(instr->result());
  __ Move(result_reg, lo_reg, hi_reg);
}


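// Inline allocation from the current allocation top. The deferred path
// calls into the runtime when the inline bump-pointer allocation fails;
// MustPrefillWithFiller additionally seeds the new object with filler maps,
// presumably so the heap stays iterable before the real map is installed.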
void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate final : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = NO_ALLOCATION_FLAGS;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
  }
  DCHECK(!instr->hydrogen()->IsAllocationFolded());

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= Page::kMaxRegularHeapObjectSize);
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ li(scratch, Operand(size - kHeapObjectTag));
    } else {
      __ Dsubu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
    }
    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    Label loop;
    __ bind(&loop);
    __ Dsubu(scratch, scratch, Operand(kPointerSize));
    __ Daddu(at, result, Operand(scratch));
    __ sd(scratch2, MemOperand(at));
    __ Branch(&loop, ge, scratch, Operand(zero_reg));
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size >= 0 && size <= Smi::kMaxValue) {
      __ li(v0, Operand(Smi::FromInt(size)));
      __ Push(v0);
    } else {
      // We should never get here at runtime => abort.
      __ stop("invalid allocation size");
      return;
    }
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ li(v0, Operand(Smi::FromInt(flags)));
  __ Push(v0);

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(v0, result);

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
    if (instr->hydrogen()->IsOldSpaceAllocation()) {
      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
      allocation_flags =
          static_cast<AllocationFlags>(allocation_flags | PRETENURE);
    }
    // If the allocation-folding dominator's allocation triggered a GC, the
    // allocation happened in the runtime. We have to reset the top pointer
    // to virtually undo the allocation.
    ExternalReference allocation_top =
        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
    Register top_address = scratch0();
    __ Dsubu(v0, v0, Operand(kHeapObjectTag));
    __ li(top_address, Operand(allocation_top));
    __ sd(v0, MemOperand(top_address));
    __ Daddu(v0, v0, Operand(kHeapObjectTag));
  }
}


void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
  DCHECK(instr->hydrogen()->IsAllocationFolded());
  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
  Register result = ToRegister(instr->result());
  Register scratch1 = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  AllocationFlags flags = ALLOCATION_FOLDED;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= Page::kMaxRegularHeapObjectSize);
    __ FastAllocate(size, result, scratch1, scratch2, flags);
  } else {
    Register size = ToRegister(instr->size());
    __ FastAllocate(size, result, scratch1, scratch2, flags);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->value()).is(a3));
  DCHECK(ToRegister(instr->result()).is(v0));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
  __ JumpIfNotSmi(value_register, &do_call);
  __ li(v0, Operand(isolate()->factory()->number_string()));
  __ jmp(&end);
  __ bind(&do_call);
  TypeofStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&end);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Register cmp1 = no_reg;
  Operand cmp2 = Operand(no_reg);

  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal(),
                                                  &cmp1,
                                                  &cmp2);

  DCHECK(cmp1.is_valid());
  DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());

  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition, cmp1, cmp2);
  }
}


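// typeof dispatch. Each recognized literal sets cmp1/cmp2 and a condition
// for the final branch; unknown literals branch straight to false_label and
// return kNoCondition. Delay slots are used to preload values that are safe
// regardless of the input's type.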
Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name,
                                 Register* cmp1,
                                 Operand* cmp2) {
  // This function utilizes the delay slot heavily. This is used to load
  // values that are always usable without depending on the type of the input
  // register.
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    *cmp1 = input;
    *cmp2 = Operand(at);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    *cmp1 = scratch;
    *cmp2 = Operand(FIRST_NONSTRING_TYPE);
    final_branch_condition = lt;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    *cmp1 = scratch;
    *cmp2 = Operand(SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    *cmp1 = at;
    *cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    __ Branch(USE_DELAY_SLOT, false_label, eq, at, Operand(input));
    // The first instruction of JumpIfSmi is an And - it is safe in the delay
    // slot.
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    __ JumpIfSmi(input, false_label);
    __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ And(scratch, scratch,
           Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    *cmp1 = scratch;
    *cmp2 = Operand(1 << Map::kIsCallable);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label);
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ GetObjectType(input, scratch, scratch1());
    __ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE));
    // Check for callable or undetectable objects => false.
    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ And(at, scratch,
           Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)        \
  } else if (String::Equals(type_name, factory->type##_string())) {  \
    __ JumpIfSmi(input, false_label);                                \
    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));    \
    __ LoadRoot(at, Heap::k##Type##MapRootIndex);                    \
    *cmp1 = input;                                                   \
    *cmp2 = Operand(at);                                             \
    final_branch_condition = eq;
  SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
  // clang-format on

  } else {
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
    __ Branch(false_label);
  }

  return final_branch_condition;
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg,
               Operand(zero_reg));
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(&done, hs, sp, Operand(at));
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register result = ToRegister(instr->result());
  Register object = ToRegister(instr->object());

  Label use_cache, call_runtime;
  DCHECK(object.is(a0));
  __ CheckEnumCache(&call_runtime);

  __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Branch(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(object);
  CallRuntime(Runtime::kForInEnumerate, instr);
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ ld(result,
        FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ ld(result,
        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg));

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0()));
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object, index);
  __ mov(cp, zero_reg);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(v0, result);
}


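// Loads a field by its descriptor index. The incoming Smi index encodes the
// location in its low payload bit: odd indices take the deferred
// mutable-double path, and after shifting that bit out a non-negative index
// is an in-object offset while a negative one indexes the out-of-object
// properties array.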
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

  __ And(scratch, index, Operand(Smi::FromInt(1)));
  __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
  __ dsra(index, index, 1);

  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
  __ SmiScale(scratch, index, kPointerSizeLog2);  // In delay slot.
  __ Daddu(scratch, object, scratch);
  __ ld(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ Branch(&done);

  __ bind(&out_of_object);
  __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out-of-object property index plus 1.
  __ Dsubu(scratch, result, scratch);
  __ ld(result, FieldMemOperand(scratch,
                                FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}

#undef __

}  // namespace internal
}  // namespace v8