// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "src/crankshaft/mips/lithium-codegen-mips.h"

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


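// A CallWrapper that records a safepoint for the given pointer map right
// after each wrapped call, so that the GC and the deoptimizer can locate
// the tagged values that are live across the call.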
class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetTotalFrameSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ sdc1(DoubleRegister::from_code(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ ldc1(DoubleRegister::from_code(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    // a1: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // lr: Caller's pc.
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue(StackFrame::STUB);
    } else {
      __ Prologue(info()->GeneratePreagedPrologue());
    }
    frame_is_built_ = true;
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
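      // Zap the newly reserved slots with a recognizable sentinel
      // (kSlotsZapValue) so that reads from uninitialized spill slots are
      // easy to spot in debug builds.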
      __ Subu(sp, sp, Operand(slots * kPointerSize));
      __ Push(a0, a1);
      __ Addu(a0, sp, Operand(slots * kPointerSize));
      __ li(a1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ Subu(a0, a0, Operand(kPointerSize));
      __ sw(a1, MemOperand(a0, 2 * kPointerSize));
      __ Branch(&loop, ne, a0, Operand(sp));
      __ Pop(a0, a1);
    } else {
      __ Subu(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }
  return !is_aborted();
}


void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info()->scope()->num_heap_slots() > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in a1.
    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ push(a1);
      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else if (slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(a1);
      __ CallRuntime(Runtime::kNewFunctionContext);
    }
    RecordSafepoint(deopt_mode);

    // Context is returned in v0. It replaces the context passed to us.
    // It's saved in the stack and kept live in cp.
    __ mov(cp, v0);
    __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ lw(a0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ sw(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(
              cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, a0, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ Subu(sp, sp, Operand(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
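  // Leave enough space after each call site so that it can later be patched
  // for lazy deoptimization without overwriting the following instructions.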
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ PushCommonFrame(scratch0());
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ PopCommonFrame(scratch0());
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }
  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  if (jump_table_.length() > 0) {
    Label needs_frame, call_deopt_entry;

    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0].address;

    Register entry_offset = t9;

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
      __ bind(&table_entry->label);

      DCHECK(table_entry->bailout_type == jump_table_[0].bailout_type);
      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load an immediate
      // offset which will be added to the base address later.
      __ li(entry_offset, Operand(entry - base));

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        Comment(";;; call deopt with frame");
        __ PushCommonFrame();
        __ Call(&needs_frame);
      } else {
        __ Call(&call_deopt_entry);
      }
    }

    if (needs_frame.is_linked()) {
      __ bind(&needs_frame);
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
      __ push(at);
      DCHECK(info()->IsStub());
    }

    Comment(";;; call deopt");
    __ bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    // Add the base address to the offset previously loaded in entry_offset.
    __ Addu(entry_offset, entry_offset,
            Operand(ExternalReference::ForDeoptEntry(base)));
    __ Jump(entry_offset);
  }
  __ RecordComment("]");

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::from_code(index);
}


DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
  return DoubleRegister::from_code(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


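// Returns the register that holds the value of |op|. Register operands are
// returned directly; constants and stack slots are first materialized into
// |scratch|.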
Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      AllowDeferredHandleDereference get_number;
      DCHECK(literal->IsNumber());
      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ li(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ lw(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                FloatRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(literal->IsNumber());
      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
      __ mtc1(at, flt_scratch);
      __ cvt_d_w(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot()) {
    MemOperand mem_op = ToMemOperand(op);
    __ ldc1(dbl_scratch, mem_op);
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


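// Returns the constant either as a raw int32 (for Integer32 representation)
// or as the bit pattern of the corresponding Smi (for Smi or Tagged
// representation).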
int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}


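// Maps an (always negative) argument slot index to its offset from the stack
// pointer when the function has not built an eager frame.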
static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, FrameSlotToFPOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return MemOperand(
        sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ lw(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type,
                            Register src1, const Operand& src2) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

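  // Under --deopt-every-n-times, keep a countdown in an external counter and
  // force a deoptimization whenever it reaches zero (stress testing only).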
  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ Push(a1, scratch);
    __ li(scratch, Operand(count));
    __ lw(a1, MemOperand(scratch));
    __ Subu(a1, a1, Operand(1));
    __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
    __ li(a1, Operand(FLAG_deopt_every_n_times));
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label skip;
    if (condition != al) {
      __ Branch(&skip, NegateCondition(condition), src1, src2);
    }
    __ stop("trap_on_deopt");
    __ bind(&skip);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ Branch(&jump_table_.last().label, condition, src1, src2);
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Register src1, const Operand& src2) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2);
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;

  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
    // Note: The code below even works when right contains kMinInt.
    __ subu(dividend, zero_reg, dividend);
    __ And(dividend, dividend, Operand(mask));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                   Operand(zero_reg));
    }
    __ Branch(USE_DELAY_SLOT, &done);
    __ subu(dividend, zero_reg, dividend);
  }

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, Operand(mask));
  __ bind(&done);
}


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr);
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ Mul(result, result, Operand(Abs(divisor)));
  __ Subu(result, dividend, Operand(result));

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  const Register left_reg = ToRegister(instr->left());
  const Register right_reg = ToRegister(instr->right());
  const Register result_reg = ToRegister(instr->result());

  // div runs in the background while we check for special cases.
  __ Mod(result_reg, left_reg, right_reg);

  Label done;
  // Check for x % 0: we have to deopt in this case because we can't return a
  // NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg,
                 Operand(zero_reg));
  }

  // Check for kMinInt % -1, div will return kMinInt, which is not what we
  // want. We have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1));
    } else {
      __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
      __ Branch(USE_DELAY_SLOT, &done);
      __ mov(result_reg, zero_reg);
    }
    __ bind(&no_overflow_possible);
  }

  // If we care about -0, test if the dividend is <0 and the result is 0.
  __ Branch(&done, ge, left_reg, Operand(zero_reg));
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg,
                 Operand(zero_reg));
  }
  __ bind(&done);
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt));
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ And(at, dividend, Operand(mask));
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg));
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ Subu(result, zero_reg, dividend);
    return;
  }
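  // An arithmetic right shift rounds toward negative infinity, so add
  // (2^shift - 1) to negative dividends first to get rounding toward zero.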
  uint16_t shift = WhichPowerOf2Abs(divisor);
  if (shift == 0) {
    __ Move(result, dividend);
  } else if (shift == 1) {
    __ srl(result, dividend, 31);
    __ Addu(result, dividend, Operand(result));
  } else {
    __ sra(result, dividend, 31);
    __ srl(result, result, 32 - shift);
    __ Addu(result, dividend, Operand(result));
  }
  if (shift > 0) __ sra(result, result, shift);
  if (divisor < 0) __ Subu(result, zero_reg, result);
}


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ Mul(scratch0(), result, Operand(divisor));
    __ Subu(scratch0(), scratch0(), dividend);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(),
                 Operand(zero_reg));
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());
  Register remainder = ToRegister(instr->temp());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Div(remainder, result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
                 Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
                 Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder,
                 Operand(zero_reg));
  }
}


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  DCHECK(addend.is(ToDoubleRegister(instr->result())));

  __ madd_d(addend, addend, multiplier, multiplicand);
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  Register scratch = result.is(dividend) ? scratch0() : dividend;
  DCHECK(!result.is(dividend) || !scratch.is(dividend));

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Move(result, dividend);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  uint16_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ sra(result, dividend, shift);
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.

  // dividend can be the same register as result so save the value of it
  // for checking overflow.
  __ Move(scratch, dividend);

  __ Subu(result, zero_reg, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
  }

  // Dividing by -1 is basically negation, unless we overflow.
  __ Xor(scratch, scratch, result);
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
                   Operand(zero_reg));
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ sra(result, result, shift);
    return;
  }

  Label no_overflow, done;
  __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
  __ li(result, Operand(kMinInt / divisor));
  __ Branch(&done);
  __ bind(&no_overflow);
  __ sra(result, result, shift);
  __ bind(&done);
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ Subu(result, zero_reg, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
            dividend, Operand(zero_reg));
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);
  __ jmp(&done);
  __ bind(&needs_adjustment);
  __ Addu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);
  __ Subu(result, result, Operand(1));
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());
  Register remainder = scratch0();
  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Div(remainder, result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
                 Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
                 Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  // We performed a truncating division. Correct the result if necessary.
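  // If the remainder is non-zero and has a different sign than the divisor,
  // the truncated quotient is one larger than the floored quotient.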
  Label done;
  __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
  __ Xor(remainder, remainder, Operand(divisor));
  __ Branch(&done, ge, remainder, Operand(zero_reg));
  __ Subu(result, result, Operand(1));
  __ bind(&done);
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant is handled separately.
      // If the constant is negative and left is zero, the result should be -0.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        if (overflow) {
          Label no_overflow;
          __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
          DeoptimizeIf(al, instr);
          __ bind(&no_overflow);
        } else {
          __ Subu(result, zero_reg, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
                       Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        // Nothing to do.
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ sll(result, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ Lsa(result, left, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ sll(scratch, left, shift);
          __ Subu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else {
          // Generate standard code.
          __ li(at, constant);
          __ Mul(result, left, at);
        }
    }

  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      // hi:lo = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Mul(scratch, result, result, right);
      } else {
        __ Mul(scratch, result, left, right);
      }
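      // The multiplication overflowed if the upper 32 bits (scratch) are not
      // the sign extension of the lower 32 bits (result).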
      __ sra(at, result, 31);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Mul(result, result, right);
      } else {
        __ Mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
      __ Xor(at, left, right);
      __ Branch(&done, ge, at, Operand(zero_reg));
      // Bail out if the result is minus zero.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  DCHECK(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot()) {
    right = Operand(EmitLoadRegister(right_op, at));
  } else {
    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ And(result, left, right);
      break;
    case Token::BIT_OR:
      __ Or(result, left, right);
      break;
    case Token::BIT_XOR:
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ Nor(result, zero_reg, left);
      } else {
        __ Xor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  if (right_op->IsRegister()) {
    // No need to mask the right operand on MIPS, it is built into the variable
    // shift instructions.
    switch (instr->op()) {
      case Token::ROR:
        __ Ror(result, left, Operand(ToRegister(right_op)));
        break;
      case Token::SAR:
        __ srav(result, left, ToRegister(right_op));
        break;
      case Token::SHR:
        __ srlv(result, left, ToRegister(right_op));
        if (instr->can_deopt()) {
          DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result,
                       Operand(zero_reg));
        }
        break;
      case Token::SHL:
        __ sllv(result, left, ToRegister(right_op));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ Ror(result, left, Operand(shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sra(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ srl(result, left, shift_count);
        } else {
          if (instr->can_deopt()) {
            __ And(at, left, Operand(0x80000000));
            DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at,
                         Operand(zero_reg));
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
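            // Fold the last bit of the shift into the Smi tag:
            // SmiTagCheckOverflow performs the final doubling and leaves a
            // negative value in scratch if that tagging step overflowed.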
1526 if (shift_count != 1) {
1527 __ sll(result, left, shift_count - 1);
1528 __ SmiTagCheckOverflow(result, result, scratch);
1529 } else {
1530 __ SmiTagCheckOverflow(result, left, scratch);
1531 }
1532 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
1533 Operand(zero_reg));
1534 } else {
1535 __ sll(result, left, shift_count);
1536 }
1537 } else {
1538 __ Move(result, left);
1539 }
1540 break;
1541 default:
1542 UNREACHABLE();
1543 break;
1544 }
1545 }
1546}
1547
1548
1549void LCodeGen::DoSubI(LSubI* instr) {
1550 LOperand* left = instr->left();
1551 LOperand* right = instr->right();
1552 LOperand* result = instr->result();
1553 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1554
1555 if (!can_overflow) {
1556 if (right->IsStackSlot()) {
1557 Register right_reg = EmitLoadRegister(right, at);
1558 __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
1559 } else {
1560 DCHECK(right->IsRegister() || right->IsConstantOperand());
1561 __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
1562 }
1563 } else { // can_overflow.
1564 Register scratch = scratch0();
1565 Label no_overflow_label;
1566 if (right->IsStackSlot()) {
1567 Register right_reg = EmitLoadRegister(right, scratch);
1568 __ SubBranchNoOvf(ToRegister(result), ToRegister(left),
1569 Operand(right_reg), &no_overflow_label);
1570 } else {
1571 DCHECK(right->IsRegister() || right->IsConstantOperand());
1572 __ SubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
1573 &no_overflow_label, scratch);
1574 }
1575 DeoptimizeIf(al, instr);
1576 __ bind(&no_overflow_label);
1577 }
1578}
1579
1580
1581void LCodeGen::DoConstantI(LConstantI* instr) {
1582 __ li(ToRegister(instr->result()), Operand(instr->value()));
1583}
1584
1585
1586void LCodeGen::DoConstantS(LConstantS* instr) {
1587 __ li(ToRegister(instr->result()), Operand(instr->value()));
1588}
1589
1590
1591void LCodeGen::DoConstantD(LConstantD* instr) {
1592 DCHECK(instr->result()->IsDoubleRegister());
1593 DoubleRegister result = ToDoubleRegister(instr->result());
1594 double v = instr->value();
1595 __ Move(result, v);
1596}
1597
1598
1599void LCodeGen::DoConstantE(LConstantE* instr) {
1600 __ li(ToRegister(instr->result()), Operand(instr->value()));
1601}
1602
1603
1604void LCodeGen::DoConstantT(LConstantT* instr) {
1605 Handle<Object> object = instr->value(isolate());
1606 AllowDeferredHandleDereference smi_check;
1607 __ li(ToRegister(instr->result()), object);
1608}
1609
1610
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001611MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1612 LOperand* index,
1613 String::Encoding encoding) {
1614 if (index->IsConstantOperand()) {
1615 int offset = ToInteger32(LConstantOperand::cast(index));
1616 if (encoding == String::TWO_BYTE_ENCODING) {
1617 offset *= kUC16Size;
1618 }
1619 STATIC_ASSERT(kCharSize == 1);
1620 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1621 }
1622 Register scratch = scratch0();
1623 DCHECK(!scratch.is(string));
1624 DCHECK(!scratch.is(ToRegister(index)));
1625 if (encoding == String::ONE_BYTE_ENCODING) {
1626 __ Addu(scratch, string, ToRegister(index));
1627 } else {
1628 STATIC_ASSERT(kUC16Size == 2);
1629 __ sll(scratch, ToRegister(index), 1);
1630 __ Addu(scratch, string, scratch);
1631 }
1632 return FieldMemOperand(scratch, SeqString::kHeaderSize);
1633}
1634
1635
1636void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1637 String::Encoding encoding = instr->hydrogen()->encoding();
1638 Register string = ToRegister(instr->string());
1639 Register result = ToRegister(instr->result());
1640
1641 if (FLAG_debug_code) {
1642 Register scratch = scratch0();
1643 __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1644 __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1645
1646 __ And(scratch, scratch,
1647 Operand(kStringRepresentationMask | kStringEncodingMask));
1648 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1649 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1650 __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
1651 ? one_byte_seq_type : two_byte_seq_type));
1652 __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
1653 }
1654
1655 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1656 if (encoding == String::ONE_BYTE_ENCODING) {
1657 __ lbu(result, operand);
1658 } else {
1659 __ lhu(result, operand);
1660 }
1661}
1662
1663
1664void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1665 String::Encoding encoding = instr->hydrogen()->encoding();
1666 Register string = ToRegister(instr->string());
1667 Register value = ToRegister(instr->value());
1668
1669 if (FLAG_debug_code) {
1670 Register scratch = scratch0();
1671 Register index = ToRegister(instr->index());
1672 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1673 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1674 int encoding_mask =
1675 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1676 ? one_byte_seq_type : two_byte_seq_type;
1677 __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
1678 }
1679
1680 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1681 if (encoding == String::ONE_BYTE_ENCODING) {
1682 __ sb(value, operand);
1683 } else {
1684 __ sh(value, operand);
1685 }
1686}
1687
1688
1689void LCodeGen::DoAddI(LAddI* instr) {
1690 LOperand* left = instr->left();
1691 LOperand* right = instr->right();
1692 LOperand* result = instr->result();
1693 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1694
1695 if (!can_overflow) {
1696 if (right->IsStackSlot()) {
1697 Register right_reg = EmitLoadRegister(right, at);
1698 __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
1699 } else {
1700 DCHECK(right->IsRegister() || right->IsConstantOperand());
1701 __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
1702 }
1703 } else { // can_overflow.
1704 Register scratch = scratch1();
1705 Label no_overflow_label;
1706 if (right->IsStackSlot()) {
1707 Register right_reg = EmitLoadRegister(right, scratch);
1708 __ AddBranchNoOvf(ToRegister(result), ToRegister(left),
1709 Operand(right_reg), &no_overflow_label);
1710 } else {
1711 DCHECK(right->IsRegister() || right->IsConstantOperand());
1712 __ AddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
1713 &no_overflow_label, scratch);
1714 }
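    // AddBranchNoOvf jumps to no_overflow_label when the addition does not
    // overflow; falling through here means overflow, so deoptimize
    // unconditionally.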
1715 DeoptimizeIf(al, instr);
1716 __ bind(&no_overflow_label);
1717 }
1718}
1719
1720
1721void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1722 LOperand* left = instr->left();
1723 LOperand* right = instr->right();
1724 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1725  Register scratch = scratch1();
1726  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1727    Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
1728    Register left_reg = ToRegister(left);
1729    Register right_reg = EmitLoadRegister(right, scratch0());
1730    Register result_reg = ToRegister(instr->result());
1731    Label return_right, done;
1732    __ Slt(scratch, left_reg, Operand(right_reg));
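    // scratch is 1 if left < right and 0 otherwise; the conditional moves
    // below select the minimum or maximum without branching.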
1733 if (condition == ge) {
1734 __ Movz(result_reg, left_reg, scratch);
1735 __ Movn(result_reg, right_reg, scratch);
1736 } else {
1737 DCHECK(condition == le);
1738 __ Movn(result_reg, left_reg, scratch);
1739 __ Movz(result_reg, right_reg, scratch);
1740 }
1741 } else {
1742 DCHECK(instr->hydrogen()->representation().IsDouble());
1743 FPURegister left_reg = ToDoubleRegister(left);
1744 FPURegister right_reg = ToDoubleRegister(right);
1745 FPURegister result_reg = ToDoubleRegister(instr->result());
1746    Label nan, done;
1747    if (operation == HMathMinMax::kMathMax) {
1748      __ MaxNaNCheck_d(result_reg, left_reg, right_reg, &nan);
1749    } else {
1750      DCHECK(operation == HMathMinMax::kMathMin);
1751      __ MinNaNCheck_d(result_reg, left_reg, right_reg, &nan);
1752    }
1753 __ Branch(&done);
1754
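    // Reached when either input is NaN: load the canonical NaN value from the
    // NaN heap number in the root list.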
1755    __ bind(&nan);
1756    __ LoadRoot(scratch, Heap::kNanValueRootIndex);
1757    __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
1758
1759    __ bind(&done);
1760 }
1761}
1762
1763
1764void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1765 DoubleRegister left = ToDoubleRegister(instr->left());
1766 DoubleRegister right = ToDoubleRegister(instr->right());
1767 DoubleRegister result = ToDoubleRegister(instr->result());
1768 switch (instr->op()) {
1769 case Token::ADD:
1770 __ add_d(result, left, right);
1771 break;
1772 case Token::SUB:
1773 __ sub_d(result, left, right);
1774 break;
1775 case Token::MUL:
1776 __ mul_d(result, left, right);
1777 break;
1778 case Token::DIV:
1779 __ div_d(result, left, right);
1780 break;
1781 case Token::MOD: {
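      // There is no MIPS instruction for floating-point modulus, so this goes
      // through a C helper; a0-a3 are argument registers that a C call is free
      // to clobber, hence they are saved around the call.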
1782 // Save a0-a3 on the stack.
1783 RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
1784 __ MultiPush(saved_regs);
1785
1786 __ PrepareCallCFunction(0, 2, scratch0());
1787 __ MovToFloatParameters(left, right);
1788 __ CallCFunction(
1789 ExternalReference::mod_two_doubles_operation(isolate()),
1790 0, 2);
1791 // Move the result in the double result register.
1792 __ MovFromFloatResult(result);
1793
1794 // Restore saved register.
1795 __ MultiPop(saved_regs);
1796 break;
1797 }
1798 default:
1799 UNREACHABLE();
1800 break;
1801 }
1802}
1803
1804
1805void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1806 DCHECK(ToRegister(instr->context()).is(cp));
1807 DCHECK(ToRegister(instr->left()).is(a1));
1808 DCHECK(ToRegister(instr->right()).is(a0));
1809 DCHECK(ToRegister(instr->result()).is(v0));
1810
1811  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
1812  CallCode(code, RelocInfo::CODE_TARGET, instr);
1813  // Other architectures use a nop here to signal that there is no inlined
1814  // patchable code. MIPS does not need the nop, since our marker
1815  // instruction (andi zero_reg) will never be used in normal code.
1816}
1817
1818
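// Emits a conditional branch to the instruction's true and false blocks,
// falling through instead of branching when the target block is the next one
// to be emitted.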
1819template<class InstrType>
1820void LCodeGen::EmitBranch(InstrType instr,
1821 Condition condition,
1822 Register src1,
1823 const Operand& src2) {
1824 int left_block = instr->TrueDestination(chunk_);
1825 int right_block = instr->FalseDestination(chunk_);
1826
1827 int next_block = GetNextEmittedBlock();
1828 if (right_block == left_block || condition == al) {
1829 EmitGoto(left_block);
1830 } else if (left_block == next_block) {
1831 __ Branch(chunk_->GetAssemblyLabel(right_block),
1832 NegateCondition(condition), src1, src2);
1833 } else if (right_block == next_block) {
1834 __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
1835 } else {
1836 __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
1837 __ Branch(chunk_->GetAssemblyLabel(right_block));
1838 }
1839}
1840
1841
1842template<class InstrType>
1843void LCodeGen::EmitBranchF(InstrType instr,
1844 Condition condition,
1845 FPURegister src1,
1846 FPURegister src2) {
1847 int right_block = instr->FalseDestination(chunk_);
1848 int left_block = instr->TrueDestination(chunk_);
1849
1850 int next_block = GetNextEmittedBlock();
1851 if (right_block == left_block) {
1852 EmitGoto(left_block);
1853 } else if (left_block == next_block) {
1854 __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
1855 NegateFpuCondition(condition), src1, src2);
1856 } else if (right_block == next_block) {
1857 __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
1858 condition, src1, src2);
1859 } else {
1860 __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
1861 condition, src1, src2);
1862 __ Branch(chunk_->GetAssemblyLabel(right_block));
1863 }
1864}
1865
1866
1867template <class InstrType>
1868void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition,
1869 Register src1, const Operand& src2) {
1870 int true_block = instr->TrueDestination(chunk_);
1871 __ Branch(chunk_->GetAssemblyLabel(true_block), condition, src1, src2);
1872}
1873
1874
1875template <class InstrType>
1876void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition,
1877 Register src1, const Operand& src2) {
1878 int false_block = instr->FalseDestination(chunk_);
1879 __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
1880}
1881
1882
1883template<class InstrType>
1884void LCodeGen::EmitFalseBranchF(InstrType instr,
1885 Condition condition,
1886 FPURegister src1,
1887 FPURegister src2) {
1888 int false_block = instr->FalseDestination(chunk_);
1889 __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
1890 condition, src1, src2);
1891}
1892
1893
1894void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
1895 __ stop("LDebugBreak");
1896}
1897
1898
1899void LCodeGen::DoBranch(LBranch* instr) {
1900 Representation r = instr->hydrogen()->value()->representation();
1901 if (r.IsInteger32() || r.IsSmi()) {
1902 DCHECK(!info()->IsStub());
1903 Register reg = ToRegister(instr->value());
1904 EmitBranch(instr, ne, reg, Operand(zero_reg));
1905 } else if (r.IsDouble()) {
1906 DCHECK(!info()->IsStub());
1907 DoubleRegister reg = ToDoubleRegister(instr->value());
1908 // Test the double value. Zero and NaN are false.
1909 EmitBranchF(instr, ogl, reg, kDoubleRegZero);
1910 } else {
1911 DCHECK(r.IsTagged());
1912 Register reg = ToRegister(instr->value());
1913 HType type = instr->hydrogen()->value()->type();
1914 if (type.IsBoolean()) {
1915 DCHECK(!info()->IsStub());
1916 __ LoadRoot(at, Heap::kTrueValueRootIndex);
1917 EmitBranch(instr, eq, reg, Operand(at));
1918 } else if (type.IsSmi()) {
1919 DCHECK(!info()->IsStub());
1920 EmitBranch(instr, ne, reg, Operand(zero_reg));
1921 } else if (type.IsJSArray()) {
1922 DCHECK(!info()->IsStub());
1923 EmitBranch(instr, al, zero_reg, Operand(zero_reg));
1924 } else if (type.IsHeapNumber()) {
1925 DCHECK(!info()->IsStub());
1926 DoubleRegister dbl_scratch = double_scratch0();
1927 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
1928 // Test the double value. Zero and NaN are false.
1929 EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
1930 } else if (type.IsString()) {
1931 DCHECK(!info()->IsStub());
1932 __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
1933 EmitBranch(instr, ne, at, Operand(zero_reg));
1934 } else {
1935      ToBooleanICStub::Types expected =
1936          instr->hydrogen()->expected_input_types();
1937      // Avoid deopts in the case where we've never executed this path before.
1938      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
1939
1940      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
1941        // undefined -> false.
1942 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
1943 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
1944 }
1945      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
1946        // Boolean -> its value.
1947 __ LoadRoot(at, Heap::kTrueValueRootIndex);
1948 __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
1949 __ LoadRoot(at, Heap::kFalseValueRootIndex);
1950 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
1951 }
1952      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
1953        // 'null' -> false.
1954 __ LoadRoot(at, Heap::kNullValueRootIndex);
1955 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
1956 }
1957
1958      if (expected.Contains(ToBooleanICStub::SMI)) {
1959        // Smis: 0 -> false, all other -> true.
1960 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
1961 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
1962 } else if (expected.NeedsMap()) {
1963 // If we need a map later and have a Smi -> deopt.
1964 __ SmiTst(reg, at);
1965 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
1966 }
1967
1968 const Register map = scratch0();
1969 if (expected.NeedsMap()) {
1970 __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
1971 if (expected.CanBeUndetectable()) {
1972 // Undetectable -> false.
1973 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
1974 __ And(at, at, Operand(1 << Map::kIsUndetectable));
1975 __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
1976 }
1977 }
1978
1979      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
1980        // spec object -> true.
1981 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1982 __ Branch(instr->TrueLabel(chunk_),
1983 ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
1984 }
1985
1986      if (expected.Contains(ToBooleanICStub::STRING)) {
1987        // String value -> false iff empty.
1988 Label not_string;
1989 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1990 __ Branch(&not_string, ge , at, Operand(FIRST_NONSTRING_TYPE));
1991 __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
1992 __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
1993 __ Branch(instr->FalseLabel(chunk_));
1994 __ bind(&not_string);
1995 }
1996
1997      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
1998        // Symbol value -> true.
1999 const Register scratch = scratch1();
2000 __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
2001 __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
2002 }
2003
2004      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
2005        // SIMD value -> true.
2006 const Register scratch = scratch1();
2007 __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
2008 __ Branch(instr->TrueLabel(chunk_), eq, scratch,
2009 Operand(SIMD128_VALUE_TYPE));
2010 }
2011
2012      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
2013        // heap number -> false iff +0, -0, or NaN.
2014 DoubleRegister dbl_scratch = double_scratch0();
2015 Label not_heap_number;
2016 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
2017 __ Branch(&not_heap_number, ne, map, Operand(at));
2018 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2019 __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2020 ne, dbl_scratch, kDoubleRegZero);
2021 // Falls through if dbl_scratch == 0.
2022 __ Branch(instr->FalseLabel(chunk_));
2023 __ bind(&not_heap_number);
2024 }
2025
2026 if (!expected.IsGeneric()) {
2027 // We've seen something for the first time -> deopt.
2028 // This can only happen if we are not generic already.
2029 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg,
2030 Operand(zero_reg));
2031 }
2032 }
2033 }
2034}
2035
2036
2037void LCodeGen::EmitGoto(int block) {
2038 if (!IsNextEmittedBlock(block)) {
2039 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2040 }
2041}
2042
2043
2044void LCodeGen::DoGoto(LGoto* instr) {
2045 EmitGoto(instr->block_id());
2046}
2047
2048
2049Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2050 Condition cond = kNoCondition;
2051 switch (op) {
2052 case Token::EQ:
2053 case Token::EQ_STRICT:
2054 cond = eq;
2055 break;
2056 case Token::NE:
2057 case Token::NE_STRICT:
2058 cond = ne;
2059 break;
2060 case Token::LT:
2061 cond = is_unsigned ? lo : lt;
2062 break;
2063 case Token::GT:
2064 cond = is_unsigned ? hi : gt;
2065 break;
2066 case Token::LTE:
2067 cond = is_unsigned ? ls : le;
2068 break;
2069 case Token::GTE:
2070 cond = is_unsigned ? hs : ge;
2071 break;
2072 case Token::IN:
2073 case Token::INSTANCEOF:
2074 default:
2075 UNREACHABLE();
2076 }
2077 return cond;
2078}
2079
2080
2081void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2082 LOperand* left = instr->left();
2083 LOperand* right = instr->right();
2084 bool is_unsigned =
2085 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2086 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2087 Condition cond = TokenToCondition(instr->op(), is_unsigned);
2088
2089 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2090 // We can statically evaluate the comparison.
2091 double left_val = ToDouble(LConstantOperand::cast(left));
2092 double right_val = ToDouble(LConstantOperand::cast(right));
2093    int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
2094                         ? instr->TrueDestination(chunk_)
2095                         : instr->FalseDestination(chunk_);
2096    EmitGoto(next_block);
2097 } else {
2098 if (instr->is_double()) {
2099 // Compare left and right as doubles and load the
2100 // resulting flags into the normal status register.
2101 FPURegister left_reg = ToDoubleRegister(left);
2102 FPURegister right_reg = ToDoubleRegister(right);
2103
2104 // If a NaN is involved, i.e. the result is unordered,
2105 // jump to false block label.
2106 __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
2107 left_reg, right_reg);
2108
2109 EmitBranchF(instr, cond, left_reg, right_reg);
2110 } else {
2111 Register cmp_left;
2112 Operand cmp_right = Operand(0);
2113
2114 if (right->IsConstantOperand()) {
2115 int32_t value = ToInteger32(LConstantOperand::cast(right));
2116 if (instr->hydrogen_value()->representation().IsSmi()) {
2117 cmp_left = ToRegister(left);
2118 cmp_right = Operand(Smi::FromInt(value));
2119 } else {
2120 cmp_left = ToRegister(left);
2121 cmp_right = Operand(value);
2122 }
2123 } else if (left->IsConstantOperand()) {
2124 int32_t value = ToInteger32(LConstantOperand::cast(left));
2125 if (instr->hydrogen_value()->representation().IsSmi()) {
2126 cmp_left = ToRegister(right);
2127 cmp_right = Operand(Smi::FromInt(value));
2128 } else {
2129 cmp_left = ToRegister(right);
2130 cmp_right = Operand(value);
2131 }
2132 // We commuted the operands, so commute the condition.
2133 cond = CommuteCondition(cond);
2134 } else {
2135 cmp_left = ToRegister(left);
2136 cmp_right = Operand(ToRegister(right));
2137 }
2138
2139 EmitBranch(instr, cond, cmp_left, cmp_right);
2140 }
2141 }
2142}
2143
2144
2145void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2146 Register left = ToRegister(instr->left());
2147 Register right = ToRegister(instr->right());
2148
2149 EmitBranch(instr, eq, left, Operand(right));
2150}
2151
2152
2153void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2154 if (instr->hydrogen()->representation().IsTagged()) {
2155 Register input_reg = ToRegister(instr->object());
2156 __ li(at, Operand(factory()->the_hole_value()));
2157 EmitBranch(instr, eq, input_reg, Operand(at));
2158 return;
2159 }
2160
2161 DoubleRegister input_reg = ToDoubleRegister(instr->object());
2162 EmitFalseBranchF(instr, eq, input_reg, input_reg);
2163
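  // Only NaNs reach this point (the self-comparison above fails only for NaN);
  // the hole is a NaN with a distinctive upper word, so compare the upper 32
  // bits against kHoleNanUpper32.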
2164 Register scratch = scratch0();
2165 __ FmoveHigh(scratch, input_reg);
2166 EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
2167}
2168
2169
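// Loads the instance type of |input| into |temp1|, jumping to |is_not_string|
// for Smis when a check is required; the caller branches on the returned
// condition (lt) against FIRST_NONSTRING_TYPE.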
2170Condition LCodeGen::EmitIsString(Register input,
2171 Register temp1,
2172 Label* is_not_string,
2173 SmiCheck check_needed = INLINE_SMI_CHECK) {
2174 if (check_needed == INLINE_SMI_CHECK) {
2175 __ JumpIfSmi(input, is_not_string);
2176 }
2177 __ GetObjectType(input, temp1, temp1);
2178
2179 return lt;
2180}
2181
2182
2183void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2184 Register reg = ToRegister(instr->value());
2185 Register temp1 = ToRegister(instr->temp());
2186
2187 SmiCheck check_needed =
2188 instr->hydrogen()->value()->type().IsHeapObject()
2189 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2190 Condition true_cond =
2191 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2192
2193 EmitBranch(instr, true_cond, temp1,
2194 Operand(FIRST_NONSTRING_TYPE));
2195}
2196
2197
2198void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2199 Register input_reg = EmitLoadRegister(instr->value(), at);
2200 __ And(at, input_reg, kSmiTagMask);
2201 EmitBranch(instr, eq, at, Operand(zero_reg));
2202}
2203
2204
2205void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2206 Register input = ToRegister(instr->value());
2207 Register temp = ToRegister(instr->temp());
2208
2209 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2210 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2211 }
2212 __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2213 __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2214 __ And(at, temp, Operand(1 << Map::kIsUndetectable));
2215 EmitBranch(instr, ne, at, Operand(zero_reg));
2216}
2217
2218
2219static Condition ComputeCompareCondition(Token::Value op) {
2220 switch (op) {
2221 case Token::EQ_STRICT:
2222 case Token::EQ:
2223 return eq;
2224 case Token::LT:
2225 return lt;
2226 case Token::GT:
2227 return gt;
2228 case Token::LTE:
2229 return le;
2230 case Token::GTE:
2231 return ge;
2232 default:
2233 UNREACHABLE();
2234 return kNoCondition;
2235 }
2236}
2237
2238
2239void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2240 DCHECK(ToRegister(instr->context()).is(cp));
2241 DCHECK(ToRegister(instr->left()).is(a1));
2242 DCHECK(ToRegister(instr->right()).is(a0));
2243
2244  Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
2245  CallCode(code, RelocInfo::CODE_TARGET, instr);
2246  __ LoadRoot(at, Heap::kTrueValueRootIndex);
2247  EmitBranch(instr, eq, v0, Operand(at));
2248}
2249
2250
2251static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2252 InstanceType from = instr->from();
2253 InstanceType to = instr->to();
2254 if (from == FIRST_TYPE) return to;
2255 DCHECK(from == to || to == LAST_TYPE);
2256 return from;
2257}
2258
2259
2260static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2261 InstanceType from = instr->from();
2262 InstanceType to = instr->to();
2263 if (from == to) return eq;
2264 if (to == LAST_TYPE) return hs;
2265 if (from == FIRST_TYPE) return ls;
2266 UNREACHABLE();
2267 return eq;
2268}
2269
2270
2271void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2272 Register scratch = scratch0();
2273 Register input = ToRegister(instr->value());
2274
2275 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2276 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2277 }
2278
2279 __ GetObjectType(input, scratch, scratch);
2280 EmitBranch(instr,
2281 BranchCondition(instr->hydrogen()),
2282 scratch,
2283 Operand(TestType(instr->hydrogen())));
2284}
2285
2286
2287void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2288 Register input = ToRegister(instr->value());
2289 Register result = ToRegister(instr->result());
2290
2291 __ AssertString(input);
2292
2293 __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
2294 __ IndexFromHash(result, result);
2295}
2296
2297
2298void LCodeGen::DoHasCachedArrayIndexAndBranch(
2299 LHasCachedArrayIndexAndBranch* instr) {
2300 Register input = ToRegister(instr->value());
2301 Register scratch = scratch0();
2302
2303 __ lw(scratch,
2304 FieldMemOperand(input, String::kHashFieldOffset));
2305 __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
2306 EmitBranch(instr, eq, at, Operand(zero_reg));
2307}
2308
2309
2310// Branches to a label or falls through with the answer in flags. Trashes
2311// the temp registers, but not the input.
2312void LCodeGen::EmitClassOfTest(Label* is_true,
2313 Label* is_false,
2314                               Handle<String> class_name,
2315 Register input,
2316 Register temp,
2317 Register temp2) {
2318 DCHECK(!input.is(temp));
2319 DCHECK(!input.is(temp2));
2320 DCHECK(!temp.is(temp2));
2321
2322 __ JumpIfSmi(input, is_false);
2323 __ GetObjectType(input, temp, temp2);
2324  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
2325  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2326    __ Branch(is_true, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
2327  } else {
2328    __ Branch(is_false, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
2329  }
2330
2331 // Check if the constructor in the map is a function.
2332 Register instance_type = scratch1();
2333 DCHECK(!instance_type.is(temp));
2334 __ GetMapConstructor(temp, temp, temp2, instance_type);
2335
2336 // Objects with a non-function constructor have class 'Object'.
2337 if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2338 __ Branch(is_true, ne, instance_type, Operand(JS_FUNCTION_TYPE));
2339 } else {
2340 __ Branch(is_false, ne, instance_type, Operand(JS_FUNCTION_TYPE));
2341 }
2342
2343 // temp now contains the constructor function. Grab the
2344 // instance class name from there.
2345 __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2346 __ lw(temp, FieldMemOperand(temp,
2347 SharedFunctionInfo::kInstanceClassNameOffset));
2348 // The class name we are testing against is internalized since it's a literal.
2349 // The name in the constructor is internalized because of the way the context
2350 // is booted. This routine isn't expected to work for random API-created
2351 // classes and it doesn't have to because you can't access it with natives
2352 // syntax. Since both sides are internalized it is sufficient to use an
2353 // identity comparison.
2354
2355 // End with the address of this class_name instance in temp register.
2356 // On MIPS, the caller must do the comparison with Handle<String>class_name.
2357}
2358
2359
2360void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2361 Register input = ToRegister(instr->value());
2362 Register temp = scratch0();
2363 Register temp2 = ToRegister(instr->temp());
2364 Handle<String> class_name = instr->hydrogen()->class_name();
2365
2366 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2367 class_name, input, temp, temp2);
2368
2369 EmitBranch(instr, eq, temp, Operand(class_name));
2370}
2371
2372
2373void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2374 Register reg = ToRegister(instr->value());
2375 Register temp = ToRegister(instr->temp());
2376
2377 __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2378 EmitBranch(instr, eq, temp, Operand(instr->map()));
2379}
2380
2381
2382void LCodeGen::DoHasInPrototypeChainAndBranch(
2383 LHasInPrototypeChainAndBranch* instr) {
2384 Register const object = ToRegister(instr->object());
2385 Register const object_map = scratch0();
2386 Register const object_instance_type = scratch1();
2387 Register const object_prototype = object_map;
2388 Register const prototype = ToRegister(instr->prototype());
2389
2390 // The {object} must be a spec object. It's sufficient to know that {object}
2391 // is not a smi, since all other non-spec objects have {null} prototypes and
2392 // will be ruled out below.
2393 if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
2394 __ SmiTst(object, at);
2395 EmitFalseBranch(instr, eq, at, Operand(zero_reg));
2396 }
2397
2398 // Loop through the {object}s prototype chain looking for the {prototype}.
2399 __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
2400 Label loop;
2401 __ bind(&loop);
2402
2403 // Deoptimize if the object needs to be access checked.
2404 __ lbu(object_instance_type,
2405 FieldMemOperand(object_map, Map::kBitFieldOffset));
2406 __ And(object_instance_type, object_instance_type,
2407 Operand(1 << Map::kIsAccessCheckNeeded));
2408 DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, object_instance_type,
2409 Operand(zero_reg));
2410 // Deoptimize for proxies.
2411 __ lbu(object_instance_type,
2412 FieldMemOperand(object_map, Map::kInstanceTypeOffset));
2413 DeoptimizeIf(eq, instr, Deoptimizer::kProxy, object_instance_type,
2414 Operand(JS_PROXY_TYPE));
2415
2416 __ lw(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
2417  __ LoadRoot(at, Heap::kNullValueRootIndex);
2418  EmitFalseBranch(instr, eq, object_prototype, Operand(at));
2419  EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
2420  __ Branch(USE_DELAY_SLOT, &loop);
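  // The map load below executes in the branch delay slot of the loop branch.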
2421 __ lw(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
2422}
2423
2424
2425void LCodeGen::DoCmpT(LCmpT* instr) {
2426 DCHECK(ToRegister(instr->context()).is(cp));
2427 Token::Value op = instr->op();
2428
2429  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2430  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2431 // On MIPS there is no need for a "no inlined smi code" marker (nop).
2432
2433 Condition condition = ComputeCompareCondition(op);
2434 // A minor optimization that relies on LoadRoot always emitting one
2435 // instruction.
2436 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
2437 Label done, check;
2438 __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
2439 __ bind(&check);
2440 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2441 DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
2442 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2443 __ bind(&done);
2444}
2445
2446
2447void LCodeGen::DoReturn(LReturn* instr) {
2448 if (FLAG_trace && info()->IsOptimizing()) {
2449 // Push the return value on the stack as the parameter.
2450 // Runtime::TraceExit returns its parameter in v0. We're leaving the code
2451 // managed by the register allocator and tearing down the frame, it's
2452 // safe to write to the context register.
2453 __ push(v0);
2454 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2455 __ CallRuntime(Runtime::kTraceExit);
2456 }
2457 if (info()->saves_caller_doubles()) {
2458 RestoreCallerDoubles();
2459 }
2460 if (NeedsEagerFrame()) {
2461 __ mov(sp, fp);
2462 __ Pop(ra, fp);
2463 }
2464 if (instr->has_constant_parameter_count()) {
2465 int parameter_count = ToInteger32(instr->constant_parameter_count());
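    // The +1 accounts for the receiver, which is dropped along with the
    // parameters.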
2466 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2467 if (sp_delta != 0) {
2468 __ Addu(sp, sp, Operand(sp_delta));
2469 }
2470 } else {
2471 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2472 Register reg = ToRegister(instr->parameter_count());
2473 // The argument count parameter is a smi
2474 __ SmiUntag(reg);
2475    __ Lsa(sp, sp, reg, kPointerSizeLog2);
2476  }
2477
2478 __ Jump(ra);
2479}
2480
2481
2482template <class T>
2483void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2484 Register vector_register = ToRegister(instr->temp_vector());
2485 Register slot_register = LoadWithVectorDescriptor::SlotRegister();
2486 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
2487 DCHECK(slot_register.is(a0));
2488
2489 AllowDeferredHandleDereference vector_structure_check;
2490 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2491 __ li(vector_register, vector);
2492 // No need to allocate this register.
2493 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2494 int index = vector->GetIndex(slot);
2495 __ li(slot_register, Operand(Smi::FromInt(index)));
2496}
2497
2498
2499template <class T>
2500void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
2501 Register vector_register = ToRegister(instr->temp_vector());
2502 Register slot_register = ToRegister(instr->temp_slot());
2503
2504 AllowDeferredHandleDereference vector_structure_check;
2505 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2506 __ li(vector_register, vector);
2507 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2508 int index = vector->GetIndex(slot);
2509 __ li(slot_register, Operand(Smi::FromInt(index)));
2510}
2511
2512
2513void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2514 DCHECK(ToRegister(instr->context()).is(cp));
2515  DCHECK(ToRegister(instr->result()).is(v0));
2516
2517  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2518  Handle<Code> ic =
2519      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
2520          .code();
2521  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2522}
2523
2524
2525void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2526 Register context = ToRegister(instr->context());
2527 Register result = ToRegister(instr->result());
2528
2529 __ lw(result, ContextMemOperand(context, instr->slot_index()));
2530 if (instr->hydrogen()->RequiresHoleCheck()) {
2531 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2532
2533 if (instr->hydrogen()->DeoptimizesOnHole()) {
2534 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
2535 } else {
2536 Label is_not_hole;
2537 __ Branch(&is_not_hole, ne, result, Operand(at));
2538 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2539 __ bind(&is_not_hole);
2540 }
2541 }
2542}
2543
2544
2545void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2546 Register context = ToRegister(instr->context());
2547 Register value = ToRegister(instr->value());
2548 Register scratch = scratch0();
2549 MemOperand target = ContextMemOperand(context, instr->slot_index());
2550
2551 Label skip_assignment;
2552
2553 if (instr->hydrogen()->RequiresHoleCheck()) {
2554 __ lw(scratch, target);
2555 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2556
2557 if (instr->hydrogen()->DeoptimizesOnHole()) {
2558 DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at));
2559 } else {
2560 __ Branch(&skip_assignment, ne, scratch, Operand(at));
2561 }
2562 }
2563
2564 __ sw(value, target);
2565 if (instr->hydrogen()->NeedsWriteBarrier()) {
2566 SmiCheck check_needed =
2567 instr->hydrogen()->value()->type().IsHeapObject()
2568 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2569 __ RecordWriteContextSlot(context,
2570 target.offset(),
2571 value,
2572 scratch0(),
2573 GetRAState(),
2574 kSaveFPRegs,
2575 EMIT_REMEMBERED_SET,
2576 check_needed);
2577 }
2578
2579 __ bind(&skip_assignment);
2580}
2581
2582
2583void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2584 HObjectAccess access = instr->hydrogen()->access();
2585 int offset = access.offset();
2586 Register object = ToRegister(instr->object());
2587
2588 if (access.IsExternalMemory()) {
2589 Register result = ToRegister(instr->result());
2590 MemOperand operand = MemOperand(object, offset);
2591 __ Load(result, operand, access.representation());
2592 return;
2593 }
2594
2595 if (instr->hydrogen()->representation().IsDouble()) {
2596 DoubleRegister result = ToDoubleRegister(instr->result());
2597 __ ldc1(result, FieldMemOperand(object, offset));
2598 return;
2599 }
2600
2601 Register result = ToRegister(instr->result());
2602 if (!access.IsInobject()) {
2603 __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2604 object = result;
2605 }
2606 MemOperand operand = FieldMemOperand(object, offset);
2607 __ Load(result, operand, access.representation());
2608}
2609
2610
2611void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2612 DCHECK(ToRegister(instr->context()).is(cp));
2613 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
2614 DCHECK(ToRegister(instr->result()).is(v0));
2615
2616 // Name is always in a2.
2617 __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
2618 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
2619  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
2620  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2621}
2622
2623
2624void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2625 Register scratch = scratch0();
2626 Register function = ToRegister(instr->function());
2627 Register result = ToRegister(instr->result());
2628
2629 // Get the prototype or initial map from the function.
2630 __ lw(result,
2631 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2632
2633 // Check that the function has a prototype or an initial map.
2634 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2635 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
2636
2637 // If the function does not have an initial map, we're done.
2638 Label done;
2639 __ GetObjectType(result, scratch, scratch);
2640 __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
2641
2642 // Get the prototype from the initial map.
2643 __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
2644
2645 // All done.
2646 __ bind(&done);
2647}
2648
2649
2650void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2651 Register result = ToRegister(instr->result());
2652 __ LoadRoot(result, instr->index());
2653}
2654
2655
2656void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2657 Register arguments = ToRegister(instr->arguments());
2658 Register result = ToRegister(instr->result());
2659 // There are two words between the frame pointer and the last argument.
2660  // Subtracting from length accounts for one of them; add one more.
2661 if (instr->length()->IsConstantOperand()) {
2662 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
2663 if (instr->index()->IsConstantOperand()) {
2664 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2665 int index = (const_length - const_index) + 1;
2666 __ lw(result, MemOperand(arguments, index * kPointerSize));
2667 } else {
2668 Register index = ToRegister(instr->index());
2669 __ li(at, Operand(const_length + 1));
2670 __ Subu(result, at, index);
2671      __ Lsa(at, arguments, result, kPointerSizeLog2);
2672      __ lw(result, MemOperand(at));
2673 }
2674 } else if (instr->index()->IsConstantOperand()) {
2675 Register length = ToRegister(instr->length());
2676 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2677 int loc = const_index - 1;
2678 if (loc != 0) {
2679 __ Subu(result, length, Operand(loc));
2680      __ Lsa(at, arguments, result, kPointerSizeLog2);
2681      __ lw(result, MemOperand(at));
2682    } else {
2683      __ Lsa(at, arguments, length, kPointerSizeLog2);
2684      __ lw(result, MemOperand(at));
2685 }
2686 } else {
2687 Register length = ToRegister(instr->length());
2688 Register index = ToRegister(instr->index());
2689 __ Subu(result, length, index);
2690 __ Addu(result, result, 1);
2691    __ Lsa(at, arguments, result, kPointerSizeLog2);
2692    __ lw(result, MemOperand(at));
2693 }
2694}
2695
2696
2697void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
2698 Register external_pointer = ToRegister(instr->elements());
2699 Register key = no_reg;
2700 ElementsKind elements_kind = instr->elements_kind();
2701 bool key_is_constant = instr->key()->IsConstantOperand();
2702 int constant_key = 0;
2703 if (key_is_constant) {
2704 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2705 if (constant_key & 0xF0000000) {
2706 Abort(kArrayIndexConstantValueTooBig);
2707 }
2708 } else {
2709 key = ToRegister(instr->key());
2710 }
2711 int element_size_shift = ElementsKindToShiftSize(elements_kind);
2712 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
2713 ? (element_size_shift - kSmiTagSize) : element_size_shift;
2714 int base_offset = instr->base_offset();
2715
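  // Floating-point loads go through an FPU register, widening float32 values
  // to double; all other element kinds are loaded into a general register.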
2716 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
2717 FPURegister result = ToDoubleRegister(instr->result());
2718 if (key_is_constant) {
2719 __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
2720 } else {
2721 __ sll(scratch0(), key, shift_size);
2722 __ Addu(scratch0(), scratch0(), external_pointer);
2723 }
2724 if (elements_kind == FLOAT32_ELEMENTS) {
2725 __ lwc1(result, MemOperand(scratch0(), base_offset));
2726 __ cvt_d_s(result, result);
2727    } else {  // i.e. elements_kind == FLOAT64_ELEMENTS
2728 __ ldc1(result, MemOperand(scratch0(), base_offset));
2729 }
2730 } else {
2731 Register result = ToRegister(instr->result());
2732 MemOperand mem_operand = PrepareKeyedOperand(
2733 key, external_pointer, key_is_constant, constant_key,
2734 element_size_shift, shift_size, base_offset);
2735 switch (elements_kind) {
2736 case INT8_ELEMENTS:
2737 __ lb(result, mem_operand);
2738 break;
2739 case UINT8_ELEMENTS:
2740 case UINT8_CLAMPED_ELEMENTS:
2741 __ lbu(result, mem_operand);
2742 break;
2743 case INT16_ELEMENTS:
2744 __ lh(result, mem_operand);
2745 break;
2746 case UINT16_ELEMENTS:
2747 __ lhu(result, mem_operand);
2748 break;
2749 case INT32_ELEMENTS:
2750 __ lw(result, mem_operand);
2751 break;
2752 case UINT32_ELEMENTS:
2753 __ lw(result, mem_operand);
2754 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
2755 DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue,
2756 result, Operand(0x80000000));
2757 }
2758 break;
2759 case FLOAT32_ELEMENTS:
2760 case FLOAT64_ELEMENTS:
2761 case FAST_DOUBLE_ELEMENTS:
2762 case FAST_ELEMENTS:
2763 case FAST_SMI_ELEMENTS:
2764 case FAST_HOLEY_DOUBLE_ELEMENTS:
2765 case FAST_HOLEY_ELEMENTS:
2766 case FAST_HOLEY_SMI_ELEMENTS:
2767 case DICTIONARY_ELEMENTS:
2768 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
2769 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
2770      case FAST_STRING_WRAPPER_ELEMENTS:
2771      case SLOW_STRING_WRAPPER_ELEMENTS:
2772      case NO_ELEMENTS:
2773        UNREACHABLE();
2774 break;
2775 }
2776 }
2777}
2778
2779
2780void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
2781 Register elements = ToRegister(instr->elements());
2782 bool key_is_constant = instr->key()->IsConstantOperand();
2783 Register key = no_reg;
2784 DoubleRegister result = ToDoubleRegister(instr->result());
2785 Register scratch = scratch0();
2786
2787 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
2788
2789 int base_offset = instr->base_offset();
2790 if (key_is_constant) {
2791 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2792 if (constant_key & 0xF0000000) {
2793 Abort(kArrayIndexConstantValueTooBig);
2794 }
2795 base_offset += constant_key * kDoubleSize;
2796 }
2797 __ Addu(scratch, elements, Operand(base_offset));
2798
2799 if (!key_is_constant) {
2800 key = ToRegister(instr->key());
2801 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
2802 ? (element_size_shift - kSmiTagSize) : element_size_shift;
2803    __ Lsa(scratch, scratch, key, shift_size);
2804  }
2805
2806 __ ldc1(result, MemOperand(scratch));
2807
2808 if (instr->hydrogen()->RequiresHoleCheck()) {
2809 __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
2810 DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch,
2811 Operand(kHoleNanUpper32));
2812 }
2813}
2814
2815
2816void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
2817 Register elements = ToRegister(instr->elements());
2818 Register result = ToRegister(instr->result());
2819 Register scratch = scratch0();
2820 Register store_base = scratch;
2821 int offset = instr->base_offset();
2822
2823 if (instr->key()->IsConstantOperand()) {
2824 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
2825 offset += ToInteger32(const_operand) * kPointerSize;
2826 store_base = elements;
2827 } else {
2828 Register key = ToRegister(instr->key());
2829 // Even though the HLoadKeyed instruction forces the input
2830 // representation for the key to be an integer, the input gets replaced
2831 // during bound check elimination with the index argument to the bounds
2832 // check, which can be tagged, so that case must be handled here, too.
2833 if (instr->hydrogen()->key()->representation().IsSmi()) {
2834      __ Lsa(scratch, elements, key, kPointerSizeLog2 - kSmiTagSize);
2835    } else {
2836      __ Lsa(scratch, elements, key, kPointerSizeLog2);
2837    }
2838 }
2839 __ lw(result, MemOperand(store_base, offset));
2840
2841 // Check for the hole value.
2842 if (instr->hydrogen()->RequiresHoleCheck()) {
2843 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
2844 __ SmiTst(result, scratch);
2845 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch,
2846 Operand(zero_reg));
2847 } else {
2848 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2849 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch));
2850 }
2851 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
2852 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
2853 Label done;
2854 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2855 __ Branch(&done, ne, result, Operand(scratch));
2856 if (info()->IsStub()) {
2857 // A stub can safely convert the hole to undefined only if the array
2858 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
2859 // it needs to bail out.
2860 __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
2861 __ lw(result, FieldMemOperand(result, Cell::kValueOffset));
2862 DeoptimizeIf(ne, instr, Deoptimizer::kHole, result,
2863 Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
2864 }
2865 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2866 __ bind(&done);
2867 }
2868}
2869
2870
2871void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
2872 if (instr->is_fixed_typed_array()) {
2873 DoLoadKeyedExternalArray(instr);
2874 } else if (instr->hydrogen()->representation().IsDouble()) {
2875 DoLoadKeyedFixedDoubleArray(instr);
2876 } else {
2877 DoLoadKeyedFixedArray(instr);
2878 }
2879}
2880
2881
2882MemOperand LCodeGen::PrepareKeyedOperand(Register key,
2883 Register base,
2884 bool key_is_constant,
2885 int constant_key,
2886 int element_size,
2887 int shift_size,
2888 int base_offset) {
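  // A shift_size of -1 means the key is a smi and must be scaled down by one
  // bit (the smi tag) rather than shifted up.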
2889 if (key_is_constant) {
2890 return MemOperand(base, (constant_key << element_size) + base_offset);
2891 }
2892
2893 if (base_offset == 0) {
2894 if (shift_size >= 0) {
2895 __ sll(scratch0(), key, shift_size);
2896 __ Addu(scratch0(), base, scratch0());
2897 return MemOperand(scratch0());
2898 } else {
2899 DCHECK_EQ(-1, shift_size);
2900 __ srl(scratch0(), key, 1);
2901 __ Addu(scratch0(), base, scratch0());
2902 return MemOperand(scratch0());
2903 }
2904 }
2905
2906 if (shift_size >= 0) {
2907 __ sll(scratch0(), key, shift_size);
2908 __ Addu(scratch0(), base, scratch0());
2909 return MemOperand(scratch0(), base_offset);
2910 } else {
2911 DCHECK_EQ(-1, shift_size);
2912 __ sra(scratch0(), key, 1);
2913 __ Addu(scratch0(), base, scratch0());
2914 return MemOperand(scratch0(), base_offset);
2915 }
2916}
2917
2918
2919void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2920 DCHECK(ToRegister(instr->context()).is(cp));
2921 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
2922 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
2923
2924  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
2925
2926  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
2927  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2928}
2929
2930
2931void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
2932 Register scratch = scratch0();
2933 Register temp = scratch1();
2934 Register result = ToRegister(instr->result());
2935
2936 if (instr->hydrogen()->from_inlined()) {
2937 __ Subu(result, sp, 2 * kPointerSize);
2938  } else if (instr->hydrogen()->arguments_adaptor()) {
2939    // Check if the calling frame is an arguments adaptor frame.
2940    Label done, adapted;
2941    __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2942    __ lw(result,
2943          MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
2944    __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2945
2946 // Result is the frame pointer for the frame if not adapted and for the real
2947 // frame below the adaptor frame if adapted.
2948 __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
2949 __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
2950  } else {
2951    __ mov(result, fp);
2952  }
2953}
2954
2955
2956void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
2957 Register elem = ToRegister(instr->elements());
2958 Register result = ToRegister(instr->result());
2959
2960 Label done;
2961
2962  // If there is no arguments adaptor frame, the number of arguments is fixed.
2963 __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
2964 __ Branch(&done, eq, fp, Operand(elem));
2965
2966 // Arguments adaptor frame present. Get argument length from there.
2967 __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2968 __ lw(result,
2969 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
2970 __ SmiUntag(result);
2971
2972 // Argument length is in result register.
2973 __ bind(&done);
2974}
2975
2976
2977void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
2978 Register receiver = ToRegister(instr->receiver());
2979 Register function = ToRegister(instr->function());
2980 Register result = ToRegister(instr->result());
2981 Register scratch = scratch0();
2982
2983 // If the receiver is null or undefined, we have to pass the global
2984 // object as a receiver to normal functions. Values have to be
2985 // passed unchanged to builtins and strict-mode functions.
2986 Label global_object, result_in_receiver;
2987
2988 if (!instr->hydrogen()->known_function()) {
2989 // Do not transform the receiver to object for strict mode
2990 // functions.
2991 __ lw(scratch,
2992 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2993 __ lw(scratch,
2994 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2995
2996 // Do not transform the receiver to object for builtins.
2997 int32_t strict_mode_function_mask =
2998 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
2999 int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
3000 __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
3001 __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
3002 }
3003
3004 // Normal function. Replace undefined or null with global receiver.
3005 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3006 __ Branch(&global_object, eq, receiver, Operand(scratch));
3007 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3008 __ Branch(&global_object, eq, receiver, Operand(scratch));
3009
3010 // Deoptimize if the receiver is not a JS object.
3011 __ SmiTst(receiver, scratch);
3012 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg));
3013
3014 __ GetObjectType(receiver, scratch, scratch);
3015 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
3016 Operand(FIRST_JS_RECEIVER_TYPE));
3017
3018 __ Branch(&result_in_receiver);
3019 __ bind(&global_object);
3020 __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
3021 __ lw(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
3022 __ lw(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
3023
3024 if (result.is(receiver)) {
3025 __ bind(&result_in_receiver);
3026 } else {
3027 Label result_ok;
3028 __ Branch(&result_ok);
3029 __ bind(&result_in_receiver);
3030 __ mov(result, receiver);
3031 __ bind(&result_ok);
3032 }
3033}
3034
3035
3036void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3037 Register receiver = ToRegister(instr->receiver());
3038 Register function = ToRegister(instr->function());
3039 Register length = ToRegister(instr->length());
3040 Register elements = ToRegister(instr->elements());
3041 Register scratch = scratch0();
3042 DCHECK(receiver.is(a0)); // Used for parameter count.
3043 DCHECK(function.is(a1)); // Required by InvokeFunction.
3044 DCHECK(ToRegister(instr->result()).is(v0));
3045
3046 // Copy the arguments to this function possibly from the
3047 // adaptor frame below it.
3048 const uint32_t kArgumentsLimit = 1 * KB;
3049 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length,
3050 Operand(kArgumentsLimit));
3051
3052 // Push the receiver and use the register to keep the original
3053 // number of arguments.
3054 __ push(receiver);
3055 __ Move(receiver, length);
3056 // The arguments are at a one pointer size offset from elements.
3057 __ Addu(elements, elements, Operand(1 * kPointerSize));
3058
3059 // Loop through the arguments pushing them onto the execution
3060 // stack.
3061 Label invoke, loop;
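  // Walk the arguments from the last to the first, pushing each one; the
  // shift computing the byte offset executes in the branch delay slot.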
3062 // length is a small non-negative integer, due to the test above.
3063 __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
3064 __ sll(scratch, length, 2);
3065 __ bind(&loop);
3066 __ Addu(scratch, elements, scratch);
3067 __ lw(scratch, MemOperand(scratch));
3068 __ push(scratch);
3069 __ Subu(length, length, Operand(1));
3070 __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
3071 __ sll(scratch, length, 2);
3072
3073 __ bind(&invoke);
3074
3075 InvokeFlag flag = CALL_FUNCTION;
3076 if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
3077 DCHECK(!info()->saves_caller_doubles());
3078 // TODO(ishell): drop current frame before pushing arguments to the stack.
3079 flag = JUMP_FUNCTION;
3080 ParameterCount actual(a0);
3081 // It is safe to use t0, t1 and t2 as scratch registers here given that
3082 // we are not going to return to caller function anyway.
3083 PrepareForTailCall(actual, t0, t1, t2);
3084 }
3085
3086  DCHECK(instr->HasPointerMap());
3087  LPointerMap* pointers = instr->pointer_map();
3088  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
3089  // The number of arguments is stored in receiver which is a0, as expected
3090  // by InvokeFunction.
3091  ParameterCount actual(receiver);
3092  __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
3093}
3094
3095
3096void LCodeGen::DoPushArgument(LPushArgument* instr) {
3097 LOperand* argument = instr->value();
3098 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3099 Abort(kDoPushArgumentNotImplementedForDoubleType);
3100 } else {
3101 Register argument_reg = EmitLoadRegister(argument, at);
3102 __ push(argument_reg);
3103 }
3104}
3105
3106
3107void LCodeGen::DoDrop(LDrop* instr) {
3108 __ Drop(instr->count());
3109}
3110
3111
3112void LCodeGen::DoThisFunction(LThisFunction* instr) {
3113 Register result = ToRegister(instr->result());
3114 __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3115}
3116
3117
3118void LCodeGen::DoContext(LContext* instr) {
3119 // If there is a non-return use, the context must be moved to a register.
3120 Register result = ToRegister(instr->result());
3121 if (info()->IsOptimizing()) {
3122 __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3123 } else {
3124 // If there is no frame, the context must be in cp.
3125 DCHECK(result.is(cp));
3126 }
3127}
3128
3129
3130void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3131 DCHECK(ToRegister(instr->context()).is(cp));
3132 __ li(scratch0(), instr->hydrogen()->pairs());
3133 __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3134 __ Push(scratch0(), scratch1());
3135 CallRuntime(Runtime::kDeclareGlobals, instr);
3136}
3137
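// Calls a function whose target is known at compile time. If the callee does
// not need argument adaptation (or the arity matches its formal parameter
// count), its code entry is called or jumped to directly; otherwise the
// generic InvokeFunction path performs argument adaptation.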
3138void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3139 int formal_parameter_count, int arity,
3140 bool is_tail_call, LInstruction* instr) {
3141 bool dont_adapt_arguments =
3142 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3143 bool can_invoke_directly =
3144 dont_adapt_arguments || formal_parameter_count == arity;
3145
3146 Register function_reg = a1;
3147 LPointerMap* pointers = instr->pointer_map();
3148
3149 if (can_invoke_directly) {
3150 // Change context.
3151 __ lw(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
3152
3153 // Always initialize new target and number of actual arguments.
3154 __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
3155 __ li(a0, Operand(arity));
3156
3157 bool is_self_call = function.is_identical_to(info()->closure());
3158
3159 // Invoke function.
3160 if (is_self_call) {
3161 Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
3162 if (is_tail_call) {
3163 __ Jump(self, RelocInfo::CODE_TARGET);
3164 } else {
3165 __ Call(self, RelocInfo::CODE_TARGET);
3166 }
3167 } else {
3168 __ lw(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
3169 if (is_tail_call) {
3170 __ Jump(at);
3171 } else {
3172 __ Call(at);
3173 }
3174 }
3175
3176 if (!is_tail_call) {
3177 // Set up deoptimization.
3178 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3179 }
3180 } else {
3181 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3182 ParameterCount actual(arity);
3183 ParameterCount expected(formal_parameter_count);
3184 InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
3185 __ InvokeFunction(function_reg, expected, actual, flag, generator);
3186 }
3187}
3188
3189
3190void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3191 DCHECK(instr->context() != NULL);
3192 DCHECK(ToRegister(instr->context()).is(cp));
3193 Register input = ToRegister(instr->value());
3194 Register result = ToRegister(instr->result());
3195 Register scratch = scratch0();
3196
3197 // Deoptimize if not a heap number.
3198 __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3199 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3200 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at));
3201
3202 Label done;
3203 Register exponent = scratch0();
3204 scratch = no_reg;
3205 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3206 // Check the sign of the argument. If the argument is positive, just
3207 // return it.
3208 __ Move(result, input);
3209 __ And(at, exponent, Operand(HeapNumber::kSignMask));
3210 __ Branch(&done, eq, at, Operand(zero_reg));
3211
3212 // Input is negative. Reverse its sign.
3213 // Preserve the value of all registers.
3214 {
3215 PushSafepointRegistersScope scope(this);
3216
3217 // Registers were saved at the safepoint, so we can use
3218 // many scratch registers.
3219 Register tmp1 = input.is(a1) ? a0 : a1;
3220 Register tmp2 = input.is(a2) ? a0 : a2;
3221 Register tmp3 = input.is(a3) ? a0 : a3;
3222 Register tmp4 = input.is(t0) ? a0 : t0;
3223
3224 // exponent: floating point exponent value.
3225
3226 Label allocated, slow;
3227 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3228 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3229 __ Branch(&allocated);
3230
3231 // Slow case: Call the runtime system to do the number allocation.
3232 __ bind(&slow);
3233
3234 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3235 instr->context());
3236 // Set the pointer to the new heap number in tmp.
3237 if (!tmp1.is(v0))
3238 __ mov(tmp1, v0);
3239 // Restore input_reg after call to runtime.
3240 __ LoadFromSafepointRegisterSlot(input, input);
3241 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3242
3243 __ bind(&allocated);
3244 // exponent: floating point exponent value.
3245 // tmp1: allocated heap number.
3246 __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
3247 __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3248 __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3249 __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3250
3251 __ StoreToSafepointRegisterSlot(tmp1, result);
3252 }
3253
3254 __ bind(&done);
3255}
3256
3257
3258void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3259 Register input = ToRegister(instr->value());
3260 Register result = ToRegister(instr->result());
3261 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
3262 Label done;
3263 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
3264 __ mov(result, input);
3265 __ subu(result, zero_reg, input);
3266 // Overflow if result is still negative, i.e. 0x80000000.
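  // For example, for input == kMinInt (0x80000000) the subtraction wraps back
  // to 0x80000000, which is still negative, so the check below deoptimizes.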
3267 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
3268 __ bind(&done);
3269}
3270
3271
3272void LCodeGen::DoMathAbs(LMathAbs* instr) {
3273 // Class for deferred case.
3274 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3275 public:
3276 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3277 : LDeferredCode(codegen), instr_(instr) { }
3278 void Generate() override {
3279 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3280 }
3281 LInstruction* instr() override { return instr_; }
3282
3283 private:
3284 LMathAbs* instr_;
3285 };
3286
3287 Representation r = instr->hydrogen()->value()->representation();
3288 if (r.IsDouble()) {
3289 FPURegister input = ToDoubleRegister(instr->value());
3290 FPURegister result = ToDoubleRegister(instr->result());
3291 __ abs_d(result, input);
3292 } else if (r.IsSmiOrInteger32()) {
3293 EmitIntegerMathAbs(instr);
3294 } else {
3295 // Representation is tagged.
3296 DeferredMathAbsTaggedHeapNumber* deferred =
3297 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3298 Register input = ToRegister(instr->value());
3299 // Smi check.
3300 __ JumpIfNotSmi(input, deferred->entry());
3301 // If smi, handle it directly.
3302 EmitIntegerMathAbs(instr);
3303 __ bind(deferred->exit());
3304 }
3305}
3306
3307
3308void LCodeGen::DoMathFloor(LMathFloor* instr) {
3309 DoubleRegister input = ToDoubleRegister(instr->value());
3310 Register result = ToRegister(instr->result());
3311 Register scratch1 = scratch0();
3312 Register except_flag = ToRegister(instr->temp());
3313
3314 __ EmitFPUTruncate(kRoundToMinusInf,
3315 result,
3316 input,
3317 scratch1,
3318 double_scratch0(),
3319 except_flag);
3320
3321 // Deopt if the operation did not succeed.
3322 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
3323 Operand(zero_reg));
3324
3325 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3326 // Test for -0.
3327 Label done;
3328 __ Branch(&done, ne, result, Operand(zero_reg));
3329 __ Mfhc1(scratch1, input);
3330 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
3331 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
3332 Operand(zero_reg));
3333 __ bind(&done);
3334 }
3335}
3336
3337
3338void LCodeGen::DoMathRound(LMathRound* instr) {
3339 DoubleRegister input = ToDoubleRegister(instr->value());
3340 Register result = ToRegister(instr->result());
3341 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3342 Register scratch = scratch0();
3343 Label done, check_sign_on_zero;
3344
3345 // Extract exponent bits.
3346 __ Mfhc1(result, input);
3347 __ Ext(scratch,
3348 result,
3349 HeapNumber::kExponentShift,
3350 HeapNumber::kExponentBits);
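  // scratch now holds the biased exponent; kExponentBias (1023) corresponds
  // to values in [1.0, 2.0[.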
3351
3352 // If the number is in ]-0.5, +0.5[, the result is +/- 0.
3353 Label skip1;
3354 __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
3355 __ mov(result, zero_reg);
3356 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3357 __ Branch(&check_sign_on_zero);
3358 } else {
3359 __ Branch(&done);
3360 }
3361 __ bind(&skip1);
3362
3363 // The following conversion will not work with numbers
3364 // outside of ]-2^32, 2^32[.
3365 DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
3366 Operand(HeapNumber::kExponentBias + 32));
3367
3368 // Save the original sign for later comparison.
3369 __ And(scratch, result, Operand(HeapNumber::kSignMask));
3370
3371 __ Move(double_scratch0(), 0.5);
3372 __ add_d(double_scratch0(), input, double_scratch0());
3373
3374 // Check sign of the result: if the sign changed, the input
3375 // value was in [-0.5, 0[ and the result should be -0.
3376 __ Mfhc1(result, double_scratch0());
3377 __ Xor(result, result, Operand(scratch));
3378 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3379 // ARM uses 'mi' here, which is 'lt'
3380 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
3381 } else {
3382 Label skip2;
3383 // ARM uses 'mi' here, which is 'lt'
3384 // Negating it results in 'ge'
3385 __ Branch(&skip2, ge, result, Operand(zero_reg));
3386 __ mov(result, zero_reg);
3387 __ Branch(&done);
3388 __ bind(&skip2);
3389 }
3390
3391 Register except_flag = scratch;
3392 __ EmitFPUTruncate(kRoundToMinusInf,
3393 result,
3394 double_scratch0(),
3395 at,
3396 double_scratch1,
3397 except_flag);
3398
3399 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
3400 Operand(zero_reg));
3401
3402 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3403 // Test for -0.
3404 __ Branch(&done, ne, result, Operand(zero_reg));
3405 __ bind(&check_sign_on_zero);
3406 __ Mfhc1(scratch, input);
3407 __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
3408 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch,
3409 Operand(zero_reg));
3410 }
3411 __ bind(&done);
3412}
3413
3414
3415void LCodeGen::DoMathFround(LMathFround* instr) {
3416 DoubleRegister input = ToDoubleRegister(instr->value());
3417 DoubleRegister result = ToDoubleRegister(instr->result());
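  // Round to single precision and widen back to double: this clamps the value
  // to float32 precision, which is the observable effect of Math.fround.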
3418 __ cvt_s_d(result.low(), input);
3419 __ cvt_d_s(result, result.low());
3420}
3421
3422
3423void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3424 DoubleRegister input = ToDoubleRegister(instr->value());
3425 DoubleRegister result = ToDoubleRegister(instr->result());
3426 __ sqrt_d(result, input);
3427}
3428
3429
3430void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3431 DoubleRegister input = ToDoubleRegister(instr->value());
3432 DoubleRegister result = ToDoubleRegister(instr->result());
3433 DoubleRegister temp = ToDoubleRegister(instr->temp());
3434
3435 DCHECK(!input.is(result));
3436
3437 // Note that according to ECMA-262 15.8.2.13:
3438 // Math.pow(-Infinity, 0.5) == Infinity
3439 // Math.sqrt(-Infinity) == NaN
3440 Label done;
3441 __ Move(temp, static_cast<double>(-V8_INFINITY));
3442 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
3443 // Set up Infinity in the delay slot.
3444 // result is overwritten if the branch is not taken.
3445 __ neg_d(result, temp);
3446
3447 // Add +0 to convert -0 to +0.
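  // This matters because Math.pow(-0, 0.5) must return +0, whereas a plain
  // sqrt of -0 would return -0 under IEEE 754.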
3448 __ add_d(result, input, kDoubleRegZero);
3449 __ sqrt_d(result, result);
3450 __ bind(&done);
3451}
3452
3453
3454void LCodeGen::DoPower(LPower* instr) {
3455 Representation exponent_type = instr->hydrogen()->right()->representation();
3456 // Having marked this as a call, we can use any registers.
3457 // Just make sure that the input/output registers are the expected ones.
3458 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3459 DCHECK(!instr->right()->IsDoubleRegister() ||
3460 ToDoubleRegister(instr->right()).is(f4));
3461 DCHECK(!instr->right()->IsRegister() ||
3462 ToRegister(instr->right()).is(tagged_exponent));
3463 DCHECK(ToDoubleRegister(instr->left()).is(f2));
3464 DCHECK(ToDoubleRegister(instr->result()).is(f0));
3465
3466 if (exponent_type.IsSmi()) {
3467 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3468 __ CallStub(&stub);
3469 } else if (exponent_type.IsTagged()) {
3470 Label no_deopt;
3471 __ JumpIfSmi(tagged_exponent, &no_deopt);
3472 DCHECK(!t3.is(tagged_exponent));
3473 __ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3474 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3475 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, t3, Operand(at));
3476 __ bind(&no_deopt);
3477 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3478 __ CallStub(&stub);
3479 } else if (exponent_type.IsInteger32()) {
3480 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3481 __ CallStub(&stub);
3482 } else {
3483 DCHECK(exponent_type.IsDouble());
3484 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3485 __ CallStub(&stub);
3486 }
3487}
3488
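// Math.cos/sin/exp/log are lowered to calls into V8's ieee754 runtime
// (presumably the fdlibm-derived routines); PrepareCallCFunction(0, 1, ...)
// below sets up a call with zero integer arguments and one double argument
// passed in an FPU register.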
3489void LCodeGen::DoMathCos(LMathCos* instr) {
3490 __ PrepareCallCFunction(0, 1, scratch0());
3491 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3492 __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
3493 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3494}
3495
3496void LCodeGen::DoMathSin(LMathSin* instr) {
3497 __ PrepareCallCFunction(0, 1, scratch0());
3498 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3499 __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
3500 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3501}
3502
3503void LCodeGen::DoMathExp(LMathExp* instr) {
3504 __ PrepareCallCFunction(0, 1, scratch0());
3505 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3506 __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
3507 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3508}
3509
3510
3511void LCodeGen::DoMathLog(LMathLog* instr) {
3512 __ PrepareCallCFunction(0, 1, scratch0());
3513 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3514 __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
3515 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3516}
3517
3518
3519void LCodeGen::DoMathClz32(LMathClz32* instr) {
3520 Register input = ToRegister(instr->value());
3521 Register result = ToRegister(instr->result());
3522 __ Clz(result, input);
3523}
3524
3525void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
3526 Register scratch1, Register scratch2,
3527 Register scratch3) {
3528#if DEBUG
3529 if (actual.is_reg()) {
3530 DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
3531 } else {
3532 DCHECK(!AreAliased(scratch1, scratch2, scratch3));
3533 }
3534#endif
3535 if (FLAG_code_comments) {
3536 if (actual.is_reg()) {
3537 Comment(";;; PrepareForTailCall, actual: %s {",
3538 RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
3539 actual.reg().code()));
3540 } else {
3541 Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
3542 }
3543 }
3544
3545 // Check if next frame is an arguments adaptor frame.
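  // If the frame below is an arguments adaptor frame, drop it and use the
  // (possibly larger) argument count it recorded; otherwise use the caller's
  // formal parameter count. The MacroAssembler's PrepareForTailCall then uses
  // that count to remove the caller's arguments together with the current
  // frame, so the tail-called function replaces this activation on the stack.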
3546 Register caller_args_count_reg = scratch1;
3547 Label no_arguments_adaptor, formal_parameter_count_loaded;
3548 __ lw(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3549 __ lw(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
3550 __ Branch(&no_arguments_adaptor, ne, scratch3,
3551 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3552
3553 // Drop current frame and load arguments count from arguments adaptor frame.
3554 __ mov(fp, scratch2);
3555 __ lw(caller_args_count_reg,
3556 MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
3557 __ SmiUntag(caller_args_count_reg);
3558 __ Branch(&formal_parameter_count_loaded);
3559
3560 __ bind(&no_arguments_adaptor);
3561 // Load caller's formal parameter count
3562 __ lw(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3563 __ lw(scratch1,
3564 FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
3565 __ li(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
3566
3567 __ bind(&formal_parameter_count_loaded);
3568 __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
3569
3570 Comment(";;; }");
3571}
3572
3573void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3574 HInvokeFunction* hinstr = instr->hydrogen();
3575 DCHECK(ToRegister(instr->context()).is(cp));
3576 DCHECK(ToRegister(instr->function()).is(a1));
3577 DCHECK(instr->HasPointerMap());
3578
3579 bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
3580
3581 if (is_tail_call) {
3582 DCHECK(!info()->saves_caller_doubles());
3583 ParameterCount actual(instr->arity());
3584 // It is safe to use t0, t1 and t2 as scratch registers here given that
3585 // we are not going to return to caller function anyway.
3586 PrepareForTailCall(actual, t0, t1, t2);
3587 }
3588
3589 Handle<JSFunction> known_function = hinstr->known_function();
3590 if (known_function.is_null()) {
3591 LPointerMap* pointers = instr->pointer_map();
3592 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3593 ParameterCount actual(instr->arity());
3594 InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
3595 __ InvokeFunction(a1, no_reg, actual, flag, generator);
3596 } else {
3597 CallKnownFunction(known_function, hinstr->formal_parameter_count(),
3598 instr->arity(), is_tail_call, instr);
3599 }
3600}
3601
3602
3603void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3604 DCHECK(ToRegister(instr->result()).is(v0));
3605
3606 if (instr->hydrogen()->IsTailCall()) {
3607 if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
3608
3609 if (instr->target()->IsConstantOperand()) {
3610 LConstantOperand* target = LConstantOperand::cast(instr->target());
3611 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3612 __ Jump(code, RelocInfo::CODE_TARGET);
3613 } else {
3614 DCHECK(instr->target()->IsRegister());
3615 Register target = ToRegister(instr->target());
3616 __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3617 __ Jump(target);
3618 }
3619 } else {
3620 LPointerMap* pointers = instr->pointer_map();
3621 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3622
3623 if (instr->target()->IsConstantOperand()) {
3624 LConstantOperand* target = LConstantOperand::cast(instr->target());
3625 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3626 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3627 __ Call(code, RelocInfo::CODE_TARGET);
3628 } else {
3629 DCHECK(instr->target()->IsRegister());
3630 Register target = ToRegister(instr->target());
3631 generator.BeforeCall(__ CallSize(target));
3632 __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3633 __ Call(target);
3634 }
3635 generator.AfterCall();
3636 }
3637}
3638
3639
3640void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3641 DCHECK(ToRegister(instr->context()).is(cp));
3642 DCHECK(ToRegister(instr->constructor()).is(a1));
3643 DCHECK(ToRegister(instr->result()).is(v0));
3644
3645 __ li(a0, Operand(instr->arity()));
3646 __ li(a2, instr->hydrogen()->site());
3647
3648 ElementsKind kind = instr->hydrogen()->elements_kind();
3649 AllocationSiteOverrideMode override_mode =
3650 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
3651 ? DISABLE_ALLOCATION_SITES
3652 : DONT_OVERRIDE;
3653
3654 if (instr->arity() == 0) {
3655 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
3656 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3657 } else if (instr->arity() == 1) {
3658 Label done;
3659 if (IsFastPackedElementsKind(kind)) {
3660 Label packed_case;
3661 // We might need the holey variant of the stub: a non-zero first argument
3662 // (the length) introduces holes, so check it.
3663 __ lw(t1, MemOperand(sp, 0));
3664 __ Branch(&packed_case, eq, t1, Operand(zero_reg));
3665
3666 ElementsKind holey_kind = GetHoleyElementsKind(kind);
3667 ArraySingleArgumentConstructorStub stub(isolate(),
3668 holey_kind,
3669 override_mode);
3670 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3671 __ jmp(&done);
3672 __ bind(&packed_case);
3673 }
3674
3675 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
3676 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3677 __ bind(&done);
3678 } else {
3679 ArrayNArgumentsConstructorStub stub(isolate());
3680 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3681 }
3682}
3683
3684
3685void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3686 CallRuntime(instr->function(), instr->arity(), instr);
3687}
3688
3689
3690void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3691 Register function = ToRegister(instr->function());
3692 Register code_object = ToRegister(instr->code_object());
3693 __ Addu(code_object, code_object,
3694 Operand(Code::kHeaderSize - kHeapObjectTag));
3695 __ sw(code_object,
3696 FieldMemOperand(function, JSFunction::kCodeEntryOffset));
3697}
3698
3699
3700void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3701 Register result = ToRegister(instr->result());
3702 Register base = ToRegister(instr->base_object());
3703 if (instr->offset()->IsConstantOperand()) {
3704 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
3705 __ Addu(result, base, Operand(ToInteger32(offset)));
3706 } else {
3707 Register offset = ToRegister(instr->offset());
3708 __ Addu(result, base, offset);
3709 }
3710}
3711
3712
3713void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3714 Representation representation = instr->representation();
3715
3716 Register object = ToRegister(instr->object());
3717 Register scratch = scratch0();
3718 HObjectAccess access = instr->hydrogen()->access();
3719 int offset = access.offset();
3720
3721 if (access.IsExternalMemory()) {
3722 Register value = ToRegister(instr->value());
3723 MemOperand operand = MemOperand(object, offset);
3724 __ Store(value, operand, representation);
3725 return;
3726 }
3727
3728 __ AssertNotSmi(object);
3729
3730 DCHECK(!representation.IsSmi() ||
3731 !instr->value()->IsConstantOperand() ||
3732 IsSmi(LConstantOperand::cast(instr->value())));
3733 if (representation.IsDouble()) {
3734 DCHECK(access.IsInobject());
3735 DCHECK(!instr->hydrogen()->has_transition());
3736 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
3737 DoubleRegister value = ToDoubleRegister(instr->value());
3738 __ sdc1(value, FieldMemOperand(object, offset));
3739 return;
3740 }
3741
3742 if (instr->hydrogen()->has_transition()) {
3743 Handle<Map> transition = instr->hydrogen()->transition_map();
3744 AddDeprecationDependency(transition);
3745 __ li(scratch, Operand(transition));
3746 __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3747 if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
3748 Register temp = ToRegister(instr->temp());
3749 // Update the write barrier for the map field.
3750 __ RecordWriteForMap(object,
3751 scratch,
3752 temp,
3753 GetRAState(),
3754 kSaveFPRegs);
3755 }
3756 }
3757
3758 // Do the store.
3759 Register value = ToRegister(instr->value());
3760 if (access.IsInobject()) {
3761 MemOperand operand = FieldMemOperand(object, offset);
3762 __ Store(value, operand, representation);
3763 if (instr->hydrogen()->NeedsWriteBarrier()) {
3764 // Update the write barrier for the object for in-object properties.
3765 __ RecordWriteField(object,
3766 offset,
3767 value,
3768 scratch,
3769 GetRAState(),
3770 kSaveFPRegs,
3771 EMIT_REMEMBERED_SET,
3772 instr->hydrogen()->SmiCheckForWriteBarrier(),
3773 instr->hydrogen()->PointersToHereCheckForValue());
3774 }
3775 } else {
3776 __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
3777 MemOperand operand = FieldMemOperand(scratch, offset);
3778 __ Store(value, operand, representation);
3779 if (instr->hydrogen()->NeedsWriteBarrier()) {
3780 // Update the write barrier for the properties array.
3781 // object is used as a scratch register.
3782 __ RecordWriteField(scratch,
3783 offset,
3784 value,
3785 object,
3786 GetRAState(),
3787 kSaveFPRegs,
3788 EMIT_REMEMBERED_SET,
3789 instr->hydrogen()->SmiCheckForWriteBarrier(),
3790 instr->hydrogen()->PointersToHereCheckForValue());
3791 }
3792 }
3793}
3794
3795
3796void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3797 DCHECK(ToRegister(instr->context()).is(cp));
3798 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
3799 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
3800
3801 EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
3802
3803 __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
3804 Handle<Code> ic =
3805 CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
3806 .code();
3807 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3808}
3809
3810
3811void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3812 Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
3813 Operand operand(0);
3814 Register reg;
3815 if (instr->index()->IsConstantOperand()) {
3816 operand = ToOperand(instr->index());
3817 reg = ToRegister(instr->length());
3818 cc = CommuteCondition(cc);
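    // The operands are swapped here (length on the left, constant index on
    // the right), so the condition is commuted to preserve the meaning of
    // the check.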
3819 } else {
3820 reg = ToRegister(instr->index());
3821 operand = ToOperand(instr->length());
3822 }
3823 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
3824 Label done;
3825 __ Branch(&done, NegateCondition(cc), reg, operand);
3826 __ stop("eliminated bounds check failed");
3827 __ bind(&done);
3828 } else {
3829 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand);
3830 }
3831}
3832
3833
3834void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
3835 Register external_pointer = ToRegister(instr->elements());
3836 Register key = no_reg;
3837 ElementsKind elements_kind = instr->elements_kind();
3838 bool key_is_constant = instr->key()->IsConstantOperand();
3839 int constant_key = 0;
3840 if (key_is_constant) {
3841 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3842 if (constant_key & 0xF0000000) {
3843 Abort(kArrayIndexConstantValueTooBig);
3844 }
3845 } else {
3846 key = ToRegister(instr->key());
3847 }
3848 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3849 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3850 ? (element_size_shift - kSmiTagSize) : element_size_shift;
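  // A Smi key is already the index shifted left by one (kSmiTagSize == 1), so
  // one step of the element-size shift is folded into the tag rather than
  // untagging the key first.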
3851 int base_offset = instr->base_offset();
3852
3853 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
3854 Register address = scratch0();
3855 FPURegister value(ToDoubleRegister(instr->value()));
3856 if (key_is_constant) {
3857 if (constant_key != 0) {
3858 __ Addu(address, external_pointer,
3859 Operand(constant_key << element_size_shift));
3860 } else {
3861 address = external_pointer;
3862 }
3863 } else {
3864 __ Lsa(address, external_pointer, key, shift_size);
3865 }
3866
3867 if (elements_kind == FLOAT32_ELEMENTS) {
3868 __ cvt_s_d(double_scratch0(), value);
3869 __ swc1(double_scratch0(), MemOperand(address, base_offset));
3870 } else { // Storing doubles, not floats.
3871 __ sdc1(value, MemOperand(address, base_offset));
3872 }
3873 } else {
3874 Register value(ToRegister(instr->value()));
3875 MemOperand mem_operand = PrepareKeyedOperand(
3876 key, external_pointer, key_is_constant, constant_key,
3877 element_size_shift, shift_size,
3878 base_offset);
3879 switch (elements_kind) {
3880 case UINT8_ELEMENTS:
3881 case UINT8_CLAMPED_ELEMENTS:
3882 case INT8_ELEMENTS:
3883 __ sb(value, mem_operand);
3884 break;
3885 case INT16_ELEMENTS:
3886 case UINT16_ELEMENTS:
3887 __ sh(value, mem_operand);
3888 break;
3889 case INT32_ELEMENTS:
3890 case UINT32_ELEMENTS:
3891 __ sw(value, mem_operand);
3892 break;
3893 case FLOAT32_ELEMENTS:
3894 case FLOAT64_ELEMENTS:
3895 case FAST_DOUBLE_ELEMENTS:
3896 case FAST_ELEMENTS:
3897 case FAST_SMI_ELEMENTS:
3898 case FAST_HOLEY_DOUBLE_ELEMENTS:
3899 case FAST_HOLEY_ELEMENTS:
3900 case FAST_HOLEY_SMI_ELEMENTS:
3901 case DICTIONARY_ELEMENTS:
3902 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
3903 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
3904 case FAST_STRING_WRAPPER_ELEMENTS:
3905 case SLOW_STRING_WRAPPER_ELEMENTS:
3906 case NO_ELEMENTS:
3907 UNREACHABLE();
3908 break;
3909 }
3910 }
3911}
3912
3913
3914void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
3915 DoubleRegister value = ToDoubleRegister(instr->value());
3916 Register elements = ToRegister(instr->elements());
3917 Register scratch = scratch0();
3918 Register scratch_1 = scratch1();
3919 DoubleRegister double_scratch = double_scratch0();
3920 bool key_is_constant = instr->key()->IsConstantOperand();
3921 int base_offset = instr->base_offset();
3922 Label not_nan, done;
3923
3924 // Calculate the effective address of the slot in the array to store the
3925 // double value.
3926 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3927 if (key_is_constant) {
3928 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3929 if (constant_key & 0xF0000000) {
3930 Abort(kArrayIndexConstantValueTooBig);
3931 }
3932 __ Addu(scratch, elements,
3933 Operand((constant_key << element_size_shift) + base_offset));
3934 } else {
3935 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3936 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3937 __ Addu(scratch, elements, Operand(base_offset));
3938 __ sll(at, ToRegister(instr->key()), shift_size);
3939 __ Addu(scratch, scratch, at);
3940 }
3941
3942 if (instr->NeedsCanonicalization()) {
3943 Label is_nan;
3944 // Check for NaN. All NaNs must be canonicalized.
3945 __ BranchF(NULL, &is_nan, eq, value, value);
3946 __ Branch(&not_nan);
3947
3948 // Only load the canonical NaN if the comparison above detected a NaN.
3949 __ bind(&is_nan);
3950 __ LoadRoot(scratch_1, Heap::kNanValueRootIndex);
3951 __ ldc1(double_scratch,
3952 FieldMemOperand(scratch_1, HeapNumber::kValueOffset));
3953 __ sdc1(double_scratch, MemOperand(scratch, 0));
3954 __ Branch(&done);
3955 }
3956
3957 __ bind(&not_nan);
3958 __ sdc1(value, MemOperand(scratch, 0));
3959 __ bind(&done);
3960}
3961
3962
3963void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
3964 Register value = ToRegister(instr->value());
3965 Register elements = ToRegister(instr->elements());
3966 Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
3967 : no_reg;
3968 Register scratch = scratch0();
3969 Register store_base = scratch;
3970 int offset = instr->base_offset();
3971
3972 // Do the store.
3973 if (instr->key()->IsConstantOperand()) {
3974 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
3975 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3976 offset += ToInteger32(const_operand) * kPointerSize;
3977 store_base = elements;
3978 } else {
3979 // Even though the HLoadKeyed instruction forces the input
3980 // representation for the key to be an integer, the input gets replaced
3981 // during bound check elimination with the index argument to the bounds
3982 // check, which can be tagged, so that case must be handled here, too.
3983 if (instr->hydrogen()->key()->representation().IsSmi()) {
3984 __ Lsa(scratch, elements, key, kPointerSizeLog2 - kSmiTagSize);
3985 } else {
3986 __ Lsa(scratch, elements, key, kPointerSizeLog2);
3987 }
3988 }
3989 __ sw(value, MemOperand(store_base, offset));
3990
3991 if (instr->hydrogen()->NeedsWriteBarrier()) {
3992 SmiCheck check_needed =
3993 instr->hydrogen()->value()->type().IsHeapObject()
3994 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3995 // Compute address of modified element and store it into key register.
3996 __ Addu(key, store_base, Operand(offset));
3997 __ RecordWrite(elements,
3998 key,
3999 value,
4000 GetRAState(),
4001 kSaveFPRegs,
4002 EMIT_REMEMBERED_SET,
4003 check_needed,
4004 instr->hydrogen()->PointersToHereCheckForValue());
4005 }
4006}
4007
4008
4009void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4010 // Dispatch by case: typed (external) arrays, fast double arrays, fast arrays.
4011 if (instr->is_fixed_typed_array()) {
4012 DoStoreKeyedExternalArray(instr);
4013 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4014 DoStoreKeyedFixedDoubleArray(instr);
4015 } else {
4016 DoStoreKeyedFixedArray(instr);
4017 }
4018}
4019
4020
4021void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4022 DCHECK(ToRegister(instr->context()).is(cp));
4023 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4024 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4025 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4026
4027 EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
4028
4029 Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
4030 isolate(), instr->language_mode())
4031 .code();
4032 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4033}
4034
4035
4036void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
4037 class DeferredMaybeGrowElements final : public LDeferredCode {
4038 public:
4039 DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
4040 : LDeferredCode(codegen), instr_(instr) {}
4041 void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
4042 LInstruction* instr() override { return instr_; }
4043
4044 private:
4045 LMaybeGrowElements* instr_;
4046 };
4047
4048 Register result = v0;
4049 DeferredMaybeGrowElements* deferred =
4050 new (zone()) DeferredMaybeGrowElements(this, instr);
4051 LOperand* key = instr->key();
4052 LOperand* current_capacity = instr->current_capacity();
4053
4054 DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
4055 DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
4056 DCHECK(key->IsConstantOperand() || key->IsRegister());
4057 DCHECK(current_capacity->IsConstantOperand() ||
4058 current_capacity->IsRegister());
4059
4060 if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
4061 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4062 int32_t constant_capacity =
4063 ToInteger32(LConstantOperand::cast(current_capacity));
4064 if (constant_key >= constant_capacity) {
4065 // Deferred case.
4066 __ jmp(deferred->entry());
4067 }
4068 } else if (key->IsConstantOperand()) {
4069 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4070 __ Branch(deferred->entry(), le, ToRegister(current_capacity),
4071 Operand(constant_key));
4072 } else if (current_capacity->IsConstantOperand()) {
4073 int32_t constant_capacity =
4074 ToInteger32(LConstantOperand::cast(current_capacity));
4075 __ Branch(deferred->entry(), ge, ToRegister(key),
4076 Operand(constant_capacity));
4077 } else {
4078 __ Branch(deferred->entry(), ge, ToRegister(key),
4079 Operand(ToRegister(current_capacity)));
4080 }
4081
4082 if (instr->elements()->IsRegister()) {
4083 __ mov(result, ToRegister(instr->elements()));
4084 } else {
4085 __ lw(result, ToMemOperand(instr->elements()));
4086 }
4087
4088 __ bind(deferred->exit());
4089}
4090
4091
4092void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
4093 // TODO(3095996): Get rid of this. For now, we need to make the
4094 // result register contain a valid pointer because it is already
4095 // contained in the register pointer map.
4096 Register result = v0;
4097 __ mov(result, zero_reg);
4098
4099 // We have to call a stub.
4100 {
4101 PushSafepointRegistersScope scope(this);
4102 if (instr->object()->IsRegister()) {
4103 __ mov(result, ToRegister(instr->object()));
4104 } else {
4105 __ lw(result, ToMemOperand(instr->object()));
4106 }
4107
4108 LOperand* key = instr->key();
4109 if (key->IsConstantOperand()) {
4110 LConstantOperand* constant_key = LConstantOperand::cast(key);
4111 int32_t int_key = ToInteger32(constant_key);
4112 if (Smi::IsValid(int_key)) {
4113 __ li(a3, Operand(Smi::FromInt(int_key)));
4114 } else {
4115 // We should never get here at runtime because there is a smi check on
4116 // the key before this point.
4117 __ stop("expected smi");
4118 }
4119 } else {
4120 __ mov(a3, ToRegister(key));
4121 __ SmiTag(a3);
4122 }
4123
4124 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
4125 instr->hydrogen()->kind());
4126 __ mov(a0, result);
4127 __ CallStub(&stub);
4128 RecordSafepointWithLazyDeopt(
4129 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4130 __ StoreToSafepointRegisterSlot(result, result);
4131 }
4132
4133 // Deopt on smi, which means the elements array changed to dictionary mode.
4134 __ SmiTst(result, at);
4135 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
4136}
4137
4138
4139void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4140 Register object_reg = ToRegister(instr->object());
4141 Register scratch = scratch0();
4142
4143 Handle<Map> from_map = instr->original_map();
4144 Handle<Map> to_map = instr->transitioned_map();
4145 ElementsKind from_kind = instr->from_kind();
4146 ElementsKind to_kind = instr->to_kind();
4147
4148 Label not_applicable;
4149 __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4150 __ Branch(&not_applicable, ne, scratch, Operand(from_map));
4151
4152 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4153 Register new_map_reg = ToRegister(instr->new_map_temp());
4154 __ li(new_map_reg, Operand(to_map));
4155 __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4156 // Write barrier.
4157 __ RecordWriteForMap(object_reg,
4158 new_map_reg,
4159 scratch,
4160 GetRAState(),
4161 kDontSaveFPRegs);
4162 } else {
4163 DCHECK(object_reg.is(a0));
4164 DCHECK(ToRegister(instr->context()).is(cp));
4165 PushSafepointRegistersScope scope(this);
4166 __ li(a1, Operand(to_map));
4167 TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
4168 __ CallStub(&stub);
4169 RecordSafepointWithRegisters(
4170 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
4171 }
4172 __ bind(&not_applicable);
4173}
4174
4175
4176void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4177 Register object = ToRegister(instr->object());
4178 Register temp = ToRegister(instr->temp());
4179 Label no_memento_found;
4180 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4181 DeoptimizeIf(al, instr);
4182 __ bind(&no_memento_found);
4183}
4184
4185
4186void LCodeGen::DoStringAdd(LStringAdd* instr) {
4187 DCHECK(ToRegister(instr->context()).is(cp));
4188 DCHECK(ToRegister(instr->left()).is(a1));
4189 DCHECK(ToRegister(instr->right()).is(a0));
4190 StringAddStub stub(isolate(),
4191 instr->hydrogen()->flags(),
4192 instr->hydrogen()->pretenure_flag());
4193 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4194}
4195
4196
4197void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4198 class DeferredStringCharCodeAt final : public LDeferredCode {
4199 public:
4200 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4201 : LDeferredCode(codegen), instr_(instr) { }
4202 void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
4203 LInstruction* instr() override { return instr_; }
4204
4205 private:
4206 LStringCharCodeAt* instr_;
4207 };
4208
4209 DeferredStringCharCodeAt* deferred =
4210 new(zone()) DeferredStringCharCodeAt(this, instr);
4211 StringCharLoadGenerator::Generate(masm(),
4212 ToRegister(instr->string()),
4213 ToRegister(instr->index()),
4214 ToRegister(instr->result()),
4215 deferred->entry());
4216 __ bind(deferred->exit());
4217}
4218
4219
4220void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4221 Register string = ToRegister(instr->string());
4222 Register result = ToRegister(instr->result());
4223 Register scratch = scratch0();
4224
4225 // TODO(3095996): Get rid of this. For now, we need to make the
4226 // result register contain a valid pointer because it is already
4227 // contained in the register pointer map.
4228 __ mov(result, zero_reg);
4229
4230 PushSafepointRegistersScope scope(this);
4231 __ push(string);
4232 // Push the index as a smi. This is safe because of the checks in
4233 // DoStringCharCodeAt above.
4234 if (instr->index()->IsConstantOperand()) {
4235 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4236 __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
4237 __ push(scratch);
4238 } else {
4239 Register index = ToRegister(instr->index());
4240 __ SmiTag(index);
4241 __ push(index);
4242 }
4243 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
4244 instr->context());
4245 __ AssertSmi(v0);
4246 __ SmiUntag(v0);
4247 __ StoreToSafepointRegisterSlot(v0, result);
4248}
4249
4250
4251void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4252 class DeferredStringCharFromCode final : public LDeferredCode {
4253 public:
4254 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4255 : LDeferredCode(codegen), instr_(instr) { }
4256 void Generate() override {
4257 codegen()->DoDeferredStringCharFromCode(instr_);
4258 }
4259 LInstruction* instr() override { return instr_; }
4260
4261 private:
4262 LStringCharFromCode* instr_;
4263 };
4264
4265 DeferredStringCharFromCode* deferred =
4266 new(zone()) DeferredStringCharFromCode(this, instr);
4267
4268 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4269 Register char_code = ToRegister(instr->char_code());
4270 Register result = ToRegister(instr->result());
4271 Register scratch = scratch0();
4272 DCHECK(!char_code.is(result));
4273
4274 __ Branch(deferred->entry(), hi,
4275 char_code, Operand(String::kMaxOneByteCharCode));
4276 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4277 __ Lsa(result, result, char_code, kPointerSizeLog2);
4278 __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4279 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4280 __ Branch(deferred->entry(), eq, result, Operand(scratch));
4281 __ bind(deferred->exit());
4282}
4283
4284
4285void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4286 Register char_code = ToRegister(instr->char_code());
4287 Register result = ToRegister(instr->result());
4288
4289 // TODO(3095996): Get rid of this. For now, we need to make the
4290 // result register contain a valid pointer because it is already
4291 // contained in the register pointer map.
4292 __ mov(result, zero_reg);
4293
4294 PushSafepointRegistersScope scope(this);
4295 __ SmiTag(char_code);
4296 __ push(char_code);
4297 CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
4298 instr->context());
4299 __ StoreToSafepointRegisterSlot(v0, result);
4300}
4301
4302
4303void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4304 LOperand* input = instr->value();
4305 DCHECK(input->IsRegister() || input->IsStackSlot());
4306 LOperand* output = instr->result();
4307 DCHECK(output->IsDoubleRegister());
4308 FPURegister single_scratch = double_scratch0().low();
4309 if (input->IsStackSlot()) {
4310 Register scratch = scratch0();
4311 __ lw(scratch, ToMemOperand(input));
4312 __ mtc1(scratch, single_scratch);
4313 } else {
4314 __ mtc1(ToRegister(input), single_scratch);
4315 }
4316 __ cvt_d_w(ToDoubleRegister(output), single_scratch);
4317}
4318
4319
4320void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4321 LOperand* input = instr->value();
4322 LOperand* output = instr->result();
4323
4324 __ Cvt_d_uw(ToDoubleRegister(output), ToRegister(input), f22);
4325}
4326
4327
4328void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4329 class DeferredNumberTagI final : public LDeferredCode {
4330 public:
4331 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4332 : LDeferredCode(codegen), instr_(instr) { }
4333 void Generate() override {
4334 codegen()->DoDeferredNumberTagIU(instr_,
4335 instr_->value(),
4336 instr_->temp1(),
4337 instr_->temp2(),
4338 SIGNED_INT32);
4339 }
4340 LInstruction* instr() override { return instr_; }
4341
4342 private:
4343 LNumberTagI* instr_;
4344 };
4345
4346 Register src = ToRegister(instr->value());
4347 Register dst = ToRegister(instr->result());
4348 Register overflow = scratch0();
4349
4350 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4351 __ SmiTagCheckOverflow(dst, src, overflow);
4352 __ BranchOnOverflow(deferred->entry(), overflow);
4353 __ bind(deferred->exit());
4354}
4355
4356
4357void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4358 class DeferredNumberTagU final : public LDeferredCode {
4359 public:
4360 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4361 : LDeferredCode(codegen), instr_(instr) { }
4362 void Generate() override {
4363 codegen()->DoDeferredNumberTagIU(instr_,
4364 instr_->value(),
4365 instr_->temp1(),
4366 instr_->temp2(),
4367 UNSIGNED_INT32);
4368 }
4369 LInstruction* instr() override { return instr_; }
4370
4371 private:
4372 LNumberTagU* instr_;
4373 };
4374
4375 Register input = ToRegister(instr->value());
4376 Register result = ToRegister(instr->result());
4377
4378 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4379 __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
4380 __ SmiTag(result, input);
4381 __ bind(deferred->exit());
4382}
4383
4384
4385void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4386 LOperand* value,
4387 LOperand* temp1,
4388 LOperand* temp2,
4389 IntegerSignedness signedness) {
4390 Label done, slow;
4391 Register src = ToRegister(value);
4392 Register dst = ToRegister(instr->result());
4393 Register tmp1 = scratch0();
4394 Register tmp2 = ToRegister(temp1);
4395 Register tmp3 = ToRegister(temp2);
4396 DoubleRegister dbl_scratch = double_scratch0();
4397
4398 if (signedness == SIGNED_INT32) {
4399 // There was overflow, so bits 30 and 31 of the original integer
4400 // disagree. Try to allocate a heap number in new space and store
4401 // the value in there. If that fails, call the runtime system.
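  // When dst aliases src, the tagged (overflowed) value is src << 1; the
  // arithmetic untag below followed by flipping bit 31 (which is known to
  // disagree with bit 30) recovers the original integer.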
4402 if (dst.is(src)) {
4403 __ SmiUntag(src, dst);
4404 __ Xor(src, src, Operand(0x80000000));
4405 }
4406 __ mtc1(src, dbl_scratch);
4407 __ cvt_d_w(dbl_scratch, dbl_scratch);
4408 } else {
4409 __ Cvt_d_uw(dbl_scratch, src, f22);
4410 }
4411
4412 if (FLAG_inline_new) {
4413 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4414 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
4415 __ Branch(&done);
4416 }
4417
4418 // Slow case: Call the runtime system to do the number allocation.
4419 __ bind(&slow);
4420 {
4421 // TODO(3095996): Put a valid pointer value in the stack slot where the
4422 // result register is stored, as this register is in the pointer map, but
4423 // contains an integer value.
4424 __ mov(dst, zero_reg);
4425
4426 // Preserve the value of all registers.
4427 PushSafepointRegistersScope scope(this);
4428
4429 // NumberTagI and NumberTagD use the context from the frame, rather than
4430 // the environment's HContext or HInlinedContext value.
4431 // They only call Runtime::kAllocateHeapNumber.
4432 // The corresponding HChange instructions are added in a phase that does
4433 // not have easy access to the local context.
4434 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4435 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4436 RecordSafepointWithRegisters(
4437 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4438 __ StoreToSafepointRegisterSlot(v0, dst);
4439 }
4440
4441 // Done. Put the value in dbl_scratch into the value of the allocated heap
4442 // number.
4443 __ bind(&done);
4444 __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
4445}
4446
4447
4448void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4449 class DeferredNumberTagD final : public LDeferredCode {
4450 public:
4451 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4452 : LDeferredCode(codegen), instr_(instr) { }
4453 void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
4454 LInstruction* instr() override { return instr_; }
4455
4456 private:
4457 LNumberTagD* instr_;
4458 };
4459
4460 DoubleRegister input_reg = ToDoubleRegister(instr->value());
4461 Register scratch = scratch0();
4462 Register reg = ToRegister(instr->result());
4463 Register temp1 = ToRegister(instr->temp());
4464 Register temp2 = ToRegister(instr->temp2());
4465
4466 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4467 if (FLAG_inline_new) {
4468 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4469 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
4470 } else {
4471 __ Branch(deferred->entry());
4472 }
4473 __ bind(deferred->exit());
4474 __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
4475 // The heap number in reg is already tagged, so no further tagging is needed.
4476}
4477
4478
4479void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4480 // TODO(3095996): Get rid of this. For now, we need to make the
4481 // result register contain a valid pointer because it is already
4482 // contained in the register pointer map.
4483 Register reg = ToRegister(instr->result());
4484 __ mov(reg, zero_reg);
4485
4486 PushSafepointRegistersScope scope(this);
4487 // NumberTagI and NumberTagD use the context from the frame, rather than
4488 // the environment's HContext or HInlinedContext value.
4489 // They only call Runtime::kAllocateHeapNumber.
4490 // The corresponding HChange instructions are added in a phase that does
4491 // not have easy access to the local context.
4492 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4493 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4494 RecordSafepointWithRegisters(
4495 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4496 __ StoreToSafepointRegisterSlot(v0, reg);
4497}
4498
4499
4500void LCodeGen::DoSmiTag(LSmiTag* instr) {
4501 HChange* hchange = instr->hydrogen();
4502 Register input = ToRegister(instr->value());
4503 Register output = ToRegister(instr->result());
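  // A 32-bit Smi holds a 31-bit signed payload. For values known to be
  // uint32, anything with one of the top two bits set (>= 0x40000000) cannot
  // be represented, hence the 0xc0000000 mask below; signed values rely on
  // the overflow check in SmiTagCheckOverflow instead.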
4504 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4505 hchange->value()->CheckFlag(HValue::kUint32)) {
4506 __ And(at, input, Operand(0xc0000000));
4507 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
4508 }
4509 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4510 !hchange->value()->CheckFlag(HValue::kUint32)) {
4511 __ SmiTagCheckOverflow(output, input, at);
4512 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
4513 } else {
4514 __ SmiTag(output, input);
4515 }
4516}
4517
4518
4519void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4520 Register scratch = scratch0();
4521 Register input = ToRegister(instr->value());
4522 Register result = ToRegister(instr->result());
4523 if (instr->needs_check()) {
4524 STATIC_ASSERT(kHeapObjectTag == 1);
4525 // If the input is a HeapObject, value of scratch won't be zero.
4526 __ And(scratch, input, Operand(kHeapObjectTag));
4527 __ SmiUntag(result, input);
4528 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg));
4529 } else {
4530 __ SmiUntag(result, input);
4531 }
4532}
4533
4534
4535void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4536 DoubleRegister result_reg,
4537 NumberUntagDMode mode) {
4538 bool can_convert_undefined_to_nan =
4539 instr->hydrogen()->can_convert_undefined_to_nan();
4540 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4541
4542 Register scratch = scratch0();
4543 Label convert, load_smi, done;
4544 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4545 // Smi check.
4546 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4547 // Heap number map check.
4548 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4549 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4550 if (can_convert_undefined_to_nan) {
4551 __ Branch(&convert, ne, scratch, Operand(at));
4552 } else {
4553 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch,
4554 Operand(at));
4555 }
4556 // Load heap number.
4557 __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4558 if (deoptimize_on_minus_zero) {
4559 __ mfc1(at, result_reg.low());
4560 __ Branch(&done, ne, at, Operand(zero_reg));
4561 __ Mfhc1(scratch, result_reg);
4562 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch,
4563 Operand(HeapNumber::kSignMask));
4564 }
4565 __ Branch(&done);
4566 if (can_convert_undefined_to_nan) {
4567 __ bind(&convert);
4568 // Convert undefined (and hole) to NaN.
4569 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4570 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
4571 Operand(at));
4572 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4573 __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4574 __ Branch(&done);
4575 }
4576 } else {
4577 __ SmiUntag(scratch, input_reg);
4578 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4579 }
4580 // Smi to double register conversion
4581 __ bind(&load_smi);
4582 // scratch: untagged value of input_reg
4583 __ mtc1(scratch, result_reg);
4584 __ cvt_d_w(result_reg, result_reg);
4585 __ bind(&done);
4586}
4587
4588
4589void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4590 Register input_reg = ToRegister(instr->value());
4591 Register scratch1 = scratch0();
4592 Register scratch2 = ToRegister(instr->temp());
4593 DoubleRegister double_scratch = double_scratch0();
4594 DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
4595
4596 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4597 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4598
4599 Label done;
4600
4601 // The input is a tagged HeapObject.
4602 // Heap number map check.
4603 __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4604 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4605 // The 'at' value and the map value in scratch1 are used for tests in both
4606 // clauses of the if.
4607
4608 if (instr->truncating()) {
4609 // Performs a truncating conversion of a floating point number as used by
4610 // the JS bitwise operations.
4611 Label no_heap_number, check_bools, check_false;
4612 // Check HeapNumber map.
4613 __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
4614 __ mov(scratch2, input_reg); // In delay slot.
4615 __ TruncateHeapNumberToI(input_reg, scratch2);
4616 __ Branch(&done);
4617
4618 // Check for Oddballs. Undefined and False are converted to zero and True to
4619 // one for truncating conversions.
4620 __ bind(&no_heap_number);
4621 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4622 __ Branch(&check_bools, ne, input_reg, Operand(at));
4623 DCHECK(ToRegister(instr->result()).is(input_reg));
4624 __ Branch(USE_DELAY_SLOT, &done);
4625 __ mov(input_reg, zero_reg); // In delay slot.
4626
4627 __ bind(&check_bools);
4628 __ LoadRoot(at, Heap::kTrueValueRootIndex);
4629 __ Branch(&check_false, ne, scratch2, Operand(at));
4630 __ Branch(USE_DELAY_SLOT, &done);
4631 __ li(input_reg, Operand(1)); // In delay slot.
4632
4633 __ bind(&check_false);
4634 __ LoadRoot(at, Heap::kFalseValueRootIndex);
4635 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean,
4636 scratch2, Operand(at));
4637 __ Branch(USE_DELAY_SLOT, &done);
4638 __ mov(input_reg, zero_reg); // In delay slot.
4639 } else {
4640 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1,
4641 Operand(at));
4642
4643 // Load the double value.
4644 __ ldc1(double_scratch,
4645 FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4646
4647 Register except_flag = scratch2;
4648 __ EmitFPUTruncate(kRoundToZero,
4649 input_reg,
4650 double_scratch,
4651 scratch1,
4652 double_scratch2,
4653 except_flag,
4654 kCheckForInexactConversion);
4655
4656 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
4657 Operand(zero_reg));
4658
4659 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4660 __ Branch(&done, ne, input_reg, Operand(zero_reg));
4661
4662 __ Mfhc1(scratch1, double_scratch);
4663 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4664 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
4665 Operand(zero_reg));
4666 }
4667 }
4668 __ bind(&done);
4669}
4670
4671
4672void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4673 class DeferredTaggedToI final : public LDeferredCode {
4674 public:
4675 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4676 : LDeferredCode(codegen), instr_(instr) { }
4677 void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
4678 LInstruction* instr() override { return instr_; }
4679
4680 private:
4681 LTaggedToI* instr_;
4682 };
4683
4684 LOperand* input = instr->value();
4685 DCHECK(input->IsRegister());
4686 DCHECK(input->Equals(instr->result()));
4687
4688 Register input_reg = ToRegister(input);
4689
4690 if (instr->hydrogen()->value()->representation().IsSmi()) {
4691 __ SmiUntag(input_reg);
4692 } else {
4693 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4694
4695 // Let the deferred code handle the HeapObject case.
4696 __ JumpIfNotSmi(input_reg, deferred->entry());
4697
4698 // Smi to int32 conversion.
4699 __ SmiUntag(input_reg);
4700 __ bind(deferred->exit());
4701 }
4702}
4703
4704
4705void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4706 LOperand* input = instr->value();
4707 DCHECK(input->IsRegister());
4708 LOperand* result = instr->result();
4709 DCHECK(result->IsDoubleRegister());
4710
4711 Register input_reg = ToRegister(input);
4712 DoubleRegister result_reg = ToDoubleRegister(result);
4713
4714 HValue* value = instr->hydrogen()->value();
4715 NumberUntagDMode mode = value->representation().IsSmi()
4716 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4717
4718 EmitNumberUntagD(instr, input_reg, result_reg, mode);
4719}
4720
4721
4722void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4723 Register result_reg = ToRegister(instr->result());
4724 Register scratch1 = scratch0();
4725 DoubleRegister double_input = ToDoubleRegister(instr->value());
4726
4727 if (instr->truncating()) {
4728 __ TruncateDoubleToI(result_reg, double_input);
4729 } else {
4730 Register except_flag = LCodeGen::scratch1();
4731
4732 __ EmitFPUTruncate(kRoundToMinusInf,
4733 result_reg,
4734 double_input,
4735 scratch1,
4736 double_scratch0(),
4737 except_flag,
4738 kCheckForInexactConversion);
4739
4740 // Deopt if the operation did not succeed (except_flag != 0).
4741 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
4742 Operand(zero_reg));
4743
4744 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4745 Label done;
4746 __ Branch(&done, ne, result_reg, Operand(zero_reg));
4747 __ Mfhc1(scratch1, double_input);
4748 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4749 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
4750 Operand(zero_reg));
4751 __ bind(&done);
4752 }
4753 }
4754}
4755
4756
4757void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
4758 Register result_reg = ToRegister(instr->result());
4759 Register scratch1 = LCodeGen::scratch0();
4760 DoubleRegister double_input = ToDoubleRegister(instr->value());
4761
4762 if (instr->truncating()) {
4763 __ TruncateDoubleToI(result_reg, double_input);
4764 } else {
4765 Register except_flag = LCodeGen::scratch1();
4766
4767 __ EmitFPUTruncate(kRoundToMinusInf,
4768 result_reg,
4769 double_input,
4770 scratch1,
4771 double_scratch0(),
4772 except_flag,
4773 kCheckForInexactConversion);
4774
4775 // Deopt if the operation did not succeed (except_flag != 0).
4776 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
4777 Operand(zero_reg));
4778
4779 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4780 Label done;
4781 __ Branch(&done, ne, result_reg, Operand(zero_reg));
4782 __ Mfhc1(scratch1, double_input);
4783 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4784 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
4785 Operand(zero_reg));
4786 __ bind(&done);
4787 }
4788 }
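// Re-tag the result as a Smi; a value outside the 31-bit Smi range leaves a
// negative overflow marker in scratch1, triggering the deopt below.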
4789 __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
4790 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch1, Operand(zero_reg));
4791}
4792
4793
4794void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4795 LOperand* input = instr->value();
4796 __ SmiTst(ToRegister(input), at);
4797 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg));
4798}
4799
4800
4801void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4802 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4803 LOperand* input = instr->value();
4804 __ SmiTst(ToRegister(input), at);
4805 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
4806 }
4807}
4808
4809
4810void LCodeGen::DoCheckArrayBufferNotNeutered(
4811 LCheckArrayBufferNotNeutered* instr) {
4812 Register view = ToRegister(instr->view());
4813 Register scratch = scratch0();
4814
4815 __ lw(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
4816 __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
4817 __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
4818 DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, at, Operand(zero_reg));
4819}
4820
4821
4822void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4823 Register input = ToRegister(instr->value());
4824 Register scratch = scratch0();
4825
4826 __ GetObjectType(input, scratch, scratch);
4827
4828 if (instr->hydrogen()->is_interval_check()) {
4829 InstanceType first;
4830 InstanceType last;
4831 instr->hydrogen()->GetCheckInterval(&first, &last);
4832
4833 // If there is only one type in the interval, check for equality.
4834 if (first == last) {
4835 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
4836 Operand(first));
4837 } else {
4838 DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch,
4839 Operand(first));
4840 // Omit check for the last type.
4841 if (last != LAST_TYPE) {
4842 DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch,
4843 Operand(last));
4844 }
4845 }
4846 } else {
4847 uint8_t mask;
4848 uint8_t tag;
4849 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4850
4851 if (base::bits::IsPowerOfTwo32(mask)) {
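// With a single-bit mask the tag is either zero or the mask itself, so an
// AND followed by a zero/non-zero test is sufficient.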
4852 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
4853 __ And(at, scratch, mask);
4854 DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
4855 at, Operand(zero_reg));
4856 } else {
4857 __ And(scratch, scratch, Operand(mask));
4858 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
4859 Operand(tag));
4860 }
4861 }
4862}
4863
4864
4865void LCodeGen::DoCheckValue(LCheckValue* instr) {
4866 Register reg = ToRegister(instr->value());
4867 Handle<HeapObject> object = instr->hydrogen()->object().handle();
4868 AllowDeferredHandleDereference smi_check;
4869 if (isolate()->heap()->InNewSpace(*object)) {
4870 Register reg = ToRegister(instr->value());
4871 Handle<Cell> cell = isolate()->factory()->NewCell(object);
4872 __ li(at, Operand(cell));
4873 __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
4874 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at));
4875 } else {
4876 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object));
4877 }
4878}
4879
4880
4881void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
4882 {
4883 PushSafepointRegistersScope scope(this);
4884 __ push(object);
4885 __ mov(cp, zero_reg);
4886 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
4887 RecordSafepointWithRegisters(
4888 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
4889 __ StoreToSafepointRegisterSlot(v0, scratch0());
4890 }
4891 __ SmiTst(scratch0(), at);
4892 DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at,
4893 Operand(zero_reg));
4894}
4895
4896
4897void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4898 class DeferredCheckMaps final : public LDeferredCode {
4899 public:
4900 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
4901 : LDeferredCode(codegen), instr_(instr), object_(object) {
4902 SetExit(check_maps());
4903 }
4904 void Generate() override {
4905 codegen()->DoDeferredInstanceMigration(instr_, object_);
4906 }
4907 Label* check_maps() { return &check_maps_; }
4908 LInstruction* instr() override { return instr_; }
4909
4910 private:
4911 LCheckMaps* instr_;
4912 Label check_maps_;
4913 Register object_;
4914 };
4915
4916 if (instr->hydrogen()->IsStabilityCheck()) {
4917 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
4918 for (int i = 0; i < maps->size(); ++i) {
4919 AddStabilityDependency(maps->at(i).handle());
4920 }
4921 return;
4922 }
4923
4924 Register map_reg = scratch0();
4925 LOperand* input = instr->value();
4926 DCHECK(input->IsRegister());
4927 Register reg = ToRegister(input);
4928 __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
4929
4930 DeferredCheckMaps* deferred = NULL;
4931 if (instr->hydrogen()->HasMigrationTarget()) {
4932 deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
4933 __ bind(deferred->check_maps());
4934 }
4935
4936 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
4937 Label success;
4938 for (int i = 0; i < maps->size() - 1; i++) {
4939 Handle<Map> map = maps->at(i).handle();
4940 __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
4941 }
4942 Handle<Map> map = maps->at(maps->size() - 1).handle();
4943 // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
4944 if (instr->hydrogen()->HasMigrationTarget()) {
4945 __ Branch(deferred->entry(), ne, map_reg, Operand(map));
4946 } else {
4947 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map));
4948 }
4949
4950 __ bind(&success);
4951}
4952
4953
4954void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4955 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
4956 Register result_reg = ToRegister(instr->result());
4957 DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
4958 __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
4959}
4960
4961
4962void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4963 Register unclamped_reg = ToRegister(instr->unclamped());
4964 Register result_reg = ToRegister(instr->result());
4965 __ ClampUint8(result_reg, unclamped_reg);
4966}
4967
4968
4969void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4970 Register scratch = scratch0();
4971 Register input_reg = ToRegister(instr->unclamped());
4972 Register result_reg = ToRegister(instr->result());
4973 DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
4974 Label is_smi, done, heap_number;
4975
4976 // Both smi and heap number cases are handled.
4977 __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
4978
4979 // Check for heap number
4980 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4981 __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
4982
4983 // Check for undefined. Undefined is converted to zero for clamping
4984 // conversions.
4985 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
4986 Operand(factory()->undefined_value()));
4987 __ mov(result_reg, zero_reg);
4988 __ jmp(&done);
4989
4990 // Heap number
4991 __ bind(&heap_number);
4992 __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
4993 HeapNumber::kValueOffset));
4994 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
4995 __ jmp(&done);
4996
4997 __ bind(&is_smi);
4998 __ ClampUint8(result_reg, scratch);
4999
5000 __ bind(&done);
5001}
5002
5003
5004void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5005 DoubleRegister value_reg = ToDoubleRegister(instr->value());
5006 Register result_reg = ToRegister(instr->result());
5007 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5008 __ FmoveHigh(result_reg, value_reg);
5009 } else {
5010 __ FmoveLow(result_reg, value_reg);
5011 }
5012}
5013
5014
5015void LCodeGen::DoAllocate(LAllocate* instr) {
5016 class DeferredAllocate final : public LDeferredCode {
5017 public:
5018 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5019 : LDeferredCode(codegen), instr_(instr) { }
5020 void Generate() override { codegen()->DoDeferredAllocate(instr_); }
5021 LInstruction* instr() override { return instr_; }
5022
5023 private:
5024 LAllocate* instr_;
5025 };
5026
5027 DeferredAllocate* deferred =
5028 new(zone()) DeferredAllocate(this, instr);
5029
5030 Register result = ToRegister(instr->result());
5031 Register scratch = ToRegister(instr->temp1());
5032 Register scratch2 = ToRegister(instr->temp2());
5033
5034 // Allocate memory for the object.
5035 AllocationFlags flags = NO_ALLOCATION_FLAGS;
5036 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5037 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5038 }
5039 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5040 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5041 flags = static_cast<AllocationFlags>(flags | PRETENURE);
5042 }
5043
5044 if (instr->hydrogen()->IsAllocationFoldingDominator()) {
5045 flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
5046 }
5047 DCHECK(!instr->hydrogen()->IsAllocationFolded());
5048
5049 if (instr->size()->IsConstantOperand()) {
5050 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5051 CHECK(size <= Page::kMaxRegularHeapObjectSize);
5052 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5053 } else {
5054 Register size = ToRegister(instr->size());
5055 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5056 }
5057
5058 __ bind(deferred->exit());
5059
5060 if (instr->hydrogen()->MustPrefillWithFiller()) {
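// Pre-fill the allocated space with one-pointer filler maps so the heap
// stays iterable if a GC happens before the object is fully initialized.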
5061 STATIC_ASSERT(kHeapObjectTag == 1);
5062 if (instr->size()->IsConstantOperand()) {
5063 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5064 __ li(scratch, Operand(size - kHeapObjectTag));
5065 } else {
5066 __ Subu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
5067 }
5068 __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5069 Label loop;
5070 __ bind(&loop);
5071 __ Subu(scratch, scratch, Operand(kPointerSize));
5072 __ Addu(at, result, Operand(scratch));
5073 __ sw(scratch2, MemOperand(at));
5074 __ Branch(&loop, ge, scratch, Operand(zero_reg));
5075 }
5076}
5077
5078
5079void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5080 Register result = ToRegister(instr->result());
5081
5082 // TODO(3095996): Get rid of this. For now, we need to make the
5083 // result register contain a valid pointer because it is already
5084 // contained in the register pointer map.
5085 __ mov(result, zero_reg);
5086
5087 PushSafepointRegistersScope scope(this);
5088 if (instr->size()->IsRegister()) {
5089 Register size = ToRegister(instr->size());
5090 DCHECK(!size.is(result));
5091 __ SmiTag(size);
5092 __ push(size);
5093 } else {
5094 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5095 if (size >= 0 && size <= Smi::kMaxValue) {
5096 __ Push(Smi::FromInt(size));
5097 } else {
5098 // We should never get here at runtime => abort
5099 __ stop("invalid allocation size");
5100 return;
5101 }
5102 }
5103
5104 int flags = AllocateDoubleAlignFlag::encode(
5105 instr->hydrogen()->MustAllocateDoubleAligned());
5106 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5107 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5108 flags = AllocateTargetSpace::update(flags, OLD_SPACE);
5109 } else {
5110 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5111 }
5112 __ Push(Smi::FromInt(flags));
5113
5114 CallRuntimeFromDeferred(
5115 Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5116 __ StoreToSafepointRegisterSlot(v0, result);
5117
5118 if (instr->hydrogen()->IsAllocationFoldingDominator()) {
5119 AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
5120 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5121 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5122 allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
5123 }
5124 // If the allocation folding dominator's allocation triggered a GC, the
5125 // allocation happened in the runtime. We have to reset the top pointer to
5126 // virtually undo the allocation.
5127 ExternalReference allocation_top =
5128 AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
5129 Register top_address = scratch0();
5130 __ Subu(v0, v0, Operand(kHeapObjectTag));
5131 __ li(top_address, Operand(allocation_top));
5132 __ sw(v0, MemOperand(top_address));
5133 __ Addu(v0, v0, Operand(kHeapObjectTag));
5134 }
5135}
5136
5137void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
5138 DCHECK(instr->hydrogen()->IsAllocationFolded());
5139 DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
5140 Register result = ToRegister(instr->result());
5141 Register scratch1 = ToRegister(instr->temp1());
5142 Register scratch2 = ToRegister(instr->temp2());
5143
5144 AllocationFlags flags = ALLOCATION_FOLDED;
5145 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5146 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5147 }
5148 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5149 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5150 flags = static_cast<AllocationFlags>(flags | PRETENURE);
5151 }
5152 if (instr->size()->IsConstantOperand()) {
5153 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5154 CHECK(size <= Page::kMaxRegularHeapObjectSize);
5155 __ FastAllocate(size, result, scratch1, scratch2, flags);
5156 } else {
5157 Register size = ToRegister(instr->size());
5158 __ FastAllocate(size, result, scratch1, scratch2, flags);
5159 }
5160}
5161
5162
5163void LCodeGen::DoTypeof(LTypeof* instr) {
5164 DCHECK(ToRegister(instr->value()).is(a3));
5165 DCHECK(ToRegister(instr->result()).is(v0));
5166 Label end, do_call;
5167 Register value_register = ToRegister(instr->value());
5168 __ JumpIfNotSmi(value_register, &do_call);
5169 __ li(v0, Operand(isolate()->factory()->number_string()));
5170 __ jmp(&end);
5171 __ bind(&do_call);
5172 TypeofStub stub(isolate());
5173 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5174 __ bind(&end);
5175}
5176
5177
5178void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5179 Register input = ToRegister(instr->value());
5180
5181 Register cmp1 = no_reg;
5182 Operand cmp2 = Operand(no_reg);
5183
5184 Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
5185 instr->FalseLabel(chunk_),
5186 input,
5187 instr->type_literal(),
5188 &cmp1,
5189 &cmp2);
5190
5191 DCHECK(cmp1.is_valid());
5192 DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());
5193
5194 if (final_branch_condition != kNoCondition) {
5195 EmitBranch(instr, final_branch_condition, cmp1, cmp2);
5196 }
5197}
5198
5199
5200Condition LCodeGen::EmitTypeofIs(Label* true_label,
5201 Label* false_label,
5202 Register input,
5203 Handle<String> type_name,
5204 Register* cmp1,
5205 Operand* cmp2) {
5206 // This function utilizes the delay slot heavily. This is used to load
5207 // values that are always usable without depending on the type of the input
5208 // register.
5209 Condition final_branch_condition = kNoCondition;
5210 Register scratch = scratch0();
5211 Factory* factory = isolate()->factory();
5212 if (String::Equals(type_name, factory->number_string())) {
5213 __ JumpIfSmi(input, true_label);
5214 __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
5215 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
5216 *cmp1 = input;
5217 *cmp2 = Operand(at);
5218 final_branch_condition = eq;
5219
5220 } else if (String::Equals(type_name, factory->string_string())) {
5221 __ JumpIfSmi(input, false_label);
5222 __ GetObjectType(input, input, scratch);
5223 *cmp1 = scratch;
5224 *cmp2 = Operand(FIRST_NONSTRING_TYPE);
5225 final_branch_condition = lt;
5226
5227 } else if (String::Equals(type_name, factory->symbol_string())) {
5228 __ JumpIfSmi(input, false_label);
5229 __ GetObjectType(input, input, scratch);
5230 *cmp1 = scratch;
5231 *cmp2 = Operand(SYMBOL_TYPE);
5232 final_branch_condition = eq;
5233
5234 } else if (String::Equals(type_name, factory->boolean_string())) {
5235 __ LoadRoot(at, Heap::kTrueValueRootIndex);
5236 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5237 __ LoadRoot(at, Heap::kFalseValueRootIndex);
5238 *cmp1 = at;
5239 *cmp2 = Operand(input);
5240 final_branch_condition = eq;
5241
5242 } else if (String::Equals(type_name, factory->undefined_string())) {
5243 __ LoadRoot(at, Heap::kNullValueRootIndex);
5244 __ Branch(USE_DELAY_SLOT, false_label, eq, at, Operand(input));
5245 // The first instruction of JumpIfSmi is an And - it is safe in the delay
5246 // slot.
5247 __ JumpIfSmi(input, false_label);
5248 // Check for undetectable objects => true.
5249 __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
5250 __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
5251 __ And(at, at, 1 << Map::kIsUndetectable);
5252 *cmp1 = at;
5253 *cmp2 = Operand(zero_reg);
5254 final_branch_condition = ne;
5255
5256 } else if (String::Equals(type_name, factory->function_string())) {
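// typeof yields "function" only for callable, non-undetectable objects, so
// both bits are masked and the result must equal kIsCallable alone.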
5257 __ JumpIfSmi(input, false_label);
5258 __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5259 __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5260 __ And(scratch, scratch,
5261 Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5262 *cmp1 = scratch;
5263 *cmp2 = Operand(1 << Map::kIsCallable);
5264 final_branch_condition = eq;
5265
5266 } else if (String::Equals(type_name, factory->object_string())) {
5267 __ JumpIfSmi(input, false_label);
5268 __ LoadRoot(at, Heap::kNullValueRootIndex);
5269 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5270 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
5271 __ GetObjectType(input, scratch, scratch1());
5272 __ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE));
5273 // Check for callable or undetectable objects => false.
5274 __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5275 __ And(at, scratch,
5276 Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5277 *cmp1 = at;
5278 *cmp2 = Operand(zero_reg);
5279 final_branch_condition = eq;
5280
5281// clang-format off
5282#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
5283 } else if (String::Equals(type_name, factory->type##_string())) { \
5284 __ JumpIfSmi(input, false_label); \
5285 __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset)); \
5286 __ LoadRoot(at, Heap::k##Type##MapRootIndex); \
5287 *cmp1 = input; \
5288 *cmp2 = Operand(at); \
5289 final_branch_condition = eq;
5290 SIMD128_TYPES(SIMD128_TYPE)
5291#undef SIMD128_TYPE
5292 // clang-format on
5293
5294 } else {
5295 *cmp1 = at;
5296 *cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
5297 __ Branch(false_label);
5298 }
5299
5300 return final_branch_condition;
5301}
5302
5303
5304void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5305 if (info()->ShouldEnsureSpaceForLazyDeopt()) {
5306 // Ensure that we have enough space after the previous lazy-bailout
5307 // instruction for patching the code here.
5308 int current_pc = masm()->pc_offset();
5309 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5310 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5311 DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
5312 while (padding_size > 0) {
5313 __ nop();
5314 padding_size -= Assembler::kInstrSize;
5315 }
5316 }
5317 }
5318 last_lazy_deopt_pc_ = masm()->pc_offset();
5319}
5320
5321
5322void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5323 last_lazy_deopt_pc_ = masm()->pc_offset();
5324 DCHECK(instr->HasEnvironment());
5325 LEnvironment* env = instr->environment();
5326 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5327 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5328}
5329
5330
5331void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5332 Deoptimizer::BailoutType type = instr->hydrogen()->type();
5333 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5334 // needed return address), even though the implementation of LAZY and EAGER is
5335 // now identical. When LAZY is eventually completely folded into EAGER, remove
5336 // the special case below.
5337 if (info()->IsStub() && type == Deoptimizer::EAGER) {
5338 type = Deoptimizer::LAZY;
5339 }
5340
5341 DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg,
5342 Operand(zero_reg));
5343}
5344
5345
5346void LCodeGen::DoDummy(LDummy* instr) {
5347 // Nothing to see here, move on!
5348}
5349
5350
5351void LCodeGen::DoDummyUse(LDummyUse* instr) {
5352 // Nothing to see here, move on!
5353}
5354
5355
5356void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5357 PushSafepointRegistersScope scope(this);
5358 LoadContextFromDeferred(instr->context());
5359 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5360 RecordSafepointWithLazyDeopt(
5361 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5362 DCHECK(instr->HasEnvironment());
5363 LEnvironment* env = instr->environment();
5364 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5365}
5366
5367
5368void LCodeGen::DoStackCheck(LStackCheck* instr) {
5369 class DeferredStackCheck final : public LDeferredCode {
5370 public:
5371 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5372 : LDeferredCode(codegen), instr_(instr) { }
5373 void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
5374 LInstruction* instr() override { return instr_; }
5375
5376 private:
5377 LStackCheck* instr_;
5378 };
5379
5380 DCHECK(instr->HasEnvironment());
5381 LEnvironment* env = instr->environment();
5382 // There is no LLazyBailout instruction for stack-checks. We have to
5383 // prepare for lazy deoptimization explicitly here.
5384 if (instr->hydrogen()->is_function_entry()) {
5385 // Perform stack overflow check.
5386 Label done;
5387 __ LoadRoot(at, Heap::kStackLimitRootIndex);
5388 __ Branch(&done, hs, sp, Operand(at));
5389 DCHECK(instr->context()->IsRegister());
5390 DCHECK(ToRegister(instr->context()).is(cp));
5391 CallCode(isolate()->builtins()->StackCheck(),
5392 RelocInfo::CODE_TARGET,
5393 instr);
5394 __ bind(&done);
5395 } else {
5396 DCHECK(instr->hydrogen()->is_backwards_branch());
5397 // Perform stack overflow check if this goto needs it before jumping.
5398 DeferredStackCheck* deferred_stack_check =
5399 new(zone()) DeferredStackCheck(this, instr);
5400 __ LoadRoot(at, Heap::kStackLimitRootIndex);
5401 __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
5402 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5403 __ bind(instr->done_label());
5404 deferred_stack_check->SetExit(instr->done_label());
5405 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5406 // Don't record a deoptimization index for the safepoint here.
5407 // This will be done explicitly when emitting call and the safepoint in
5408 // the deferred code.
5409 }
5410}
5411
5412
5413void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5414 // This is a pseudo-instruction that ensures that the environment here is
5415 // properly registered for deoptimization and records the assembler's PC
5416 // offset.
5417 LEnvironment* environment = instr->environment();
5418
5419 // If the environment were already registered, we would have no way of
5420 // backpatching it with the spill slot operands.
5421 DCHECK(!environment->HasBeenRegistered());
5422 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5423
5424 GenerateOsrPrologue();
5425}
5426
5427
5428void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5429 Register result = ToRegister(instr->result());
5430 Register object = ToRegister(instr->object());
5431
5432 Label use_cache, call_runtime;
5433 DCHECK(object.is(a0));
5434 __ CheckEnumCache(&call_runtime);
5435
5436 __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
5437 __ Branch(&use_cache);
5438
5439 // Get the set of properties to enumerate.
5440 __ bind(&call_runtime);
5441 __ push(object);
5442 CallRuntime(Runtime::kForInEnumerate, instr);
5443 __ bind(&use_cache);
5444}
5445
5446
5447void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5448 Register map = ToRegister(instr->map());
5449 Register result = ToRegister(instr->result());
5450 Label load_cache, done;
5451 __ EnumLength(result, map);
5452 __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
5453 __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
5454 __ jmp(&done);
5455
5456 __ bind(&load_cache);
5457 __ LoadInstanceDescriptors(map, result);
5458 __ lw(result,
5459 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5460 __ lw(result,
5461 FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5462 DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg));
5463
5464 __ bind(&done);
5465}
5466
5467
5468void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5469 Register object = ToRegister(instr->value());
5470 Register map = ToRegister(instr->map());
5471 __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5472 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0()));
5473}
5474
5475
5476void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5477 Register result,
5478 Register object,
5479 Register index) {
5480 PushSafepointRegistersScope scope(this);
5481 __ Push(object, index);
5482 __ mov(cp, zero_reg);
5483 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5484 RecordSafepointWithRegisters(
5485 instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5486 __ StoreToSafepointRegisterSlot(v0, result);
5487}
5488
5489
5490void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5491 class DeferredLoadMutableDouble final : public LDeferredCode {
5492 public:
5493 DeferredLoadMutableDouble(LCodeGen* codegen,
5494 LLoadFieldByIndex* instr,
5495 Register result,
5496 Register object,
5497 Register index)
5498 : LDeferredCode(codegen),
5499 instr_(instr),
5500 result_(result),
5501 object_(object),
5502 index_(index) {
5503 }
5504 void Generate() override {
5505 codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
5506 }
5507 LInstruction* instr() override { return instr_; }
5508
5509 private:
5510 LLoadFieldByIndex* instr_;
5511 Register result_;
5512 Register object_;
5513 Register index_;
5514 };
5515
5516 Register object = ToRegister(instr->object());
5517 Register index = ToRegister(instr->index());
5518 Register result = ToRegister(instr->result());
5519 Register scratch = scratch0();
5520
5521 DeferredLoadMutableDouble* deferred;
5522 deferred = new(zone()) DeferredLoadMutableDouble(
5523 this, instr, result, object, index);
5524
5525 Label out_of_object, done;
5526
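// The low bit of the Smi index marks a mutable double field, which must be
// loaded through the deferred runtime call; after the shift, a negative
// index selects an out-of-object property.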
5527 __ And(scratch, index, Operand(Smi::FromInt(1)));
5528 __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
5529 __ sra(index, index, 1);
5530
5531 __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
5532 __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize); // In delay slot.
5533
5534 STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
5535 __ Addu(scratch, object, scratch);
5536 __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5537
5538 __ Branch(&done);
5539
5540 __ bind(&out_of_object);
5541 __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5542 // The index is the negated out-of-object property index plus 1.
5543 __ Subu(scratch, result, scratch);
5544 __ lw(result, FieldMemOperand(scratch,
5545 FixedArray::kHeaderSize - kPointerSize));
5546 __ bind(deferred->exit());
5547 __ bind(&done);
5548}
5549
5550#undef __
5551
5552} // namespace internal
5553} // namespace v8