1// Copyright 2012 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "src/crankshaft/mips/lithium-codegen-mips.h"
29
30#include "src/base/bits.h"
31#include "src/code-factory.h"
32#include "src/code-stubs.h"
33#include "src/crankshaft/hydrogen-osr.h"
34#include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
35#include "src/ic/ic.h"
36#include "src/ic/stub-cache.h"
37#include "src/profiler/cpu-profiler.h"
38
39
40namespace v8 {
41namespace internal {
42
43
44class SafepointGenerator final : public CallWrapper {
45 public:
46 SafepointGenerator(LCodeGen* codegen,
47 LPointerMap* pointers,
48 Safepoint::DeoptMode mode)
49 : codegen_(codegen),
50 pointers_(pointers),
51 deopt_mode_(mode) { }
52 virtual ~SafepointGenerator() {}
53
54 void BeforeCall(int call_size) const override {}
55
56 void AfterCall() const override {
57 codegen_->RecordSafepoint(pointers_, deopt_mode_);
58 }
59
60 private:
61 LCodeGen* codegen_;
62 LPointerMap* pointers_;
63 Safepoint::DeoptMode deopt_mode_;
64};
65
66
67#define __ masm()->
68
69bool LCodeGen::GenerateCode() {
70 LPhase phase("Z_Code generation", chunk());
71 DCHECK(is_unused());
72 status_ = GENERATING;
73
74 // Open a frame scope to indicate that there is a frame on the stack. The
75 // NONE indicates that the scope shouldn't actually generate code to set up
76 // the frame (that is done in GeneratePrologue).
77 FrameScope frame_scope(masm_, StackFrame::NONE);
78
79 return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
80 GenerateJumpTable() && GenerateSafepointTable();
81}
82
83
84void LCodeGen::FinishCode(Handle<Code> code) {
85 DCHECK(is_done());
86 code->set_stack_slots(GetTotalFrameSlotCount());
87 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
88 PopulateDeoptimizationData(code);
89}
90
91
92void LCodeGen::SaveCallerDoubles() {
93 DCHECK(info()->saves_caller_doubles());
94 DCHECK(NeedsEagerFrame());
95 Comment(";;; Save clobbered callee double registers");
96 int count = 0;
97 BitVector* doubles = chunk()->allocated_double_registers();
98 BitVector::Iterator save_iterator(doubles);
99 while (!save_iterator.Done()) {
100 __ sdc1(DoubleRegister::from_code(save_iterator.Current()),
101 MemOperand(sp, count * kDoubleSize));
102 save_iterator.Advance();
103 count++;
104 }
105}
106
107
108void LCodeGen::RestoreCallerDoubles() {
109 DCHECK(info()->saves_caller_doubles());
110 DCHECK(NeedsEagerFrame());
111 Comment(";;; Restore clobbered callee double registers");
112 BitVector* doubles = chunk()->allocated_double_registers();
113 BitVector::Iterator save_iterator(doubles);
114 int count = 0;
115 while (!save_iterator.Done()) {
116 __ ldc1(DoubleRegister::from_code(save_iterator.Current()),
117 MemOperand(sp, count * kDoubleSize));
118 save_iterator.Advance();
119 count++;
120 }
121}
122
123
124bool LCodeGen::GeneratePrologue() {
125 DCHECK(is_generating());
126
127 if (info()->IsOptimizing()) {
128 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
129
130 // a1: Callee's JS function.
131 // cp: Callee's context.
132 // fp: Caller's frame pointer.
133 // lr: Caller's pc.
134 }
135
136 info()->set_prologue_offset(masm_->pc_offset());
137 if (NeedsEagerFrame()) {
138 if (info()->IsStub()) {
139 __ StubPrologue(StackFrame::STUB);
140 } else {
141 __ Prologue(info()->GeneratePreagedPrologue());
142 }
143 frame_is_built_ = true;
144 }
145
146 // Reserve space for the stack slots needed by the code.
147 int slots = GetStackSlotCount();
148 if (slots > 0) {
149 if (FLAG_debug_code) {
150 __ Subu(sp, sp, Operand(slots * kPointerSize));
151 __ Push(a0, a1);
152 __ Addu(a0, sp, Operand(slots * kPointerSize));
153 __ li(a1, Operand(kSlotsZapValue));
154 Label loop;
155 __ bind(&loop);
156 __ Subu(a0, a0, Operand(kPointerSize));
157 __ sw(a1, MemOperand(a0, 2 * kPointerSize));
158 __ Branch(&loop, ne, a0, Operand(sp));
159 __ Pop(a0, a1);
160 } else {
161 __ Subu(sp, sp, Operand(slots * kPointerSize));
162 }
163 }
164
165 if (info()->saves_caller_doubles()) {
166 SaveCallerDoubles();
167 }
168 return !is_aborted();
169}
170
171
172void LCodeGen::DoPrologue(LPrologue* instr) {
173 Comment(";;; Prologue begin");
174
175 // Possibly allocate a local context.
176 if (info()->scope()->num_heap_slots() > 0) {
177 Comment(";;; Allocate local context");
178 bool need_write_barrier = true;
179 // Argument to NewContext is the function, which is in a1.
180 int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
181 Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
182 if (info()->scope()->is_script_scope()) {
183 __ push(a1);
184 __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
185 __ CallRuntime(Runtime::kNewScriptContext);
186 deopt_mode = Safepoint::kLazyDeopt;
187 } else if (slots <= FastNewContextStub::kMaximumSlots) {
188 FastNewContextStub stub(isolate(), slots);
189 __ CallStub(&stub);
190 // Result of FastNewContextStub is always in new space.
191 need_write_barrier = false;
192 } else {
193 __ push(a1);
194 __ CallRuntime(Runtime::kNewFunctionContext);
195 }
196 RecordSafepoint(deopt_mode);
197
198 // The context is returned in v0. It replaces the context passed to us.
199 // It's saved in the stack and kept live in cp.
200 __ mov(cp, v0);
201 __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
202 // Copy any necessary parameters into the context.
203 int num_parameters = scope()->num_parameters();
204 int first_parameter = scope()->has_this_declaration() ? -1 : 0;
205 for (int i = first_parameter; i < num_parameters; i++) {
206 Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
207 if (var->IsContextSlot()) {
208 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
209 (num_parameters - 1 - i) * kPointerSize;
210 // Load parameter from stack.
211 __ lw(a0, MemOperand(fp, parameter_offset));
212 // Store it in the context.
213 MemOperand target = ContextMemOperand(cp, var->index());
214 __ sw(a0, target);
215 // Update the write barrier. This clobbers a3 and a0.
216 if (need_write_barrier) {
217 __ RecordWriteContextSlot(
218 cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
219 } else if (FLAG_debug_code) {
220 Label done;
221 __ JumpIfInNewSpace(cp, a0, &done);
222 __ Abort(kExpectedNewSpaceObject);
223 __ bind(&done);
224 }
225 }
226 }
227 Comment(";;; End allocate local context");
228 }
229
230 Comment(";;; Prologue end");
231}
232
233
234void LCodeGen::GenerateOsrPrologue() {
235 // Generate the OSR entry prologue at the first unknown OSR value, or if there
236 // are none, at the OSR entrypoint instruction.
237 if (osr_pc_offset_ >= 0) return;
238
239 osr_pc_offset_ = masm()->pc_offset();
240
241 // Adjust the frame size, subsuming the unoptimized frame into the
242 // optimized frame.
243 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
244 DCHECK(slots >= 0);
245 __ Subu(sp, sp, Operand(slots * kPointerSize));
246}
247
248
249void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
250 if (instr->IsCall()) {
251 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
252 }
253 if (!instr->IsLazyBailout() && !instr->IsGap()) {
254 safepoints_.BumpLastLazySafepointIndex();
255 }
256}
257
258
259bool LCodeGen::GenerateDeferredCode() {
260 DCHECK(is_generating());
261 if (deferred_.length() > 0) {
262 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
263 LDeferredCode* code = deferred_[i];
264
265 HValue* value =
266 instructions_->at(code->instruction_index())->hydrogen_value();
267 RecordAndWritePosition(
268 chunk()->graph()->SourcePositionToScriptPosition(value->position()));
269
270 Comment(";;; <@%d,#%d> "
271 "-------------------- Deferred %s --------------------",
272 code->instruction_index(),
273 code->instr()->hydrogen_value()->id(),
274 code->instr()->Mnemonic());
275 __ bind(code->entry());
276 if (NeedsDeferredFrame()) {
277 Comment(";;; Build frame");
278 DCHECK(!frame_is_built_);
279 DCHECK(info()->IsStub());
280 frame_is_built_ = true;
281 __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
282 __ PushCommonFrame(scratch0());
283 Comment(";;; Deferred code");
284 }
285 code->Generate();
286 if (NeedsDeferredFrame()) {
287 Comment(";;; Destroy frame");
288 DCHECK(frame_is_built_);
289 __ PopCommonFrame(scratch0());
290 frame_is_built_ = false;
291 }
292 __ jmp(code->exit());
293 }
294 }
295 // Deferred code is the last part of the instruction sequence. Mark
296 // the generated code as done unless we bailed out.
297 if (!is_aborted()) status_ = DONE;
298 return !is_aborted();
299}
300
301
302bool LCodeGen::GenerateJumpTable() {
303 if (jump_table_.length() > 0) {
304 Label needs_frame, call_deopt_entry;
305
306 Comment(";;; -------------------- Jump table --------------------");
307 Address base = jump_table_[0].address;
308
309 Register entry_offset = t9;
310
311 int length = jump_table_.length();
312 for (int i = 0; i < length; i++) {
313 Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
314 __ bind(&table_entry->label);
315
316 DCHECK(table_entry->bailout_type == jump_table_[0].bailout_type);
317 Address entry = table_entry->address;
318 DeoptComment(table_entry->deopt_info);
319
320 // Second-level deopt table entries are contiguous and small, so instead
321 // of loading the full, absolute address of each one, load an immediate
322 // offset which will be added to the base address later.
323 __ li(entry_offset, Operand(entry - base));
324
325 if (table_entry->needs_frame) {
326 DCHECK(!info()->saves_caller_doubles());
327 Comment(";;; call deopt with frame");
328 __ PushCommonFrame();
329 __ Call(&needs_frame);
330 } else {
331 __ Call(&call_deopt_entry);
332 }
333 info()->LogDeoptCallPosition(masm()->pc_offset(),
334 table_entry->deopt_info.inlining_id);
335 }
336
337 if (needs_frame.is_linked()) {
338 __ bind(&needs_frame);
339 // This variant of deopt can only be used with stubs. Since we don't
340 // have a function pointer to install in the stack frame that we're
341 // building, install a special marker there instead.
342 __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
343 __ push(at);
344 DCHECK(info()->IsStub());
345 }
346
347 Comment(";;; call deopt");
348 __ bind(&call_deopt_entry);
349
350 if (info()->saves_caller_doubles()) {
351 DCHECK(info()->IsStub());
352 RestoreCallerDoubles();
353 }
354
355 // Add the base address to the offset previously loaded in entry_offset.
356 __ Addu(entry_offset, entry_offset,
357 Operand(ExternalReference::ForDeoptEntry(base)));
358 __ Jump(entry_offset);
359 }
360 __ RecordComment("]");
361
362 // The deoptimization jump table is the last part of the instruction
363 // sequence. Mark the generated code as done unless we bailed out.
364 if (!is_aborted()) status_ = DONE;
365 return !is_aborted();
366}
367
368
369bool LCodeGen::GenerateSafepointTable() {
370 DCHECK(is_done());
371 safepoints_.Emit(masm(), GetTotalFrameSlotCount());
372 return !is_aborted();
373}
374
375
376Register LCodeGen::ToRegister(int index) const {
377 return Register::from_code(index);
378}
379
380
381DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
382 return DoubleRegister::from_code(index);
383}
384
385
386Register LCodeGen::ToRegister(LOperand* op) const {
387 DCHECK(op->IsRegister());
388 return ToRegister(op->index());
389}
390
391
392Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
393 if (op->IsRegister()) {
394 return ToRegister(op->index());
395 } else if (op->IsConstantOperand()) {
396 LConstantOperand* const_op = LConstantOperand::cast(op);
397 HConstant* constant = chunk_->LookupConstant(const_op);
398 Handle<Object> literal = constant->handle(isolate());
399 Representation r = chunk_->LookupLiteralRepresentation(const_op);
400 if (r.IsInteger32()) {
401 AllowDeferredHandleDereference get_number;
402 DCHECK(literal->IsNumber());
403 __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
404 } else if (r.IsSmi()) {
405 DCHECK(constant->HasSmiValue());
406 __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
407 } else if (r.IsDouble()) {
408 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
409 } else {
410 DCHECK(r.IsSmiOrTagged());
411 __ li(scratch, literal);
412 }
413 return scratch;
414 } else if (op->IsStackSlot()) {
415 __ lw(scratch, ToMemOperand(op));
416 return scratch;
417 }
418 UNREACHABLE();
419 return scratch;
420}
421
422
423DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
424 DCHECK(op->IsDoubleRegister());
425 return ToDoubleRegister(op->index());
426}
427
428
429DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
430 FloatRegister flt_scratch,
431 DoubleRegister dbl_scratch) {
432 if (op->IsDoubleRegister()) {
433 return ToDoubleRegister(op->index());
434 } else if (op->IsConstantOperand()) {
435 LConstantOperand* const_op = LConstantOperand::cast(op);
436 HConstant* constant = chunk_->LookupConstant(const_op);
437 Handle<Object> literal = constant->handle(isolate());
438 Representation r = chunk_->LookupLiteralRepresentation(const_op);
439 if (r.IsInteger32()) {
440 DCHECK(literal->IsNumber());
441 __ li(at, Operand(static_cast<int32_t>(literal->Number())));
442 __ mtc1(at, flt_scratch);
443 __ cvt_d_w(dbl_scratch, flt_scratch);
444 return dbl_scratch;
445 } else if (r.IsDouble()) {
446 Abort(kUnsupportedDoubleImmediate);
447 } else if (r.IsTagged()) {
448 Abort(kUnsupportedTaggedImmediate);
449 }
450 } else if (op->IsStackSlot()) {
451 MemOperand mem_op = ToMemOperand(op);
452 __ ldc1(dbl_scratch, mem_op);
453 return dbl_scratch;
454 }
455 UNREACHABLE();
456 return dbl_scratch;
457}
458
459
460Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
461 HConstant* constant = chunk_->LookupConstant(op);
462 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
463 return constant->handle(isolate());
464}
465
466
467bool LCodeGen::IsInteger32(LConstantOperand* op) const {
468 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
469}
470
471
472bool LCodeGen::IsSmi(LConstantOperand* op) const {
473 return chunk_->LookupLiteralRepresentation(op).IsSmi();
474}
475
476
477int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
478 return ToRepresentation(op, Representation::Integer32());
479}
480
481
482int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
483 const Representation& r) const {
484 HConstant* constant = chunk_->LookupConstant(op);
485 int32_t value = constant->Integer32Value();
486 if (r.IsInteger32()) return value;
487 DCHECK(r.IsSmiOrTagged());
488 return reinterpret_cast<int32_t>(Smi::FromInt(value));
489}
490
491
492Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
493 HConstant* constant = chunk_->LookupConstant(op);
494 return Smi::FromInt(constant->Integer32Value());
495}
496
497
498double LCodeGen::ToDouble(LConstantOperand* op) const {
499 HConstant* constant = chunk_->LookupConstant(op);
500 DCHECK(constant->HasDoubleValue());
501 return constant->DoubleValue();
502}
503
504
505Operand LCodeGen::ToOperand(LOperand* op) {
506 if (op->IsConstantOperand()) {
507 LConstantOperand* const_op = LConstantOperand::cast(op);
508 HConstant* constant = chunk()->LookupConstant(const_op);
509 Representation r = chunk_->LookupLiteralRepresentation(const_op);
510 if (r.IsSmi()) {
511 DCHECK(constant->HasSmiValue());
512 return Operand(Smi::FromInt(constant->Integer32Value()));
513 } else if (r.IsInteger32()) {
514 DCHECK(constant->HasInteger32Value());
515 return Operand(constant->Integer32Value());
516 } else if (r.IsDouble()) {
517 Abort(kToOperandUnsupportedDoubleImmediate);
518 }
519 DCHECK(r.IsTagged());
520 return Operand(constant->handle(isolate()));
521 } else if (op->IsRegister()) {
522 return Operand(ToRegister(op));
523 } else if (op->IsDoubleRegister()) {
524 Abort(kToOperandIsDoubleRegisterUnimplemented);
525 return Operand(0);
526 }
527 // Stack slots are not implemented; use ToMemOperand instead.
528 UNREACHABLE();
529 return Operand(0);
530}
531
532
533static int ArgumentsOffsetWithoutFrame(int index) {
534 DCHECK(index < 0);
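  // Incoming arguments are addressed with negative indices; convert to a
  // non-negative sp-relative byte offset (index -1 -> 0, -2 -> kPointerSize,
  // and so on).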
535 return -(index + 1) * kPointerSize;
536}
537
538
539MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
540 DCHECK(!op->IsRegister());
541 DCHECK(!op->IsDoubleRegister());
542 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
543 if (NeedsEagerFrame()) {
544 return MemOperand(fp, FrameSlotToFPOffset(op->index()));
545 } else {
546 // No eager frame was built; retrieve the parameter relative to the
547 // stack pointer.
548 return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
549 }
550}
551
552
553MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
554 DCHECK(op->IsDoubleStackSlot());
555 if (NeedsEagerFrame()) {
556 return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
557 } else {
558 // No eager frame was built; retrieve the parameter relative to the
559 // stack pointer.
560 return MemOperand(
561 sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
562 }
563}
564
565
566void LCodeGen::WriteTranslation(LEnvironment* environment,
567 Translation* translation) {
568 if (environment == NULL) return;
569
570 // The translation includes one command per value in the environment.
571 int translation_size = environment->translation_size();
572
573 WriteTranslation(environment->outer(), translation);
574 WriteTranslationFrame(environment, translation);
575
576 int object_index = 0;
577 int dematerialized_index = 0;
578 for (int i = 0; i < translation_size; ++i) {
579 LOperand* value = environment->values()->at(i);
580 AddToTranslation(
581 environment, translation, value, environment->HasTaggedValueAt(i),
582 environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
583 }
584}
585
586
587void LCodeGen::AddToTranslation(LEnvironment* environment,
588 Translation* translation,
589 LOperand* op,
590 bool is_tagged,
591 bool is_uint32,
592 int* object_index_pointer,
593 int* dematerialized_index_pointer) {
594 if (op == LEnvironment::materialization_marker()) {
595 int object_index = (*object_index_pointer)++;
596 if (environment->ObjectIsDuplicateAt(object_index)) {
597 int dupe_of = environment->ObjectDuplicateOfAt(object_index);
598 translation->DuplicateObject(dupe_of);
599 return;
600 }
601 int object_length = environment->ObjectLengthAt(object_index);
602 if (environment->ObjectIsArgumentsAt(object_index)) {
603 translation->BeginArgumentsObject(object_length);
604 } else {
605 translation->BeginCapturedObject(object_length);
606 }
607 int dematerialized_index = *dematerialized_index_pointer;
608 int env_offset = environment->translation_size() + dematerialized_index;
609 *dematerialized_index_pointer += object_length;
610 for (int i = 0; i < object_length; ++i) {
611 LOperand* value = environment->values()->at(env_offset + i);
612 AddToTranslation(environment,
613 translation,
614 value,
615 environment->HasTaggedValueAt(env_offset + i),
616 environment->HasUint32ValueAt(env_offset + i),
617 object_index_pointer,
618 dematerialized_index_pointer);
619 }
620 return;
621 }
622
623 if (op->IsStackSlot()) {
624 int index = op->index();
625 if (is_tagged) {
626 translation->StoreStackSlot(index);
627 } else if (is_uint32) {
628 translation->StoreUint32StackSlot(index);
629 } else {
630 translation->StoreInt32StackSlot(index);
631 }
632 } else if (op->IsDoubleStackSlot()) {
633 int index = op->index();
634 translation->StoreDoubleStackSlot(index);
635 } else if (op->IsRegister()) {
636 Register reg = ToRegister(op);
637 if (is_tagged) {
638 translation->StoreRegister(reg);
639 } else if (is_uint32) {
640 translation->StoreUint32Register(reg);
641 } else {
642 translation->StoreInt32Register(reg);
643 }
644 } else if (op->IsDoubleRegister()) {
645 DoubleRegister reg = ToDoubleRegister(op);
646 translation->StoreDoubleRegister(reg);
647 } else if (op->IsConstantOperand()) {
648 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
649 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
650 translation->StoreLiteral(src_index);
651 } else {
652 UNREACHABLE();
653 }
654}
655
656
657void LCodeGen::CallCode(Handle<Code> code,
658 RelocInfo::Mode mode,
659 LInstruction* instr) {
660 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
661}
662
663
664void LCodeGen::CallCodeGeneric(Handle<Code> code,
665 RelocInfo::Mode mode,
666 LInstruction* instr,
667 SafepointMode safepoint_mode) {
668 DCHECK(instr != NULL);
669 __ Call(code, mode);
670 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
671}
672
673
674void LCodeGen::CallRuntime(const Runtime::Function* function,
675 int num_arguments,
676 LInstruction* instr,
677 SaveFPRegsMode save_doubles) {
678 DCHECK(instr != NULL);
679
680 __ CallRuntime(function, num_arguments, save_doubles);
681
682 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
683}
684
685
686void LCodeGen::LoadContextFromDeferred(LOperand* context) {
687 if (context->IsRegister()) {
688 __ Move(cp, ToRegister(context));
689 } else if (context->IsStackSlot()) {
690 __ lw(cp, ToMemOperand(context));
691 } else if (context->IsConstantOperand()) {
692 HConstant* constant =
693 chunk_->LookupConstant(LConstantOperand::cast(context));
694 __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
695 } else {
696 UNREACHABLE();
697 }
698}
699
700
701void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
702 int argc,
703 LInstruction* instr,
704 LOperand* context) {
705 LoadContextFromDeferred(context);
706 __ CallRuntimeSaveDoubles(id);
707 RecordSafepointWithRegisters(
708 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
709}
710
711
712void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
713 Safepoint::DeoptMode mode) {
714 environment->set_has_been_used();
715 if (!environment->HasBeenRegistered()) {
716 // Physical stack frame layout:
717 // -x ............. -4 0 ..................................... y
718 // [incoming arguments] [spill slots] [pushed outgoing arguments]
719
720 // Layout of the environment:
721 // 0 ..................................................... size-1
722 // [parameters] [locals] [expression stack including arguments]
723
724 // Layout of the translation:
725 // 0 ........................................................ size - 1 + 4
726 // [expression stack including arguments] [locals] [4 words] [parameters]
727 // |>------------ translation_size ------------<|
728
729 int frame_count = 0;
730 int jsframe_count = 0;
731 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
732 ++frame_count;
733 if (e->frame_type() == JS_FUNCTION) {
734 ++jsframe_count;
735 }
736 }
737 Translation translation(&translations_, frame_count, jsframe_count, zone());
738 WriteTranslation(environment, &translation);
739 int deoptimization_index = deoptimizations_.length();
740 int pc_offset = masm()->pc_offset();
741 environment->Register(deoptimization_index,
742 translation.index(),
743 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
744 deoptimizations_.Add(environment, zone());
745 }
746}
747
748
749void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
750 Deoptimizer::DeoptReason deopt_reason,
751 Deoptimizer::BailoutType bailout_type,
752 Register src1, const Operand& src2) {
753 LEnvironment* environment = instr->environment();
754 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
755 DCHECK(environment->HasBeenRegistered());
756 int id = environment->deoptimization_index();
757 Address entry =
758 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
759 if (entry == NULL) {
760 Abort(kBailoutWasNotPrepared);
761 return;
762 }
763
764 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
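    // Stress mode: decrement a global countdown and force this deopt to be
    // taken once every FLAG_deopt_every_n_times times it is reached; the
    // countdown is reset when it hits zero.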
765 Register scratch = scratch0();
766 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
767 Label no_deopt;
768 __ Push(a1, scratch);
769 __ li(scratch, Operand(count));
770 __ lw(a1, MemOperand(scratch));
771 __ Subu(a1, a1, Operand(1));
772 __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
773 __ li(a1, Operand(FLAG_deopt_every_n_times));
774 __ sw(a1, MemOperand(scratch));
775 __ Pop(a1, scratch);
776
777 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
778 __ bind(&no_deopt);
779 __ sw(a1, MemOperand(scratch));
780 __ Pop(a1, scratch);
781 }
782
783 if (info()->ShouldTrapOnDeopt()) {
784 Label skip;
785 if (condition != al) {
786 __ Branch(&skip, NegateCondition(condition), src1, src2);
787 }
788 __ stop("trap_on_deopt");
789 __ bind(&skip);
790 }
791
792 Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
793
794 DCHECK(info()->IsStub() || frame_is_built_);
795 // Go through jump table if we need to handle condition, build frame, or
796 // restore caller doubles.
797 if (condition == al && frame_is_built_ &&
798 !info()->saves_caller_doubles()) {
799 DeoptComment(deopt_info);
800 __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
801 info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
802 } else {
803 Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
804 !frame_is_built_);
805 // We often have several deopts to the same entry; reuse the last
806 // jump entry if this is the case.
807 if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
808 jump_table_.is_empty() ||
809 !table_entry.IsEquivalentTo(jump_table_.last())) {
810 jump_table_.Add(table_entry, zone());
811 }
812 __ Branch(&jump_table_.last().label, condition, src1, src2);
813 }
814}
815
816
817void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
818 Deoptimizer::DeoptReason deopt_reason,
819 Register src1, const Operand& src2) {
820 Deoptimizer::BailoutType bailout_type = info()->IsStub()
821 ? Deoptimizer::LAZY
822 : Deoptimizer::EAGER;
823 DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2);
824}
825
826
827void LCodeGen::RecordSafepointWithLazyDeopt(
828 LInstruction* instr, SafepointMode safepoint_mode) {
829 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
830 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
831 } else {
832 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
833 RecordSafepointWithRegisters(
834 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
835 }
836}
837
838
839void LCodeGen::RecordSafepoint(
840 LPointerMap* pointers,
841 Safepoint::Kind kind,
842 int arguments,
843 Safepoint::DeoptMode deopt_mode) {
844 DCHECK(expected_safepoint_kind_ == kind);
845
846 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
847 Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
848 kind, arguments, deopt_mode);
849 for (int i = 0; i < operands->length(); i++) {
850 LOperand* pointer = operands->at(i);
851 if (pointer->IsStackSlot()) {
852 safepoint.DefinePointerSlot(pointer->index(), zone());
853 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
854 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
855 }
856 }
857}
858
859
860void LCodeGen::RecordSafepoint(LPointerMap* pointers,
861 Safepoint::DeoptMode deopt_mode) {
862 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
863}
864
865
866void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
867 LPointerMap empty_pointers(zone());
868 RecordSafepoint(&empty_pointers, deopt_mode);
869}
870
871
872void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
873 int arguments,
874 Safepoint::DeoptMode deopt_mode) {
875 RecordSafepoint(
876 pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
877}
878
879
880void LCodeGen::RecordAndWritePosition(int position) {
881 if (position == RelocInfo::kNoPosition) return;
882 masm()->positions_recorder()->RecordPosition(position);
883 masm()->positions_recorder()->WriteRecordedPositions();
884}
885
886
887static const char* LabelType(LLabel* label) {
888 if (label->is_loop_header()) return " (loop header)";
889 if (label->is_osr_entry()) return " (OSR entry)";
890 return "";
891}
892
893
894void LCodeGen::DoLabel(LLabel* label) {
895 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
896 current_instruction_,
897 label->hydrogen_value()->id(),
898 label->block_id(),
899 LabelType(label));
900 __ bind(label->label());
901 current_block_ = label->block_id();
902 DoGap(label);
903}
904
905
906void LCodeGen::DoParallelMove(LParallelMove* move) {
907 resolver_.Resolve(move);
908}
909
910
911void LCodeGen::DoGap(LGap* gap) {
912 for (int i = LGap::FIRST_INNER_POSITION;
913 i <= LGap::LAST_INNER_POSITION;
914 i++) {
915 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
916 LParallelMove* move = gap->GetParallelMove(inner_pos);
917 if (move != NULL) DoParallelMove(move);
918 }
919}
920
921
922void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
923 DoGap(instr);
924}
925
926
927void LCodeGen::DoParameter(LParameter* instr) {
928 // Nothing to do.
929}
930
931
932void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
933 GenerateOsrPrologue();
934}
935
936
937void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
938 Register dividend = ToRegister(instr->dividend());
939 int32_t divisor = instr->divisor();
940 DCHECK(dividend.is(ToRegister(instr->result())));
941
942 // Theoretically, a variation of the branch-free code for integer division by
943 // a power of 2 (calculating the remainder via an additional multiplication
944 // (which gets simplified to an 'and') and subtraction) should be faster, and
945 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
946 // indicate that positive dividends are heavily favored, so the branching
947 // version performs better.
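  // A sketch of the branching scheme emitted below (mask == |divisor| - 1,
  // with |divisor| a power of two):
  //   if (dividend >= 0) result = dividend & mask;
  //   else               result = -((-dividend) & mask);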
948 HMod* hmod = instr->hydrogen();
949 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
950 Label dividend_is_not_negative, done;
951
952 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
953 __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
954 // Note: The code below even works when right contains kMinInt.
955 __ subu(dividend, zero_reg, dividend);
956 __ And(dividend, dividend, Operand(mask));
957 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
958 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
959 Operand(zero_reg));
960 }
961 __ Branch(USE_DELAY_SLOT, &done);
962 __ subu(dividend, zero_reg, dividend);
963 }
964
965 __ bind(&dividend_is_not_negative);
966 __ And(dividend, dividend, Operand(mask));
967 __ bind(&done);
968}
969
970
971void LCodeGen::DoModByConstI(LModByConstI* instr) {
972 Register dividend = ToRegister(instr->dividend());
973 int32_t divisor = instr->divisor();
974 Register result = ToRegister(instr->result());
975 DCHECK(!dividend.is(result));
976
977 if (divisor == 0) {
978 DeoptimizeIf(al, instr);
979 return;
980 }
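  // The remainder is computed as dividend - trunc(dividend / |divisor|) *
  // |divisor|. Its sign follows the dividend, so the divisor's sign can be
  // ignored here.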
981
982 __ TruncatingDiv(result, dividend, Abs(divisor));
983 __ Mul(result, result, Operand(Abs(divisor)));
984 __ Subu(result, dividend, Operand(result));
985
986 // Check for negative zero.
987 HMod* hmod = instr->hydrogen();
988 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
989 Label remainder_not_zero;
990 __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
991 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend,
992 Operand(zero_reg));
993 __ bind(&remainder_not_zero);
994 }
995}
996
997
998void LCodeGen::DoModI(LModI* instr) {
999 HMod* hmod = instr->hydrogen();
1000 const Register left_reg = ToRegister(instr->left());
1001 const Register right_reg = ToRegister(instr->right());
1002 const Register result_reg = ToRegister(instr->result());
1003
1004 // div runs in the background while we check for special cases.
1005 __ Mod(result_reg, left_reg, right_reg);
1006
1007 Label done;
1008 // Check for x % 0, we have to deopt in this case because we can't return a
1009 // NaN.
1010 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1011 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg,
1012 Operand(zero_reg));
1013 }
1014
1015 // Check for kMinInt % -1; div would return kMinInt, which is not what we
1016 // want. We have to deopt if we care about -0, because we can't return that.
1017 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1018 Label no_overflow_possible;
1019 __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
1020 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1021 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1));
1022 } else {
1023 __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
1024 __ Branch(USE_DELAY_SLOT, &done);
1025 __ mov(result_reg, zero_reg);
1026 }
1027 __ bind(&no_overflow_possible);
1028 }
1029
1030 // If we care about -0, test if the dividend is <0 and the result is 0.
1031 __ Branch(&done, ge, left_reg, Operand(zero_reg));
1032 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1033 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg,
1034 Operand(zero_reg));
1035 }
1036 __ bind(&done);
1037}
1038
1039
1040void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1041 Register dividend = ToRegister(instr->dividend());
1042 int32_t divisor = instr->divisor();
1043 Register result = ToRegister(instr->result());
1044 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1045 DCHECK(!result.is(dividend));
1046
1047 // Check for (0 / -x) that will produce negative zero.
1048 HDiv* hdiv = instr->hydrogen();
1049 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1050 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
1051 Operand(zero_reg));
1052 }
1053 // Check for (kMinInt / -1).
1054 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1055 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt));
1056 }
1057 // Deoptimize if remainder will not be 0.
1058 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1059 divisor != 1 && divisor != -1) {
1060 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1061 __ And(at, dividend, Operand(mask));
1062 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg));
1063 }
1064
1065 if (divisor == -1) { // Nice shortcut, not needed for correctness.
1066 __ Subu(result, zero_reg, dividend);
1067 return;
1068 }
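  // Truncating (round-toward-zero) division by 2^shift: an arithmetic shift
  // alone would round toward -infinity, so for negative dividends a bias of
  // (2^shift - 1) is added first. The shifts below build that bias from the
  // sign bit without a branch:
  //   bias   = (dividend < 0) ? (1 << shift) - 1 : 0;
  //   result = (dividend + bias) >> shift;  // arithmetic shift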
1069 uint16_t shift = WhichPowerOf2Abs(divisor);
1070 if (shift == 0) {
1071 __ Move(result, dividend);
1072 } else if (shift == 1) {
1073 __ srl(result, dividend, 31);
1074 __ Addu(result, dividend, Operand(result));
1075 } else {
1076 __ sra(result, dividend, 31);
1077 __ srl(result, result, 32 - shift);
1078 __ Addu(result, dividend, Operand(result));
1079 }
1080 if (shift > 0) __ sra(result, result, shift);
1081 if (divisor < 0) __ Subu(result, zero_reg, result);
1082}
1083
1084
1085void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1086 Register dividend = ToRegister(instr->dividend());
1087 int32_t divisor = instr->divisor();
1088 Register result = ToRegister(instr->result());
1089 DCHECK(!dividend.is(result));
1090
1091 if (divisor == 0) {
1092 DeoptimizeIf(al, instr);
1093 return;
1094 }
1095
1096 // Check for (0 / -x) that will produce negative zero.
1097 HDiv* hdiv = instr->hydrogen();
1098 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1099 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
1100 Operand(zero_reg));
1101 }
1102
1103 __ TruncatingDiv(result, dividend, Abs(divisor));
1104 if (divisor < 0) __ Subu(result, zero_reg, result);
1105
1106 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1107 __ Mul(scratch0(), result, Operand(divisor));
1108 __ Subu(scratch0(), scratch0(), dividend);
1109 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(),
1110 Operand(zero_reg));
1111 }
1112}
1113
1114
1115// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1116void LCodeGen::DoDivI(LDivI* instr) {
1117 HBinaryOperation* hdiv = instr->hydrogen();
1118 Register dividend = ToRegister(instr->dividend());
1119 Register divisor = ToRegister(instr->divisor());
1120 const Register result = ToRegister(instr->result());
1121 Register remainder = ToRegister(instr->temp());
1122
1123 // On MIPS div is asynchronous - it will run in the background while we
1124 // check for special cases.
1125 __ Div(remainder, result, dividend, divisor);
1126
1127 // Check for x / 0.
1128 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1129 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
1130 Operand(zero_reg));
1131 }
1132
1133 // Check for (0 / -x) that will produce negative zero.
1134 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1135 Label left_not_zero;
1136 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
1137 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
1138 Operand(zero_reg));
1139 __ bind(&left_not_zero);
1140 }
1141
1142 // Check for (kMinInt / -1).
1143 if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1144 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1145 Label left_not_min_int;
1146 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
1147 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
1148 __ bind(&left_not_min_int);
1149 }
1150
1151 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1152 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder,
1153 Operand(zero_reg));
1154 }
1155}
1156
1157
1158void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1159 DoubleRegister addend = ToDoubleRegister(instr->addend());
1160 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1161 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1162
1163 // This is computed in-place.
1164 DCHECK(addend.is(ToDoubleRegister(instr->result())));
1165
1166 __ madd_d(addend, addend, multiplier, multiplicand);
1167}
1168
1169
1170void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1171 Register dividend = ToRegister(instr->dividend());
1172 Register result = ToRegister(instr->result());
1173 int32_t divisor = instr->divisor();
1174 Register scratch = result.is(dividend) ? scratch0() : dividend;
1175 DCHECK(!result.is(dividend) || !scratch.is(dividend));
1176
1177 // If the divisor is 1, return the dividend.
1178 if (divisor == 1) {
1179 __ Move(result, dividend);
1180 return;
1181 }
1182
1183 // If the divisor is positive, things are easy: There can be no deopts and we
1184 // can simply do an arithmetic right shift.
1185 uint16_t shift = WhichPowerOf2Abs(divisor);
1186 if (divisor > 1) {
1187 __ sra(result, dividend, shift);
1188 return;
1189 }
1190
1191 // If the divisor is negative, we have to negate and handle edge cases.
1192
1193 // dividend can be the same register as result so save the value of it
1194 // for checking overflow.
1195 __ Move(scratch, dividend);
1196
1197 __ Subu(result, zero_reg, dividend);
1198 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1199 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
1200 }
1201
1202 // Dividing by -1 is basically negation, unless we overflow.
1203 __ Xor(scratch, scratch, result);
1204 if (divisor == -1) {
1205 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1206 DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
1207 Operand(zero_reg));
1208 }
1209 return;
1210 }
1211
1212 // If the negation could not overflow, simply shifting is OK.
1213 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1214 __ sra(result, result, shift);
1215 return;
1216 }
1217
1218 Label no_overflow, done;
1219 __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
1220 __ li(result, Operand(kMinInt / divisor));
1221 __ Branch(&done);
1222 __ bind(&no_overflow);
1223 __ sra(result, result, shift);
1224 __ bind(&done);
1225}
1226
1227
1228void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1229 Register dividend = ToRegister(instr->dividend());
1230 int32_t divisor = instr->divisor();
1231 Register result = ToRegister(instr->result());
1232 DCHECK(!dividend.is(result));
1233
1234 if (divisor == 0) {
1235 DeoptimizeIf(al, instr);
1236 return;
1237 }
1238
1239 // Check for (0 / -x) that will produce negative zero.
1240 HMathFloorOfDiv* hdiv = instr->hydrogen();
1241 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1242 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
1243 Operand(zero_reg));
1244 }
1245
1246 // Easy case: We need no dynamic check for the dividend and the flooring
1247 // division is the same as the truncating division.
1248 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1249 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1250 __ TruncatingDiv(result, dividend, Abs(divisor));
1251 if (divisor < 0) __ Subu(result, zero_reg, result);
1252 return;
1253 }
1254
1255 // In the general case we may need to adjust before and after the truncating
1256 // division to get a flooring division.
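  // A sketch of the adjustment (shown for divisor > 0; the divisor < 0 case
  // mirrors it with -1): for a negative dividend,
  //   floor(dividend / divisor) == trunc((dividend + 1) / divisor) - 1,
  // which holds whether or not the division is exact.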
1257 Register temp = ToRegister(instr->temp());
1258 DCHECK(!temp.is(dividend) && !temp.is(result));
1259 Label needs_adjustment, done;
1260 __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
1261 dividend, Operand(zero_reg));
1262 __ TruncatingDiv(result, dividend, Abs(divisor));
1263 if (divisor < 0) __ Subu(result, zero_reg, result);
1264 __ jmp(&done);
1265 __ bind(&needs_adjustment);
1266 __ Addu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
1267 __ TruncatingDiv(result, temp, Abs(divisor));
1268 if (divisor < 0) __ Subu(result, zero_reg, result);
1269 __ Subu(result, result, Operand(1));
1270 __ bind(&done);
1271}
1272
1273
1274// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1275void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1276 HBinaryOperation* hdiv = instr->hydrogen();
1277 Register dividend = ToRegister(instr->dividend());
1278 Register divisor = ToRegister(instr->divisor());
1279 const Register result = ToRegister(instr->result());
1280 Register remainder = scratch0();
1281 // On MIPS div is asynchronous - it will run in the background while we
1282 // check for special cases.
1283 __ Div(remainder, result, dividend, divisor);
1284
1285 // Check for x / 0.
1286 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1287 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
1288 Operand(zero_reg));
1289 }
1290
1291 // Check for (0 / -x) that will produce negative zero.
1292 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1293 Label left_not_zero;
1294 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
1295 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
1296 Operand(zero_reg));
1297 __ bind(&left_not_zero);
1298 }
1299
1300 // Check for (kMinInt / -1).
1301 if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1302 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1303 Label left_not_min_int;
1304 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
1305 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
1306 __ bind(&left_not_min_int);
1307 }
1308
1309 // We performed a truncating division. Correct the result if necessary.
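  // Truncation and flooring differ only when the remainder is non-zero and
  // the operands have opposite signs (remainder XOR divisor is negative); in
  // that case the truncated quotient is one too large, so subtract 1.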
1310 Label done;
1311 __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
1312 __ Xor(remainder, remainder, Operand(divisor));
1313 __ Branch(&done, ge, remainder, Operand(zero_reg));
1314 __ Subu(result, result, Operand(1));
1315 __ bind(&done);
1316}
1317
1318
1319void LCodeGen::DoMulI(LMulI* instr) {
1320 Register scratch = scratch0();
1321 Register result = ToRegister(instr->result());
1322 // Note that result may alias left.
1323 Register left = ToRegister(instr->left());
1324 LOperand* right_op = instr->right();
1325
1326 bool bailout_on_minus_zero =
1327 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1328 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1329
1330 if (right_op->IsConstantOperand()) {
1331 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1332
1333 if (bailout_on_minus_zero && (constant < 0)) {
1334 // The case of a zero constant is handled separately below.
1335 // If the constant is negative and left is zero, the result should be -0.
1336 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
1337 }
1338
1339 switch (constant) {
1340 case -1:
1341 if (overflow) {
1342 Label no_overflow;
1343 __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
1344 DeoptimizeIf(al, instr);
1345 __ bind(&no_overflow);
1346 } else {
1347 __ Subu(result, zero_reg, left);
1348 }
1349 break;
1350 case 0:
1351 if (bailout_on_minus_zero) {
1352 // If left is strictly negative and the constant is zero, the
1353 // result is -0. Deoptimize if required; otherwise return 0.
1354 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
1355 Operand(zero_reg));
1356 }
1357 __ mov(result, zero_reg);
1358 break;
1359 case 1:
1360 // Nothing to do.
1361 __ Move(result, left);
1362 break;
1363 default:
1364 // Multiplying by powers of two and powers of two plus or minus
1365 // one can be done faster with shifted operands.
1366 // For other constants we emit standard code.
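  // For example: x * 8 == x << 3, x * 5 == (x << 2) + x (the Lsa case
  // below), and x * 7 == (x << 3) - x.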
1367 int32_t mask = constant >> 31;
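  // With mask == 0 for a non-negative constant and -1 (all ones) for a
  // negative one, (constant + mask) ^ mask below is a branch-free
  // abs(constant).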
1368 uint32_t constant_abs = (constant + mask) ^ mask;
1369
1370 if (base::bits::IsPowerOfTwo32(constant_abs)) {
1371 int32_t shift = WhichPowerOf2(constant_abs);
1372 __ sll(result, left, shift);
1373 // Correct the sign of the result if the constant is negative.
1374 if (constant < 0) __ Subu(result, zero_reg, result);
1375 } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
1376 int32_t shift = WhichPowerOf2(constant_abs - 1);
1377 __ Lsa(result, left, left, shift);
1378 // Correct the sign of the result if the constant is negative.
1379 if (constant < 0) __ Subu(result, zero_reg, result);
1380 } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
1381 int32_t shift = WhichPowerOf2(constant_abs + 1);
1382 __ sll(scratch, left, shift);
1383 __ Subu(result, scratch, left);
1384 // Correct the sign of the result if the constant is negative.
1385 if (constant < 0) __ Subu(result, zero_reg, result);
1386 } else {
1387 // Generate standard code.
1388 __ li(at, constant);
1389 __ Mul(result, left, at);
1390 }
1391 }
1392
1393 } else {
1394 DCHECK(right_op->IsRegister());
1395 Register right = ToRegister(right_op);
1396
1397 if (overflow) {
1398 // hi:lo = left * right.
1399 if (instr->hydrogen()->representation().IsSmi()) {
1400 __ SmiUntag(result, left);
1401 __ Mul(scratch, result, result, right);
1402 } else {
1403 __ Mul(scratch, result, left, right);
1404 }
1405 __ sra(at, result, 31);
1406 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
1407 } else {
1408 if (instr->hydrogen()->representation().IsSmi()) {
1409 __ SmiUntag(result, left);
1410 __ Mul(result, result, right);
1411 } else {
1412 __ Mul(result, left, right);
1413 }
1414 }
1415
1416 if (bailout_on_minus_zero) {
1417 Label done;
1418 __ Xor(at, left, right);
1419 __ Branch(&done, ge, at, Operand(zero_reg));
1420 // Bail out if the result is minus zero.
1421 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
1422 Operand(zero_reg));
1423 __ bind(&done);
1424 }
1425 }
1426}
1427
1428
1429void LCodeGen::DoBitI(LBitI* instr) {
1430 LOperand* left_op = instr->left();
1431 LOperand* right_op = instr->right();
1432 DCHECK(left_op->IsRegister());
1433 Register left = ToRegister(left_op);
1434 Register result = ToRegister(instr->result());
1435 Operand right(no_reg);
1436
1437 if (right_op->IsStackSlot()) {
1438 right = Operand(EmitLoadRegister(right_op, at));
1439 } else {
1440 DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
1441 right = ToOperand(right_op);
1442 }
1443
1444 switch (instr->op()) {
1445 case Token::BIT_AND:
1446 __ And(result, left, right);
1447 break;
1448 case Token::BIT_OR:
1449 __ Or(result, left, right);
1450 break;
1451 case Token::BIT_XOR:
1452 if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
1453 __ Nor(result, zero_reg, left);
1454 } else {
1455 __ Xor(result, left, right);
1456 }
1457 break;
1458 default:
1459 UNREACHABLE();
1460 break;
1461 }
1462}
1463
1464
1465void LCodeGen::DoShiftI(LShiftI* instr) {
1466 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1467 // result may alias either of them.
1468 LOperand* right_op = instr->right();
1469 Register left = ToRegister(instr->left());
1470 Register result = ToRegister(instr->result());
1471 Register scratch = scratch0();
1472
1473 if (right_op->IsRegister()) {
1474 // No need to mask the right operand on MIPS; the masking is built into the
1475 // variable shift instructions.
1476 switch (instr->op()) {
1477 case Token::ROR:
1478 __ Ror(result, left, Operand(ToRegister(right_op)));
1479 break;
1480 case Token::SAR:
1481 __ srav(result, left, ToRegister(right_op));
1482 break;
1483 case Token::SHR:
1484 __ srlv(result, left, ToRegister(right_op));
1485 if (instr->can_deopt()) {
1486 DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result,
1487 Operand(zero_reg));
1488 }
1489 break;
1490 case Token::SHL:
1491 __ sllv(result, left, ToRegister(right_op));
1492 break;
1493 default:
1494 UNREACHABLE();
1495 break;
1496 }
1497 } else {
1498 // Mask the right_op operand.
1499 int value = ToInteger32(LConstantOperand::cast(right_op));
1500 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1501 switch (instr->op()) {
1502 case Token::ROR:
1503 if (shift_count != 0) {
1504 __ Ror(result, left, Operand(shift_count));
1505 } else {
1506 __ Move(result, left);
1507 }
1508 break;
1509 case Token::SAR:
1510 if (shift_count != 0) {
1511 __ sra(result, left, shift_count);
1512 } else {
1513 __ Move(result, left);
1514 }
1515 break;
1516 case Token::SHR:
1517 if (shift_count != 0) {
1518 __ srl(result, left, shift_count);
1519 } else {
1520 if (instr->can_deopt()) {
1521 __ And(at, left, Operand(0x80000000));
1522 DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at,
1523 Operand(zero_reg));
1524 }
1525 __ Move(result, left);
1526 }
1527 break;
1528 case Token::SHL:
1529 if (shift_count != 0) {
1530 if (instr->hydrogen_value()->representation().IsSmi() &&
1531 instr->can_deopt()) {
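          // A smi tag is a left shift by one, so (as emitted here) the value
          // is shifted by shift_count - 1 and SmiTagCheckOverflow supplies
          // the final shift, leaving an overflow indication in scratch for
          // the deopt below.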
1532 if (shift_count != 1) {
1533 __ sll(result, left, shift_count - 1);
1534 __ SmiTagCheckOverflow(result, result, scratch);
1535 } else {
1536 __ SmiTagCheckOverflow(result, left, scratch);
1537 }
1538 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
1539 Operand(zero_reg));
1540 } else {
1541 __ sll(result, left, shift_count);
1542 }
1543 } else {
1544 __ Move(result, left);
1545 }
1546 break;
1547 default:
1548 UNREACHABLE();
1549 break;
1550 }
1551 }
1552}
1553
1554
1555void LCodeGen::DoSubI(LSubI* instr) {
1556 LOperand* left = instr->left();
1557 LOperand* right = instr->right();
1558 LOperand* result = instr->result();
1559 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1560
1561 if (!can_overflow) {
1562 if (right->IsStackSlot()) {
1563 Register right_reg = EmitLoadRegister(right, at);
1564 __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
1565 } else {
1566 DCHECK(right->IsRegister() || right->IsConstantOperand());
1567 __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
1568 }
1569 } else { // can_overflow.
1570 Register scratch = scratch0();
1571 Label no_overflow_label;
1572 if (right->IsStackSlot()) {
1573 Register right_reg = EmitLoadRegister(right, scratch);
1574 __ SubBranchNoOvf(ToRegister(result), ToRegister(left),
1575 Operand(right_reg), &no_overflow_label);
1576 } else {
1577 DCHECK(right->IsRegister() || right->IsConstantOperand());
1578 __ SubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
1579 &no_overflow_label, scratch);
1580 }
1581 DeoptimizeIf(al, instr);
1582 __ bind(&no_overflow_label);
1583 }
1584}
1585
1586
1587void LCodeGen::DoConstantI(LConstantI* instr) {
1588 __ li(ToRegister(instr->result()), Operand(instr->value()));
1589}
1590
1591
1592void LCodeGen::DoConstantS(LConstantS* instr) {
1593 __ li(ToRegister(instr->result()), Operand(instr->value()));
1594}
1595
1596
1597void LCodeGen::DoConstantD(LConstantD* instr) {
1598 DCHECK(instr->result()->IsDoubleRegister());
1599 DoubleRegister result = ToDoubleRegister(instr->result());
1600 double v = instr->value();
1601 __ Move(result, v);
1602}
1603
1604
1605void LCodeGen::DoConstantE(LConstantE* instr) {
1606 __ li(ToRegister(instr->result()), Operand(instr->value()));
1607}
1608
1609
1610void LCodeGen::DoConstantT(LConstantT* instr) {
1611 Handle<Object> object = instr->value(isolate());
1612 AllowDeferredHandleDereference smi_check;
1613 __ li(ToRegister(instr->result()), object);
1614}
1615
1616
1617MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1618 LOperand* index,
1619 String::Encoding encoding) {
1620 if (index->IsConstantOperand()) {
1621 int offset = ToInteger32(LConstantOperand::cast(index));
1622 if (encoding == String::TWO_BYTE_ENCODING) {
1623 offset *= kUC16Size;
1624 }
1625 STATIC_ASSERT(kCharSize == 1);
1626 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1627 }
1628 Register scratch = scratch0();
1629 DCHECK(!scratch.is(string));
1630 DCHECK(!scratch.is(ToRegister(index)));
1631 if (encoding == String::ONE_BYTE_ENCODING) {
1632 __ Addu(scratch, string, ToRegister(index));
1633 } else {
1634 STATIC_ASSERT(kUC16Size == 2);
1635 __ sll(scratch, ToRegister(index), 1);
1636 __ Addu(scratch, string, scratch);
1637 }
1638 return FieldMemOperand(scratch, SeqString::kHeaderSize);
1639}
1640
1641
1642void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1643 String::Encoding encoding = instr->hydrogen()->encoding();
1644 Register string = ToRegister(instr->string());
1645 Register result = ToRegister(instr->result());
1646
1647 if (FLAG_debug_code) {
1648 Register scratch = scratch0();
1649 __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1650 __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1651
1652 __ And(scratch, scratch,
1653 Operand(kStringRepresentationMask | kStringEncodingMask));
1654 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1655 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1656 __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
1657 ? one_byte_seq_type : two_byte_seq_type));
1658 __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
1659 }
1660
1661 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1662 if (encoding == String::ONE_BYTE_ENCODING) {
1663 __ lbu(result, operand);
1664 } else {
1665 __ lhu(result, operand);
1666 }
1667}
1668
1669
1670void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1671 String::Encoding encoding = instr->hydrogen()->encoding();
1672 Register string = ToRegister(instr->string());
1673 Register value = ToRegister(instr->value());
1674
1675 if (FLAG_debug_code) {
1676 Register scratch = scratch0();
1677 Register index = ToRegister(instr->index());
1678 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1679 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1680 int encoding_mask =
1681 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1682 ? one_byte_seq_type : two_byte_seq_type;
1683 __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
1684 }
1685
1686 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1687 if (encoding == String::ONE_BYTE_ENCODING) {
1688 __ sb(value, operand);
1689 } else {
1690 __ sh(value, operand);
1691 }
1692}
1693
1694
1695void LCodeGen::DoAddI(LAddI* instr) {
1696 LOperand* left = instr->left();
1697 LOperand* right = instr->right();
1698 LOperand* result = instr->result();
1699 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1700
1701 if (!can_overflow) {
1702 if (right->IsStackSlot()) {
1703 Register right_reg = EmitLoadRegister(right, at);
1704 __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
1705 } else {
1706 DCHECK(right->IsRegister() || right->IsConstantOperand());
1707 __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
1708 }
1709 } else { // can_overflow.
1710 Register scratch = scratch1();
1711 Label no_overflow_label;
1712 if (right->IsStackSlot()) {
1713 Register right_reg = EmitLoadRegister(right, scratch);
1714 __ AddBranchNoOvf(ToRegister(result), ToRegister(left),
1715 Operand(right_reg), &no_overflow_label);
1716 } else {
1717 DCHECK(right->IsRegister() || right->IsConstantOperand());
1718 __ AddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
1719 &no_overflow_label, scratch);
1720 }
1721 DeoptimizeIf(al, instr);
1722 __ bind(&no_overflow_label);
1723 }
1724}
1725
1726
1727void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1728 LOperand* left = instr->left();
1729 LOperand* right = instr->right();
1730 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1731 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
1732 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1733 Register left_reg = ToRegister(left);
1734 Register right_reg = EmitLoadRegister(right, scratch0());
1735 Register result_reg = ToRegister(instr->result());
1736 Label return_right, done;
1737 Register scratch = scratch1();
1738 __ Slt(scratch, left_reg, Operand(right_reg));
1739 if (condition == ge) {
1740 __ Movz(result_reg, left_reg, scratch);
1741 __ Movn(result_reg, right_reg, scratch);
1742 } else {
1743 DCHECK(condition == le);
1744 __ Movn(result_reg, left_reg, scratch);
1745 __ Movz(result_reg, right_reg, scratch);
1746 }
1747 } else {
1748 DCHECK(instr->hydrogen()->representation().IsDouble());
1749 FPURegister left_reg = ToDoubleRegister(left);
1750 FPURegister right_reg = ToDoubleRegister(right);
1751 FPURegister result_reg = ToDoubleRegister(instr->result());
1752 Label check_nan_left, check_zero, return_left, return_right, done;
1753 __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
1754 __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
1755 __ Branch(&return_right);
1756
1757 __ bind(&check_zero);
1758 // left == right != 0.
1759 __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
1760 // At this point, both left and right are either 0 or -0.
1761 if (operation == HMathMinMax::kMathMin) {
1762      // The algorithm is: -((-L) + (-R)), which in case of L and R being
1763 // different registers is most efficiently expressed as -((-L) - R).
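      // This relies on IEEE 754 addition: (+0) + (-0) == +0, so e.g. for
      // min(+0, -0) the sum of the negated inputs is +0 and the final
      // negation produces the required -0.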
1764      __ neg_d(left_reg, left_reg);
1765      if (left_reg.is(right_reg)) {
1766 __ add_d(result_reg, left_reg, right_reg);
1767 } else {
1768 __ sub_d(result_reg, left_reg, right_reg);
1769 }
1770      __ neg_d(result_reg, result_reg);
1771 } else {
1772 __ add_d(result_reg, left_reg, right_reg);
1773 }
1774 __ Branch(&done);
1775
1776 __ bind(&check_nan_left);
1777 // left == NaN.
1778 __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
1779 __ bind(&return_right);
1780 if (!right_reg.is(result_reg)) {
1781 __ mov_d(result_reg, right_reg);
1782 }
1783 __ Branch(&done);
1784
1785 __ bind(&return_left);
1786 if (!left_reg.is(result_reg)) {
1787 __ mov_d(result_reg, left_reg);
1788 }
1789 __ bind(&done);
1790 }
1791}
1792
1793
1794void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1795 DoubleRegister left = ToDoubleRegister(instr->left());
1796 DoubleRegister right = ToDoubleRegister(instr->right());
1797 DoubleRegister result = ToDoubleRegister(instr->result());
1798 switch (instr->op()) {
1799 case Token::ADD:
1800 __ add_d(result, left, right);
1801 break;
1802 case Token::SUB:
1803 __ sub_d(result, left, right);
1804 break;
1805 case Token::MUL:
1806 __ mul_d(result, left, right);
1807 break;
1808 case Token::DIV:
1809 __ div_d(result, left, right);
1810 break;
1811 case Token::MOD: {
1812 // Save a0-a3 on the stack.
1813 RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
1814 __ MultiPush(saved_regs);
1815
1816 __ PrepareCallCFunction(0, 2, scratch0());
1817 __ MovToFloatParameters(left, right);
1818 __ CallCFunction(
1819 ExternalReference::mod_two_doubles_operation(isolate()),
1820 0, 2);
1821 // Move the result in the double result register.
1822 __ MovFromFloatResult(result);
1823
1824 // Restore saved register.
1825 __ MultiPop(saved_regs);
1826 break;
1827 }
1828 default:
1829 UNREACHABLE();
1830 break;
1831 }
1832}
1833
1834
1835void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1836 DCHECK(ToRegister(instr->context()).is(cp));
1837 DCHECK(ToRegister(instr->left()).is(a1));
1838 DCHECK(ToRegister(instr->right()).is(a0));
1839 DCHECK(ToRegister(instr->result()).is(v0));
1840
1841  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
1842  CallCode(code, RelocInfo::CODE_TARGET, instr);
1843  // Other architectures use a nop here to signal that there is no inlined
1844  // patchable code. MIPS does not need the nop, since our marker
1845  // instruction (andi zero_reg) will never be used in normal code.
1846}
1847
1848
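// Emits the conditional branch to the instruction's true/false destinations.
// If one of the destinations is the next emitted block, only a single branch
// (with the condition negated when needed) is emitted and the other edge
// falls through.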
1849template<class InstrType>
1850void LCodeGen::EmitBranch(InstrType instr,
1851 Condition condition,
1852 Register src1,
1853 const Operand& src2) {
1854 int left_block = instr->TrueDestination(chunk_);
1855 int right_block = instr->FalseDestination(chunk_);
1856
1857 int next_block = GetNextEmittedBlock();
1858 if (right_block == left_block || condition == al) {
1859 EmitGoto(left_block);
1860 } else if (left_block == next_block) {
1861 __ Branch(chunk_->GetAssemblyLabel(right_block),
1862 NegateCondition(condition), src1, src2);
1863 } else if (right_block == next_block) {
1864 __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
1865 } else {
1866 __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
1867 __ Branch(chunk_->GetAssemblyLabel(right_block));
1868 }
1869}
1870
1871
1872template<class InstrType>
1873void LCodeGen::EmitBranchF(InstrType instr,
1874 Condition condition,
1875 FPURegister src1,
1876 FPURegister src2) {
1877 int right_block = instr->FalseDestination(chunk_);
1878 int left_block = instr->TrueDestination(chunk_);
1879
1880 int next_block = GetNextEmittedBlock();
1881 if (right_block == left_block) {
1882 EmitGoto(left_block);
1883 } else if (left_block == next_block) {
1884 __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
1885 NegateFpuCondition(condition), src1, src2);
1886 } else if (right_block == next_block) {
1887 __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
1888 condition, src1, src2);
1889 } else {
1890 __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
1891 condition, src1, src2);
1892 __ Branch(chunk_->GetAssemblyLabel(right_block));
1893 }
1894}
1895
1896
1897template <class InstrType>
1898void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition,
1899 Register src1, const Operand& src2) {
1900 int true_block = instr->TrueDestination(chunk_);
1901 __ Branch(chunk_->GetAssemblyLabel(true_block), condition, src1, src2);
1902}
1903
1904
1905template <class InstrType>
1906void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition,
1907 Register src1, const Operand& src2) {
1908 int false_block = instr->FalseDestination(chunk_);
1909 __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
1910}
1911
1912
1913template<class InstrType>
1914void LCodeGen::EmitFalseBranchF(InstrType instr,
1915 Condition condition,
1916 FPURegister src1,
1917 FPURegister src2) {
1918 int false_block = instr->FalseDestination(chunk_);
1919 __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
1920 condition, src1, src2);
1921}
1922
1923
1924void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
1925 __ stop("LDebugBreak");
1926}
1927
1928
1929void LCodeGen::DoBranch(LBranch* instr) {
1930 Representation r = instr->hydrogen()->value()->representation();
1931 if (r.IsInteger32() || r.IsSmi()) {
1932 DCHECK(!info()->IsStub());
1933 Register reg = ToRegister(instr->value());
1934 EmitBranch(instr, ne, reg, Operand(zero_reg));
1935 } else if (r.IsDouble()) {
1936 DCHECK(!info()->IsStub());
1937 DoubleRegister reg = ToDoubleRegister(instr->value());
1938 // Test the double value. Zero and NaN are false.
1939 EmitBranchF(instr, ogl, reg, kDoubleRegZero);
1940 } else {
1941 DCHECK(r.IsTagged());
1942 Register reg = ToRegister(instr->value());
1943 HType type = instr->hydrogen()->value()->type();
1944 if (type.IsBoolean()) {
1945 DCHECK(!info()->IsStub());
1946 __ LoadRoot(at, Heap::kTrueValueRootIndex);
1947 EmitBranch(instr, eq, reg, Operand(at));
1948 } else if (type.IsSmi()) {
1949 DCHECK(!info()->IsStub());
1950 EmitBranch(instr, ne, reg, Operand(zero_reg));
1951 } else if (type.IsJSArray()) {
1952 DCHECK(!info()->IsStub());
1953 EmitBranch(instr, al, zero_reg, Operand(zero_reg));
1954 } else if (type.IsHeapNumber()) {
1955 DCHECK(!info()->IsStub());
1956 DoubleRegister dbl_scratch = double_scratch0();
1957 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
1958 // Test the double value. Zero and NaN are false.
1959 EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
1960 } else if (type.IsString()) {
1961 DCHECK(!info()->IsStub());
1962 __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
1963 EmitBranch(instr, ne, at, Operand(zero_reg));
1964 } else {
1965      ToBooleanICStub::Types expected =
1966 instr->hydrogen()->expected_input_types();
1967      // Avoid deopts in the case where we've never executed this path before.
1968      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
1969
1970      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
1971        // undefined -> false.
1972 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
1973 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
1974 }
1975      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
1976        // Boolean -> its value.
1977 __ LoadRoot(at, Heap::kTrueValueRootIndex);
1978 __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
1979 __ LoadRoot(at, Heap::kFalseValueRootIndex);
1980 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
1981 }
1982      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
1983        // 'null' -> false.
1984 __ LoadRoot(at, Heap::kNullValueRootIndex);
1985 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
1986 }
1987
1988      if (expected.Contains(ToBooleanICStub::SMI)) {
1989        // Smis: 0 -> false, all other -> true.
1990 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
1991 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
1992 } else if (expected.NeedsMap()) {
1993 // If we need a map later and have a Smi -> deopt.
1994 __ SmiTst(reg, at);
1995 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
1996 }
1997
1998 const Register map = scratch0();
1999 if (expected.NeedsMap()) {
2000 __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2001 if (expected.CanBeUndetectable()) {
2002 // Undetectable -> false.
2003 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
2004 __ And(at, at, Operand(1 << Map::kIsUndetectable));
2005 __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
2006 }
2007 }
2008
2009      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
2010        // spec object -> true.
2011 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
2012 __ Branch(instr->TrueLabel(chunk_),
2013 ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
2014 }
2015
2016      if (expected.Contains(ToBooleanICStub::STRING)) {
2017        // String value -> false iff empty.
2018 Label not_string;
2019 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
2020 __ Branch(&not_string, ge , at, Operand(FIRST_NONSTRING_TYPE));
2021 __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
2022 __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
2023 __ Branch(instr->FalseLabel(chunk_));
2024 __ bind(&not_string);
2025 }
2026
2027      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
2028        // Symbol value -> true.
2029 const Register scratch = scratch1();
2030 __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
2031 __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
2032 }
2033
2034      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
2035        // SIMD value -> true.
2036 const Register scratch = scratch1();
2037 __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
2038 __ Branch(instr->TrueLabel(chunk_), eq, scratch,
2039 Operand(SIMD128_VALUE_TYPE));
2040 }
2041
2042      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
2043        // heap number -> false iff +0, -0, or NaN.
2044 DoubleRegister dbl_scratch = double_scratch0();
2045 Label not_heap_number;
2046 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
2047 __ Branch(&not_heap_number, ne, map, Operand(at));
2048 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2049 __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2050 ne, dbl_scratch, kDoubleRegZero);
2051 // Falls through if dbl_scratch == 0.
2052 __ Branch(instr->FalseLabel(chunk_));
2053 __ bind(&not_heap_number);
2054 }
2055
2056 if (!expected.IsGeneric()) {
2057 // We've seen something for the first time -> deopt.
2058 // This can only happen if we are not generic already.
2059 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg,
2060 Operand(zero_reg));
2061 }
2062 }
2063 }
2064}
2065
2066
2067void LCodeGen::EmitGoto(int block) {
2068 if (!IsNextEmittedBlock(block)) {
2069 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2070 }
2071}
2072
2073
2074void LCodeGen::DoGoto(LGoto* instr) {
2075 EmitGoto(instr->block_id());
2076}
2077
2078
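// Maps a comparison token to a MIPS condition code, selecting the unsigned
// condition variants (lo/hi/ls/hs) when is_unsigned is set.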
2079Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2080 Condition cond = kNoCondition;
2081 switch (op) {
2082 case Token::EQ:
2083 case Token::EQ_STRICT:
2084 cond = eq;
2085 break;
2086 case Token::NE:
2087 case Token::NE_STRICT:
2088 cond = ne;
2089 break;
2090 case Token::LT:
2091 cond = is_unsigned ? lo : lt;
2092 break;
2093 case Token::GT:
2094 cond = is_unsigned ? hi : gt;
2095 break;
2096 case Token::LTE:
2097 cond = is_unsigned ? ls : le;
2098 break;
2099 case Token::GTE:
2100 cond = is_unsigned ? hs : ge;
2101 break;
2102 case Token::IN:
2103 case Token::INSTANCEOF:
2104 default:
2105 UNREACHABLE();
2106 }
2107 return cond;
2108}
2109
2110
2111void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2112 LOperand* left = instr->left();
2113 LOperand* right = instr->right();
2114 bool is_unsigned =
2115 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2116 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2117 Condition cond = TokenToCondition(instr->op(), is_unsigned);
2118
2119 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2120 // We can statically evaluate the comparison.
2121 double left_val = ToDouble(LConstantOperand::cast(left));
2122 double right_val = ToDouble(LConstantOperand::cast(right));
2123    int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
2124 ? instr->TrueDestination(chunk_)
2125 : instr->FalseDestination(chunk_);
2126    EmitGoto(next_block);
2127 } else {
2128 if (instr->is_double()) {
2129 // Compare left and right as doubles and load the
2130 // resulting flags into the normal status register.
2131 FPURegister left_reg = ToDoubleRegister(left);
2132 FPURegister right_reg = ToDoubleRegister(right);
2133
2134 // If a NaN is involved, i.e. the result is unordered,
2135 // jump to false block label.
2136 __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
2137 left_reg, right_reg);
2138
2139 EmitBranchF(instr, cond, left_reg, right_reg);
2140 } else {
2141 Register cmp_left;
2142 Operand cmp_right = Operand(0);
2143
2144 if (right->IsConstantOperand()) {
2145 int32_t value = ToInteger32(LConstantOperand::cast(right));
2146 if (instr->hydrogen_value()->representation().IsSmi()) {
2147 cmp_left = ToRegister(left);
2148 cmp_right = Operand(Smi::FromInt(value));
2149 } else {
2150 cmp_left = ToRegister(left);
2151 cmp_right = Operand(value);
2152 }
2153 } else if (left->IsConstantOperand()) {
2154 int32_t value = ToInteger32(LConstantOperand::cast(left));
2155 if (instr->hydrogen_value()->representation().IsSmi()) {
2156 cmp_left = ToRegister(right);
2157 cmp_right = Operand(Smi::FromInt(value));
2158 } else {
2159 cmp_left = ToRegister(right);
2160 cmp_right = Operand(value);
2161 }
2162 // We commuted the operands, so commute the condition.
2163 cond = CommuteCondition(cond);
2164 } else {
2165 cmp_left = ToRegister(left);
2166 cmp_right = Operand(ToRegister(right));
2167 }
2168
2169 EmitBranch(instr, cond, cmp_left, cmp_right);
2170 }
2171 }
2172}
2173
2174
2175void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2176 Register left = ToRegister(instr->left());
2177 Register right = ToRegister(instr->right());
2178
2179 EmitBranch(instr, eq, left, Operand(right));
2180}
2181
2182
2183void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2184 if (instr->hydrogen()->representation().IsTagged()) {
2185 Register input_reg = ToRegister(instr->object());
2186 __ li(at, Operand(factory()->the_hole_value()));
2187 EmitBranch(instr, eq, input_reg, Operand(at));
2188 return;
2189 }
2190
2191 DoubleRegister input_reg = ToDoubleRegister(instr->object());
2192 EmitFalseBranchF(instr, eq, input_reg, input_reg);
2193
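  // A value that compares equal to itself is not a NaN, so only NaNs reach
  // this point; the hole NaN is then identified by its upper 32 bits alone.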
2194 Register scratch = scratch0();
2195 __ FmoveHigh(scratch, input_reg);
2196 EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
2197}
2198
2199
2200Condition LCodeGen::EmitIsString(Register input,
2201 Register temp1,
2202 Label* is_not_string,
2203 SmiCheck check_needed = INLINE_SMI_CHECK) {
2204 if (check_needed == INLINE_SMI_CHECK) {
2205 __ JumpIfSmi(input, is_not_string);
2206 }
2207 __ GetObjectType(input, temp1, temp1);
2208
2209 return lt;
2210}
2211
2212
2213void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2214 Register reg = ToRegister(instr->value());
2215 Register temp1 = ToRegister(instr->temp());
2216
2217 SmiCheck check_needed =
2218 instr->hydrogen()->value()->type().IsHeapObject()
2219 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2220 Condition true_cond =
2221 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2222
2223 EmitBranch(instr, true_cond, temp1,
2224 Operand(FIRST_NONSTRING_TYPE));
2225}
2226
2227
2228void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2229 Register input_reg = EmitLoadRegister(instr->value(), at);
2230 __ And(at, input_reg, kSmiTagMask);
2231 EmitBranch(instr, eq, at, Operand(zero_reg));
2232}
2233
2234
2235void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2236 Register input = ToRegister(instr->value());
2237 Register temp = ToRegister(instr->temp());
2238
2239 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2240 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2241 }
2242 __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2243 __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2244 __ And(at, temp, Operand(1 << Map::kIsUndetectable));
2245 EmitBranch(instr, ne, at, Operand(zero_reg));
2246}
2247
2248
2249static Condition ComputeCompareCondition(Token::Value op) {
2250 switch (op) {
2251 case Token::EQ_STRICT:
2252 case Token::EQ:
2253 return eq;
2254 case Token::LT:
2255 return lt;
2256 case Token::GT:
2257 return gt;
2258 case Token::LTE:
2259 return le;
2260 case Token::GTE:
2261 return ge;
2262 default:
2263 UNREACHABLE();
2264 return kNoCondition;
2265 }
2266}
2267
2268
2269void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2270 DCHECK(ToRegister(instr->context()).is(cp));
2271 DCHECK(ToRegister(instr->left()).is(a1));
2272 DCHECK(ToRegister(instr->right()).is(a0));
2273
2274  Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
2275  CallCode(code, RelocInfo::CODE_TARGET, instr);
2276  __ LoadRoot(at, Heap::kTrueValueRootIndex);
2277 EmitBranch(instr, eq, v0, Operand(at));
2278}
2279
2280
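// TestType/BranchCondition fold the [from, to] instance type range check into
// a single comparison: a range starting at FIRST_TYPE tests only the upper
// bound (ls), a range ending at LAST_TYPE tests only the lower bound (hs),
// and a single-type range uses equality.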
2281static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2282 InstanceType from = instr->from();
2283 InstanceType to = instr->to();
2284 if (from == FIRST_TYPE) return to;
2285 DCHECK(from == to || to == LAST_TYPE);
2286 return from;
2287}
2288
2289
2290static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2291 InstanceType from = instr->from();
2292 InstanceType to = instr->to();
2293 if (from == to) return eq;
2294 if (to == LAST_TYPE) return hs;
2295 if (from == FIRST_TYPE) return ls;
2296 UNREACHABLE();
2297 return eq;
2298}
2299
2300
2301void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2302 Register scratch = scratch0();
2303 Register input = ToRegister(instr->value());
2304
2305 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2306 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2307 }
2308
2309 __ GetObjectType(input, scratch, scratch);
2310 EmitBranch(instr,
2311 BranchCondition(instr->hydrogen()),
2312 scratch,
2313 Operand(TestType(instr->hydrogen())));
2314}
2315
2316
2317void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2318 Register input = ToRegister(instr->value());
2319 Register result = ToRegister(instr->result());
2320
2321 __ AssertString(input);
2322
2323 __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
2324 __ IndexFromHash(result, result);
2325}
2326
2327
2328void LCodeGen::DoHasCachedArrayIndexAndBranch(
2329 LHasCachedArrayIndexAndBranch* instr) {
2330 Register input = ToRegister(instr->value());
2331 Register scratch = scratch0();
2332
2333 __ lw(scratch,
2334 FieldMemOperand(input, String::kHashFieldOffset));
2335 __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
2336 EmitBranch(instr, eq, at, Operand(zero_reg));
2337}
2338
2339
2340// Branches to a label or falls through with the answer in flags. Trashes
2341// the temp registers, but not the input.
2342void LCodeGen::EmitClassOfTest(Label* is_true,
2343 Label* is_false,
2344                               Handle<String> class_name,
2345 Register input,
2346 Register temp,
2347 Register temp2) {
2348 DCHECK(!input.is(temp));
2349 DCHECK(!input.is(temp2));
2350 DCHECK(!temp.is(temp2));
2351
2352 __ JumpIfSmi(input, is_false);
2353 __ GetObjectType(input, temp, temp2);
2354  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
2355  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2356    __ Branch(is_true, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
2357  } else {
2358    __ Branch(is_false, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
2359  }
2360
2361 // Check if the constructor in the map is a function.
2362 Register instance_type = scratch1();
2363 DCHECK(!instance_type.is(temp));
2364 __ GetMapConstructor(temp, temp, temp2, instance_type);
2365
2366 // Objects with a non-function constructor have class 'Object'.
2367 if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2368 __ Branch(is_true, ne, instance_type, Operand(JS_FUNCTION_TYPE));
2369 } else {
2370 __ Branch(is_false, ne, instance_type, Operand(JS_FUNCTION_TYPE));
2371 }
2372
2373 // temp now contains the constructor function. Grab the
2374 // instance class name from there.
2375 __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2376 __ lw(temp, FieldMemOperand(temp,
2377 SharedFunctionInfo::kInstanceClassNameOffset));
2378 // The class name we are testing against is internalized since it's a literal.
2379 // The name in the constructor is internalized because of the way the context
2380 // is booted. This routine isn't expected to work for random API-created
2381 // classes and it doesn't have to because you can't access it with natives
2382 // syntax. Since both sides are internalized it is sufficient to use an
2383 // identity comparison.
2384
2385 // End with the address of this class_name instance in temp register.
2386 // On MIPS, the caller must do the comparison with Handle<String>class_name.
2387}
2388
2389
2390void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2391 Register input = ToRegister(instr->value());
2392 Register temp = scratch0();
2393 Register temp2 = ToRegister(instr->temp());
2394 Handle<String> class_name = instr->hydrogen()->class_name();
2395
2396 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2397 class_name, input, temp, temp2);
2398
2399 EmitBranch(instr, eq, temp, Operand(class_name));
2400}
2401
2402
2403void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2404 Register reg = ToRegister(instr->value());
2405 Register temp = ToRegister(instr->temp());
2406
2407 __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2408 EmitBranch(instr, eq, temp, Operand(instr->map()));
2409}
2410
2411
2412void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2413 DCHECK(ToRegister(instr->context()).is(cp));
2414 DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
2415 DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
2416 DCHECK(ToRegister(instr->result()).is(v0));
2417 InstanceOfStub stub(isolate());
2418 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2419}
2420
2421
2422void LCodeGen::DoHasInPrototypeChainAndBranch(
2423 LHasInPrototypeChainAndBranch* instr) {
2424 Register const object = ToRegister(instr->object());
2425 Register const object_map = scratch0();
2426 Register const object_instance_type = scratch1();
2427 Register const object_prototype = object_map;
2428 Register const prototype = ToRegister(instr->prototype());
2429
2430 // The {object} must be a spec object. It's sufficient to know that {object}
2431 // is not a smi, since all other non-spec objects have {null} prototypes and
2432 // will be ruled out below.
2433 if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
2434 __ SmiTst(object, at);
2435 EmitFalseBranch(instr, eq, at, Operand(zero_reg));
2436 }
2437
2438 // Loop through the {object}s prototype chain looking for the {prototype}.
2439 __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
2440 Label loop;
2441 __ bind(&loop);
2442
2443 // Deoptimize if the object needs to be access checked.
2444 __ lbu(object_instance_type,
2445 FieldMemOperand(object_map, Map::kBitFieldOffset));
2446 __ And(object_instance_type, object_instance_type,
2447 Operand(1 << Map::kIsAccessCheckNeeded));
2448 DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, object_instance_type,
2449 Operand(zero_reg));
2450 // Deoptimize for proxies.
2451 __ lbu(object_instance_type,
2452 FieldMemOperand(object_map, Map::kInstanceTypeOffset));
2453 DeoptimizeIf(eq, instr, Deoptimizer::kProxy, object_instance_type,
2454 Operand(JS_PROXY_TYPE));
2455
2456 __ lw(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
2457 EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
2458 __ LoadRoot(at, Heap::kNullValueRootIndex);
2459 EmitFalseBranch(instr, eq, object_prototype, Operand(at));
2460 __ Branch(USE_DELAY_SLOT, &loop);
2461 __ lw(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
2462}
2463
2464
2465void LCodeGen::DoCmpT(LCmpT* instr) {
2466 DCHECK(ToRegister(instr->context()).is(cp));
2467 Token::Value op = instr->op();
2468
2469  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2470  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2471 // On MIPS there is no need for a "no inlined smi code" marker (nop).
2472
2473 Condition condition = ComputeCompareCondition(op);
2474 // A minor optimization that relies on LoadRoot always emitting one
2475 // instruction.
2476 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
2477 Label done, check;
2478 __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
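  // The first LoadRoot below is emitted into the branch delay slot, so the
  // true value is loaded unconditionally and is overwritten with false only
  // when the branch above is not taken.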
2479 __ bind(&check);
2480 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2481 DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
2482 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2483 __ bind(&done);
2484}
2485
2486
2487void LCodeGen::DoReturn(LReturn* instr) {
2488 if (FLAG_trace && info()->IsOptimizing()) {
2489 // Push the return value on the stack as the parameter.
2490 // Runtime::TraceExit returns its parameter in v0. We're leaving the code
2491 // managed by the register allocator and tearing down the frame, it's
2492 // safe to write to the context register.
2493 __ push(v0);
2494 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2495 __ CallRuntime(Runtime::kTraceExit);
2496 }
2497 if (info()->saves_caller_doubles()) {
2498 RestoreCallerDoubles();
2499 }
2500 if (NeedsEagerFrame()) {
2501 __ mov(sp, fp);
2502 __ Pop(ra, fp);
2503 }
2504 if (instr->has_constant_parameter_count()) {
2505 int parameter_count = ToInteger32(instr->constant_parameter_count());
2506 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2507 if (sp_delta != 0) {
2508 __ Addu(sp, sp, Operand(sp_delta));
2509 }
2510 } else {
2511 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2512 Register reg = ToRegister(instr->parameter_count());
2513 // The argument count parameter is a smi
2514 __ SmiUntag(reg);
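    // Lsa scales the untagged argument count by the pointer size and adds it
    // to sp, dropping the arguments from the stack.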
2515    __ Lsa(sp, sp, reg, kPointerSizeLog2);
2516  }
2517
2518 __ Jump(ra);
2519}
2520
2521
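// Materializes the type feedback vector and the (smi-tagged) slot index into
// the registers expected by the vector-based load IC calling convention.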
2522template <class T>
2523void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2524 Register vector_register = ToRegister(instr->temp_vector());
2525 Register slot_register = LoadWithVectorDescriptor::SlotRegister();
2526 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
2527 DCHECK(slot_register.is(a0));
2528
2529 AllowDeferredHandleDereference vector_structure_check;
2530 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2531 __ li(vector_register, vector);
2532 // No need to allocate this register.
2533 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2534 int index = vector->GetIndex(slot);
2535 __ li(slot_register, Operand(Smi::FromInt(index)));
2536}
2537
2538
2539template <class T>
2540void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
2541 Register vector_register = ToRegister(instr->temp_vector());
2542 Register slot_register = ToRegister(instr->temp_slot());
2543
2544 AllowDeferredHandleDereference vector_structure_check;
2545 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2546 __ li(vector_register, vector);
2547 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2548 int index = vector->GetIndex(slot);
2549 __ li(slot_register, Operand(Smi::FromInt(index)));
2550}
2551
2552
2553void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2554 DCHECK(ToRegister(instr->context()).is(cp));
2555 DCHECK(ToRegister(instr->global_object())
2556 .is(LoadDescriptor::ReceiverRegister()));
2557 DCHECK(ToRegister(instr->result()).is(v0));
2558
2559 __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
2560 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2561  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
2562 isolate(), instr->typeof_mode(), PREMONOMORPHIC)
2563 .code();
2564  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2565}
2566
2567
2568void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2569 Register context = ToRegister(instr->context());
2570 Register result = ToRegister(instr->result());
2571
2572 __ lw(result, ContextMemOperand(context, instr->slot_index()));
2573 if (instr->hydrogen()->RequiresHoleCheck()) {
2574 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2575
2576 if (instr->hydrogen()->DeoptimizesOnHole()) {
2577 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
2578 } else {
2579 Label is_not_hole;
2580 __ Branch(&is_not_hole, ne, result, Operand(at));
2581 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2582 __ bind(&is_not_hole);
2583 }
2584 }
2585}
2586
2587
2588void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2589 Register context = ToRegister(instr->context());
2590 Register value = ToRegister(instr->value());
2591 Register scratch = scratch0();
2592 MemOperand target = ContextMemOperand(context, instr->slot_index());
2593
2594 Label skip_assignment;
2595
2596 if (instr->hydrogen()->RequiresHoleCheck()) {
2597 __ lw(scratch, target);
2598 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2599
2600 if (instr->hydrogen()->DeoptimizesOnHole()) {
2601 DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at));
2602 } else {
2603 __ Branch(&skip_assignment, ne, scratch, Operand(at));
2604 }
2605 }
2606
2607 __ sw(value, target);
2608 if (instr->hydrogen()->NeedsWriteBarrier()) {
2609 SmiCheck check_needed =
2610 instr->hydrogen()->value()->type().IsHeapObject()
2611 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2612 __ RecordWriteContextSlot(context,
2613 target.offset(),
2614 value,
2615 scratch0(),
2616 GetRAState(),
2617 kSaveFPRegs,
2618 EMIT_REMEMBERED_SET,
2619 check_needed);
2620 }
2621
2622 __ bind(&skip_assignment);
2623}
2624
2625
2626void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2627 HObjectAccess access = instr->hydrogen()->access();
2628 int offset = access.offset();
2629 Register object = ToRegister(instr->object());
2630
2631 if (access.IsExternalMemory()) {
2632 Register result = ToRegister(instr->result());
2633 MemOperand operand = MemOperand(object, offset);
2634 __ Load(result, operand, access.representation());
2635 return;
2636 }
2637
2638 if (instr->hydrogen()->representation().IsDouble()) {
2639 DoubleRegister result = ToDoubleRegister(instr->result());
2640 __ ldc1(result, FieldMemOperand(object, offset));
2641 return;
2642 }
2643
2644 Register result = ToRegister(instr->result());
2645 if (!access.IsInobject()) {
2646 __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2647 object = result;
2648 }
2649 MemOperand operand = FieldMemOperand(object, offset);
2650 __ Load(result, operand, access.representation());
2651}
2652
2653
2654void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2655 DCHECK(ToRegister(instr->context()).is(cp));
2656 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
2657 DCHECK(ToRegister(instr->result()).is(v0));
2658
2659 // Name is always in a2.
2660 __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
2661 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
2662  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
2663 isolate(), NOT_INSIDE_TYPEOF,
2664 instr->hydrogen()->initialization_state())
2665 .code();
2666  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2667}
2668
2669
2670void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2671 Register scratch = scratch0();
2672 Register function = ToRegister(instr->function());
2673 Register result = ToRegister(instr->result());
2674
2675 // Get the prototype or initial map from the function.
2676 __ lw(result,
2677 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2678
2679 // Check that the function has a prototype or an initial map.
2680 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2681 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
2682
2683 // If the function does not have an initial map, we're done.
2684 Label done;
2685 __ GetObjectType(result, scratch, scratch);
2686 __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
2687
2688 // Get the prototype from the initial map.
2689 __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
2690
2691 // All done.
2692 __ bind(&done);
2693}
2694
2695
2696void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2697 Register result = ToRegister(instr->result());
2698 __ LoadRoot(result, instr->index());
2699}
2700
2701
2702void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2703 Register arguments = ToRegister(instr->arguments());
2704 Register result = ToRegister(instr->result());
2705 // There are two words between the frame pointer and the last argument.
2706  // Subtracting from length accounts for one of them; add one more.
2707 if (instr->length()->IsConstantOperand()) {
2708 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
2709 if (instr->index()->IsConstantOperand()) {
2710 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2711 int index = (const_length - const_index) + 1;
2712 __ lw(result, MemOperand(arguments, index * kPointerSize));
2713 } else {
2714 Register index = ToRegister(instr->index());
2715 __ li(at, Operand(const_length + 1));
2716 __ Subu(result, at, index);
2717      __ Lsa(at, arguments, result, kPointerSizeLog2);
2718      __ lw(result, MemOperand(at));
2719 }
2720 } else if (instr->index()->IsConstantOperand()) {
2721 Register length = ToRegister(instr->length());
2722 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2723 int loc = const_index - 1;
2724 if (loc != 0) {
2725 __ Subu(result, length, Operand(loc));
2726      __ Lsa(at, arguments, result, kPointerSizeLog2);
2727      __ lw(result, MemOperand(at));
2728 } else {
2729      __ Lsa(at, arguments, length, kPointerSizeLog2);
2730      __ lw(result, MemOperand(at));
2731 }
2732 } else {
2733 Register length = ToRegister(instr->length());
2734 Register index = ToRegister(instr->index());
2735 __ Subu(result, length, index);
2736 __ Addu(result, result, 1);
2737    __ Lsa(at, arguments, result, kPointerSizeLog2);
2738    __ lw(result, MemOperand(at));
2739 }
2740}
2741
2742
2743void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
2744 Register external_pointer = ToRegister(instr->elements());
2745 Register key = no_reg;
2746 ElementsKind elements_kind = instr->elements_kind();
2747 bool key_is_constant = instr->key()->IsConstantOperand();
2748 int constant_key = 0;
2749 if (key_is_constant) {
2750 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2751 if (constant_key & 0xF0000000) {
2752 Abort(kArrayIndexConstantValueTooBig);
2753 }
2754 } else {
2755 key = ToRegister(instr->key());
2756 }
2757 int element_size_shift = ElementsKindToShiftSize(elements_kind);
2758 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
2759 ? (element_size_shift - kSmiTagSize) : element_size_shift;
2760 int base_offset = instr->base_offset();
2761
2762 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
2763 FPURegister result = ToDoubleRegister(instr->result());
2764 if (key_is_constant) {
2765 __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
2766 } else {
2767 __ sll(scratch0(), key, shift_size);
2768 __ Addu(scratch0(), scratch0(), external_pointer);
2769 }
2770 if (elements_kind == FLOAT32_ELEMENTS) {
2771 __ lwc1(result, MemOperand(scratch0(), base_offset));
2772 __ cvt_d_s(result, result);
2773    } else {  // i.e. elements_kind == FLOAT64_ELEMENTS
2774 __ ldc1(result, MemOperand(scratch0(), base_offset));
2775 }
2776 } else {
2777 Register result = ToRegister(instr->result());
2778 MemOperand mem_operand = PrepareKeyedOperand(
2779 key, external_pointer, key_is_constant, constant_key,
2780 element_size_shift, shift_size, base_offset);
2781 switch (elements_kind) {
2782 case INT8_ELEMENTS:
2783 __ lb(result, mem_operand);
2784 break;
2785 case UINT8_ELEMENTS:
2786 case UINT8_CLAMPED_ELEMENTS:
2787 __ lbu(result, mem_operand);
2788 break;
2789 case INT16_ELEMENTS:
2790 __ lh(result, mem_operand);
2791 break;
2792 case UINT16_ELEMENTS:
2793 __ lhu(result, mem_operand);
2794 break;
2795 case INT32_ELEMENTS:
2796 __ lw(result, mem_operand);
2797 break;
2798 case UINT32_ELEMENTS:
2799 __ lw(result, mem_operand);
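        // A uint32 value with the most significant bit set cannot be
        // represented as an int32; deoptimize unless the result is explicitly
        // treated as uint32.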
2800 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
2801 DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue,
2802 result, Operand(0x80000000));
2803 }
2804 break;
2805 case FLOAT32_ELEMENTS:
2806 case FLOAT64_ELEMENTS:
2807 case FAST_DOUBLE_ELEMENTS:
2808 case FAST_ELEMENTS:
2809 case FAST_SMI_ELEMENTS:
2810 case FAST_HOLEY_DOUBLE_ELEMENTS:
2811 case FAST_HOLEY_ELEMENTS:
2812 case FAST_HOLEY_SMI_ELEMENTS:
2813 case DICTIONARY_ELEMENTS:
2814 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
2815 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
2816      case FAST_STRING_WRAPPER_ELEMENTS:
2817 case SLOW_STRING_WRAPPER_ELEMENTS:
2818 case NO_ELEMENTS:
2819        UNREACHABLE();
2820 break;
2821 }
2822 }
2823}
2824
2825
2826void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
2827 Register elements = ToRegister(instr->elements());
2828 bool key_is_constant = instr->key()->IsConstantOperand();
2829 Register key = no_reg;
2830 DoubleRegister result = ToDoubleRegister(instr->result());
2831 Register scratch = scratch0();
2832
2833 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
2834
2835 int base_offset = instr->base_offset();
2836 if (key_is_constant) {
2837 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2838 if (constant_key & 0xF0000000) {
2839 Abort(kArrayIndexConstantValueTooBig);
2840 }
2841 base_offset += constant_key * kDoubleSize;
2842 }
2843 __ Addu(scratch, elements, Operand(base_offset));
2844
2845 if (!key_is_constant) {
2846 key = ToRegister(instr->key());
2847 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
2848 ? (element_size_shift - kSmiTagSize) : element_size_shift;
2849    __ Lsa(scratch, scratch, key, shift_size);
2850  }
2851
2852 __ ldc1(result, MemOperand(scratch));
2853
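  // The hole is a NaN with a distinctive upper word, so reloading only the
  // upper 32 bits of the slot is enough to detect it.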
2854 if (instr->hydrogen()->RequiresHoleCheck()) {
2855 __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
2856 DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch,
2857 Operand(kHoleNanUpper32));
2858 }
2859}
2860
2861
2862void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
2863 Register elements = ToRegister(instr->elements());
2864 Register result = ToRegister(instr->result());
2865 Register scratch = scratch0();
2866 Register store_base = scratch;
2867 int offset = instr->base_offset();
2868
2869 if (instr->key()->IsConstantOperand()) {
2870 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
2871 offset += ToInteger32(const_operand) * kPointerSize;
2872 store_base = elements;
2873 } else {
2874 Register key = ToRegister(instr->key());
2875 // Even though the HLoadKeyed instruction forces the input
2876 // representation for the key to be an integer, the input gets replaced
2877 // during bound check elimination with the index argument to the bounds
2878 // check, which can be tagged, so that case must be handled here, too.
2879 if (instr->hydrogen()->key()->representation().IsSmi()) {
2880      __ Lsa(scratch, elements, key, kPointerSizeLog2 - kSmiTagSize);
2881    } else {
2882      __ Lsa(scratch, elements, key, kPointerSizeLog2);
2883    }
2884 }
2885 __ lw(result, MemOperand(store_base, offset));
2886
2887 // Check for the hole value.
2888 if (instr->hydrogen()->RequiresHoleCheck()) {
2889 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
2890 __ SmiTst(result, scratch);
2891 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch,
2892 Operand(zero_reg));
2893 } else {
2894 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2895 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch));
2896 }
2897 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
2898 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
2899 Label done;
2900 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2901 __ Branch(&done, ne, result, Operand(scratch));
2902 if (info()->IsStub()) {
2903 // A stub can safely convert the hole to undefined only if the array
2904 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
2905 // it needs to bail out.
2906 __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
2907 __ lw(result, FieldMemOperand(result, Cell::kValueOffset));
2908 DeoptimizeIf(ne, instr, Deoptimizer::kHole, result,
2909 Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
2910 }
2911 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2912 __ bind(&done);
2913 }
2914}
2915
2916
2917void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
2918 if (instr->is_fixed_typed_array()) {
2919 DoLoadKeyedExternalArray(instr);
2920 } else if (instr->hydrogen()->representation().IsDouble()) {
2921 DoLoadKeyedFixedDoubleArray(instr);
2922 } else {
2923 DoLoadKeyedFixedArray(instr);
2924 }
2925}
2926
2927
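// Builds the memory operand for a keyed access. A shift_size of -1 means the
// key is a tagged smi addressing byte-sized elements, so it is shifted right
// by one to untag it instead of being scaled up.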
2928MemOperand LCodeGen::PrepareKeyedOperand(Register key,
2929 Register base,
2930 bool key_is_constant,
2931 int constant_key,
2932 int element_size,
2933 int shift_size,
2934 int base_offset) {
2935 if (key_is_constant) {
2936 return MemOperand(base, (constant_key << element_size) + base_offset);
2937 }
2938
2939 if (base_offset == 0) {
2940 if (shift_size >= 0) {
2941 __ sll(scratch0(), key, shift_size);
2942 __ Addu(scratch0(), base, scratch0());
2943 return MemOperand(scratch0());
2944 } else {
2945 DCHECK_EQ(-1, shift_size);
2946 __ srl(scratch0(), key, 1);
2947 __ Addu(scratch0(), base, scratch0());
2948 return MemOperand(scratch0());
2949 }
2950 }
2951
2952 if (shift_size >= 0) {
2953 __ sll(scratch0(), key, shift_size);
2954 __ Addu(scratch0(), base, scratch0());
2955 return MemOperand(scratch0(), base_offset);
2956 } else {
2957 DCHECK_EQ(-1, shift_size);
2958 __ sra(scratch0(), key, 1);
2959 __ Addu(scratch0(), base, scratch0());
2960 return MemOperand(scratch0(), base_offset);
2961 }
2962}
2963
2964
2965void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2966 DCHECK(ToRegister(instr->context()).is(cp));
2967 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
2968 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
2969
2970 if (instr->hydrogen()->HasVectorAndSlot()) {
2971 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
2972 }
2973
2974 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
2975                        isolate(), instr->hydrogen()->initialization_state())
2976 .code();
2977  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2978}
2979
2980
2981void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
2982 Register scratch = scratch0();
2983 Register temp = scratch1();
2984 Register result = ToRegister(instr->result());
2985
2986 if (instr->hydrogen()->from_inlined()) {
2987 __ Subu(result, sp, 2 * kPointerSize);
2988  } else if (instr->hydrogen()->arguments_adaptor()) {
2989    // Check if the calling frame is an arguments adaptor frame.
2990 Label done, adapted;
2991 __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2992    __ lw(result,
2993 MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
2994    __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2995
2996 // Result is the frame pointer for the frame if not adapted and for the real
2997 // frame below the adaptor frame if adapted.
2998 __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
2999 __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
3000  } else {
3001 __ mov(result, fp);
3002  }
3003}
3004
3005
3006void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3007 Register elem = ToRegister(instr->elements());
3008 Register result = ToRegister(instr->result());
3009
3010 Label done;
3011
3012 // If no arguments adaptor frame the number of arguments is fixed.
3013 __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
3014 __ Branch(&done, eq, fp, Operand(elem));
3015
3016 // Arguments adaptor frame present. Get argument length from there.
3017 __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3018 __ lw(result,
3019 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3020 __ SmiUntag(result);
3021
3022 // Argument length is in result register.
3023 __ bind(&done);
3024}
3025
3026
3027void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3028 Register receiver = ToRegister(instr->receiver());
3029 Register function = ToRegister(instr->function());
3030 Register result = ToRegister(instr->result());
3031 Register scratch = scratch0();
3032
3033 // If the receiver is null or undefined, we have to pass the global
3034 // object as a receiver to normal functions. Values have to be
3035 // passed unchanged to builtins and strict-mode functions.
3036 Label global_object, result_in_receiver;
3037
3038 if (!instr->hydrogen()->known_function()) {
3039 // Do not transform the receiver to object for strict mode
3040 // functions.
3041 __ lw(scratch,
3042 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3043 __ lw(scratch,
3044 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3045
3046 // Do not transform the receiver to object for builtins.
3047 int32_t strict_mode_function_mask =
3048 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
3049 int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
3050 __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
3051 __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
3052 }
3053
3054 // Normal function. Replace undefined or null with global receiver.
3055 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3056 __ Branch(&global_object, eq, receiver, Operand(scratch));
3057 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3058 __ Branch(&global_object, eq, receiver, Operand(scratch));
3059
3060 // Deoptimize if the receiver is not a JS object.
3061 __ SmiTst(receiver, scratch);
3062 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg));
3063
3064 __ GetObjectType(receiver, scratch, scratch);
3065 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
3066 Operand(FIRST_JS_RECEIVER_TYPE));
3067
3068 __ Branch(&result_in_receiver);
3069 __ bind(&global_object);
3070 __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
3071 __ lw(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
3072 __ lw(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
3073
3074 if (result.is(receiver)) {
3075 __ bind(&result_in_receiver);
3076 } else {
3077 Label result_ok;
3078 __ Branch(&result_ok);
3079 __ bind(&result_in_receiver);
3080 __ mov(result, receiver);
3081 __ bind(&result_ok);
3082 }
3083}
3084
3085
3086void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3087 Register receiver = ToRegister(instr->receiver());
3088 Register function = ToRegister(instr->function());
3089 Register length = ToRegister(instr->length());
3090 Register elements = ToRegister(instr->elements());
3091 Register scratch = scratch0();
3092 DCHECK(receiver.is(a0)); // Used for parameter count.
3093 DCHECK(function.is(a1)); // Required by InvokeFunction.
3094 DCHECK(ToRegister(instr->result()).is(v0));
3095
3096 // Copy the arguments to this function possibly from the
3097 // adaptor frame below it.
3098 const uint32_t kArgumentsLimit = 1 * KB;
3099 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length,
3100 Operand(kArgumentsLimit));
3101
3102 // Push the receiver and use the register to keep the original
3103 // number of arguments.
3104 __ push(receiver);
3105 __ Move(receiver, length);
3106 // The arguments are at a one pointer size offset from elements.
3107 __ Addu(elements, elements, Operand(1 * kPointerSize));
3108
3109 // Loop through the arguments pushing them onto the execution
3110 // stack.
3111 Label invoke, loop;
3112 // length is a small non-negative integer, due to the test above.
3113 __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
3114 __ sll(scratch, length, 2);
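  // The sll above sits in the branch delay slot: it executes whether or not
  // the branch is taken and converts the argument count into a byte offset.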
3115 __ bind(&loop);
3116 __ Addu(scratch, elements, scratch);
3117 __ lw(scratch, MemOperand(scratch));
3118 __ push(scratch);
3119 __ Subu(length, length, Operand(1));
3120 __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
3121 __ sll(scratch, length, 2);
3122
3123 __ bind(&invoke);
3124
3125 InvokeFlag flag = CALL_FUNCTION;
3126 if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
3127 DCHECK(!info()->saves_caller_doubles());
3128 // TODO(ishell): drop current frame before pushing arguments to the stack.
3129 flag = JUMP_FUNCTION;
3130 ParameterCount actual(a0);
3131 // It is safe to use t0, t1 and t2 as scratch registers here given that
3132 // we are not going to return to caller function anyway.
3133 PrepareForTailCall(actual, t0, t1, t2);
3134 }
3135
3136 DCHECK(instr->HasPointerMap());
3137 LPointerMap* pointers = instr->pointer_map();
3138 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
3139 // The number of arguments is stored in receiver which is a0, as expected
3140 // by InvokeFunction.
3141 ParameterCount actual(receiver);
3142 __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
3143}
3144
3145
3146void LCodeGen::DoPushArgument(LPushArgument* instr) {
3147 LOperand* argument = instr->value();
3148 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3149 Abort(kDoPushArgumentNotImplementedForDoubleType);
3150 } else {
3151 Register argument_reg = EmitLoadRegister(argument, at);
3152 __ push(argument_reg);
3153 }
3154}
3155
3156
3157void LCodeGen::DoDrop(LDrop* instr) {
3158 __ Drop(instr->count());
3159}
3160
3161
3162void LCodeGen::DoThisFunction(LThisFunction* instr) {
3163 Register result = ToRegister(instr->result());
3164 __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3165}
3166
3167
3168void LCodeGen::DoContext(LContext* instr) {
3169 // If there is a non-return use, the context must be moved to a register.
3170 Register result = ToRegister(instr->result());
3171 if (info()->IsOptimizing()) {
3172 __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3173 } else {
3174 // If there is no frame, the context must be in cp.
3175 DCHECK(result.is(cp));
3176 }
3177}
3178
3179
3180void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3181 DCHECK(ToRegister(instr->context()).is(cp));
3182 __ li(scratch0(), instr->hydrogen()->pairs());
3183 __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3184 __ Push(scratch0(), scratch1());
3185 CallRuntime(Runtime::kDeclareGlobals, instr);
3186}
3187
3188void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3189 int formal_parameter_count, int arity,
3190 bool is_tail_call, LInstruction* instr) {
3191 bool dont_adapt_arguments =
3192 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3193 bool can_invoke_directly =
3194 dont_adapt_arguments || formal_parameter_count == arity;
3195
3196 Register function_reg = a1;
3197 LPointerMap* pointers = instr->pointer_map();
3198
3199 if (can_invoke_directly) {
3200 // Change context.
3201 __ lw(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
3202
3203 // Always initialize new target and number of actual arguments.
3204 __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
3205 __ li(a0, Operand(arity));
3206
3207 bool is_self_call = function.is_identical_to(info()->closure());
3208
3209 // Invoke function.
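    // A self-call targets the code object currently being generated, so the
    // optimized code recurses into itself directly instead of loading the
    // entry from the function's code entry field.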
3210 if (is_self_call) {
3211 Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
3212 if (is_tail_call) {
3213 __ Jump(self, RelocInfo::CODE_TARGET);
3214 } else {
3215 __ Call(self, RelocInfo::CODE_TARGET);
3216 }
3217 } else {
3218 __ lw(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
3219 if (is_tail_call) {
3220 __ Jump(at);
3221 } else {
3222 __ Call(at);
3223 }
3224 }
3225
3226 if (!is_tail_call) {
3227 // Set up deoptimization.
3228 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3229 }
3230 } else {
3231 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3232 ParameterCount actual(arity);
3233 ParameterCount expected(formal_parameter_count);
3234 InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
3235 __ InvokeFunction(function_reg, expected, actual, flag, generator);
3236 }
3237}
3238
3239
3240void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3241 DCHECK(instr->context() != NULL);
3242 DCHECK(ToRegister(instr->context()).is(cp));
3243 Register input = ToRegister(instr->value());
3244 Register result = ToRegister(instr->result());
3245 Register scratch = scratch0();
3246
3247 // Deoptimize if not a heap number.
3248 __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3249 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3250 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at));
3251
3252 Label done;
3253 Register exponent = scratch0();
3254 scratch = no_reg;
3255 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3256 // Check the sign of the argument. If the argument is positive, just
3257 // return it.
3258 __ Move(result, input);
3259 __ And(at, exponent, Operand(HeapNumber::kSignMask));
3260 __ Branch(&done, eq, at, Operand(zero_reg));
3261
3262 // Input is negative. Reverse its sign.
3263 // Preserve the value of all registers.
3264 {
3265 PushSafepointRegistersScope scope(this);
3266
3267 // Registers were saved at the safepoint, so we can use
3268 // many scratch registers.
3269 Register tmp1 = input.is(a1) ? a0 : a1;
3270 Register tmp2 = input.is(a2) ? a0 : a2;
3271 Register tmp3 = input.is(a3) ? a0 : a3;
3272 Register tmp4 = input.is(t0) ? a0 : t0;
3273
3274 // exponent: floating point exponent value.
3275
3276 Label allocated, slow;
3277 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3278 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3279 __ Branch(&allocated);
3280
3281 // Slow case: Call the runtime system to do the number allocation.
3282 __ bind(&slow);
3283
3284 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3285 instr->context());
3286 // Set the pointer to the new heap number in tmp1.
3287 if (!tmp1.is(v0))
3288 __ mov(tmp1, v0);
3289 // Restore input_reg after call to runtime.
3290 __ LoadFromSafepointRegisterSlot(input, input);
3291 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3292
3293 __ bind(&allocated);
3294 // exponent: floating point exponent value.
3295 // tmp1: allocated heap number.
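    // Clear the sign bit in the exponent word to produce the absolute value;
    // the mantissa word is copied over unchanged.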
3296 __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
3297 __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3298 __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3299 __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3300
3301 __ StoreToSafepointRegisterSlot(tmp1, result);
3302 }
3303
3304 __ bind(&done);
3305}
3306
3307
3308void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3309 Register input = ToRegister(instr->value());
3310 Register result = ToRegister(instr->result());
3311 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
3312 Label done;
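  // The mov below executes in the branch delay slot, so result is always
  // initialized with the input; negative inputs are then negated by the subu.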
3313 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
3314 __ mov(result, input);
3315 __ subu(result, zero_reg, input);
3316 // Overflow if result is still negative, i.e. 0x80000000.
3317 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
3318 __ bind(&done);
3319}
3320
3321
3322void LCodeGen::DoMathAbs(LMathAbs* instr) {
3323 // Class for deferred case.
3324 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3325 public:
3326 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3327 : LDeferredCode(codegen), instr_(instr) { }
3328 void Generate() override {
3329 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3330 }
3331 LInstruction* instr() override { return instr_; }
3332
3333 private:
3334 LMathAbs* instr_;
3335 };
3336
3337 Representation r = instr->hydrogen()->value()->representation();
3338 if (r.IsDouble()) {
3339 FPURegister input = ToDoubleRegister(instr->value());
3340 FPURegister result = ToDoubleRegister(instr->result());
3341 __ abs_d(result, input);
3342 } else if (r.IsSmiOrInteger32()) {
3343 EmitIntegerMathAbs(instr);
3344 } else {
3345 // Representation is tagged.
3346 DeferredMathAbsTaggedHeapNumber* deferred =
3347 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3348 Register input = ToRegister(instr->value());
3349 // Smi check.
3350 __ JumpIfNotSmi(input, deferred->entry());
3351 // If smi, handle it directly.
3352 EmitIntegerMathAbs(instr);
3353 __ bind(deferred->exit());
3354 }
3355}
3356
3357
3358void LCodeGen::DoMathFloor(LMathFloor* instr) {
3359 DoubleRegister input = ToDoubleRegister(instr->value());
3360 Register result = ToRegister(instr->result());
3361 Register scratch1 = scratch0();
3362 Register except_flag = ToRegister(instr->temp());
3363
3364 __ EmitFPUTruncate(kRoundToMinusInf,
3365 result,
3366 input,
3367 scratch1,
3368 double_scratch0(),
3369 except_flag);
3370
3371 // Deopt if the operation did not succeed.
3372 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
3373 Operand(zero_reg));
3374
3375 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3376 // Test for -0.
3377 Label done;
3378 __ Branch(&done, ne, result, Operand(zero_reg));
3379 __ Mfhc1(scratch1, input);
3380 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
3381 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
3382 Operand(zero_reg));
3383 __ bind(&done);
3384 }
3385}
3386
3387
3388void LCodeGen::DoMathRound(LMathRound* instr) {
3389 DoubleRegister input = ToDoubleRegister(instr->value());
3390 Register result = ToRegister(instr->result());
3391 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3392 Register scratch = scratch0();
3393 Label done, check_sign_on_zero;
3394
3395 // Extract exponent bits.
3396 __ Mfhc1(result, input);
3397 __ Ext(scratch,
3398 result,
3399 HeapNumber::kExponentShift,
3400 HeapNumber::kExponentBits);
3401
3402 // If the number is in ]-0.5, +0.5[, the result is +/- 0.
3403 Label skip1;
3404 __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
3405 __ mov(result, zero_reg);
3406 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3407 __ Branch(&check_sign_on_zero);
3408 } else {
3409 __ Branch(&done);
3410 }
3411 __ bind(&skip1);
3412
3413 // The following conversion will not work with numbers
3414 // outside of ]-2^32, 2^32[.
3415 DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
3416 Operand(HeapNumber::kExponentBias + 32));
3417
3418 // Save the original sign for later comparison.
3419 __ And(scratch, result, Operand(HeapNumber::kSignMask));
3420
3421 __ Move(double_scratch0(), 0.5);
3422 __ add_d(double_scratch0(), input, double_scratch0());
3423
3424 // Check sign of the result: if the sign changed, the input
3425 // value was in [-0.5, 0[ and the result should be -0.
3426 __ Mfhc1(result, double_scratch0());
3427 __ Xor(result, result, Operand(scratch));
3428 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3429 // ARM uses 'mi' here, which is 'lt'
3430 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
3431 } else {
3432 Label skip2;
3433 // ARM uses 'mi' here, which is 'lt'
3434 // Negating it results in 'ge'
3435 __ Branch(&skip2, ge, result, Operand(zero_reg));
3436 __ mov(result, zero_reg);
3437 __ Branch(&done);
3438 __ bind(&skip2);
3439 }
3440
3441 Register except_flag = scratch;
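  // Rounding is implemented as floor(input + 0.5): truncate the biased value
  // toward minus infinity.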
3442 __ EmitFPUTruncate(kRoundToMinusInf,
3443 result,
3444 double_scratch0(),
3445 at,
3446 double_scratch1,
3447 except_flag);
3448
3449 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
3450 Operand(zero_reg));
3451
3452 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3453 // Test for -0.
3454 __ Branch(&done, ne, result, Operand(zero_reg));
3455 __ bind(&check_sign_on_zero);
3456 __ Mfhc1(scratch, input);
3457 __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
3458 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch,
3459 Operand(zero_reg));
3460 }
3461 __ bind(&done);
3462}
3463
3464
3465void LCodeGen::DoMathFround(LMathFround* instr) {
3466 DoubleRegister input = ToDoubleRegister(instr->value());
3467 DoubleRegister result = ToDoubleRegister(instr->result());
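  // Round to float32 precision by converting to single and back to double.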
3468 __ cvt_s_d(result.low(), input);
3469 __ cvt_d_s(result, result.low());
3470}
3471
3472
3473void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3474 DoubleRegister input = ToDoubleRegister(instr->value());
3475 DoubleRegister result = ToDoubleRegister(instr->result());
3476 __ sqrt_d(result, input);
3477}
3478
3479
3480void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3481 DoubleRegister input = ToDoubleRegister(instr->value());
3482 DoubleRegister result = ToDoubleRegister(instr->result());
3483 DoubleRegister temp = ToDoubleRegister(instr->temp());
3484
3485 DCHECK(!input.is(result));
3486
3487 // Note that according to ECMA-262 15.8.2.13:
3488 // Math.pow(-Infinity, 0.5) == Infinity
3489 // Math.sqrt(-Infinity) == NaN
3490 Label done;
3491 __ Move(temp, static_cast<double>(-V8_INFINITY));
3492 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
3493 // Set up Infinity in the delay slot.
3494 // result is overwritten if the branch is not taken.
3495 __ neg_d(result, temp);
3496
3497 // Add +0 to convert -0 to +0.
3498 __ add_d(result, input, kDoubleRegZero);
3499 __ sqrt_d(result, result);
3500 __ bind(&done);
3501}
3502
3503
3504void LCodeGen::DoPower(LPower* instr) {
3505 Representation exponent_type = instr->hydrogen()->right()->representation();
3506 // Having marked this as a call, we can use any registers.
3507 // Just make sure that the input/output registers are the expected ones.
3508 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3509 DCHECK(!instr->right()->IsDoubleRegister() ||
3510 ToDoubleRegister(instr->right()).is(f4));
3511 DCHECK(!instr->right()->IsRegister() ||
3512 ToRegister(instr->right()).is(tagged_exponent));
3513 DCHECK(ToDoubleRegister(instr->left()).is(f2));
3514 DCHECK(ToDoubleRegister(instr->result()).is(f0));
3515
3516 if (exponent_type.IsSmi()) {
3517 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3518 __ CallStub(&stub);
3519 } else if (exponent_type.IsTagged()) {
3520 Label no_deopt;
3521 __ JumpIfSmi(tagged_exponent, &no_deopt);
3522 DCHECK(!t3.is(tagged_exponent));
3523 __ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3524 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3525 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, t3, Operand(at));
3526 __ bind(&no_deopt);
3527 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3528 __ CallStub(&stub);
3529 } else if (exponent_type.IsInteger32()) {
3530 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3531 __ CallStub(&stub);
3532 } else {
3533 DCHECK(exponent_type.IsDouble());
3534 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3535 __ CallStub(&stub);
3536 }
3537}
3538
3539
3540void LCodeGen::DoMathExp(LMathExp* instr) {
3541 DoubleRegister input = ToDoubleRegister(instr->value());
3542 DoubleRegister result = ToDoubleRegister(instr->result());
3543 DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3544 DoubleRegister double_scratch2 = double_scratch0();
3545 Register temp1 = ToRegister(instr->temp1());
3546 Register temp2 = ToRegister(instr->temp2());
3547
3548 MathExpGenerator::EmitMathExp(
3549 masm(), input, result, double_scratch1, double_scratch2,
3550 temp1, temp2, scratch0());
3551}
3552
3553
3554void LCodeGen::DoMathLog(LMathLog* instr) {
3555 __ PrepareCallCFunction(0, 1, scratch0());
3556 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3557 __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
3558 0, 1);
3559 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3560}
3561
3562
3563void LCodeGen::DoMathClz32(LMathClz32* instr) {
3564 Register input = ToRegister(instr->value());
3565 Register result = ToRegister(instr->result());
3566 __ Clz(result, input);
3567}
3568
3569void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
3570 Register scratch1, Register scratch2,
3571 Register scratch3) {
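  // Determine the caller's actual argument count and drop the current frame
  // (including any arguments adaptor frame) so that the tail-called function
  // can take its place on the stack.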
3572#if DEBUG
3573 if (actual.is_reg()) {
3574 DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
3575 } else {
3576 DCHECK(!AreAliased(scratch1, scratch2, scratch3));
3577 }
3578#endif
3579 if (FLAG_code_comments) {
3580 if (actual.is_reg()) {
3581 Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
3582 } else {
3583 Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
3584 }
3585 }
3586
3587 // Check if next frame is an arguments adaptor frame.
3588 Register caller_args_count_reg = scratch1;
3589 Label no_arguments_adaptor, formal_parameter_count_loaded;
3590 __ lw(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3591 __ lw(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
3592 __ Branch(&no_arguments_adaptor, ne, scratch3,
3593 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3594
3595 // Drop current frame and load arguments count from arguments adaptor frame.
3596 __ mov(fp, scratch2);
3597 __ lw(caller_args_count_reg,
3598 MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
3599 __ SmiUntag(caller_args_count_reg);
3600 __ Branch(&formal_parameter_count_loaded);
3601
3602 __ bind(&no_arguments_adaptor);
3603 // Load caller's formal parameter count
3604 __ lw(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3605 __ lw(scratch1,
3606 FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
3607 __ li(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
3608
3609 __ bind(&formal_parameter_count_loaded);
3610 __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
3611
3612 Comment(";;; }");
3613}
3614
3615void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3616 HInvokeFunction* hinstr = instr->hydrogen();
3617 DCHECK(ToRegister(instr->context()).is(cp));
3618 DCHECK(ToRegister(instr->function()).is(a1));
3619 DCHECK(instr->HasPointerMap());
3620
3621 bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
3622
3623 if (is_tail_call) {
3624 DCHECK(!info()->saves_caller_doubles());
3625 ParameterCount actual(instr->arity());
3626 // It is safe to use t0, t1 and t2 as scratch registers here given that
3627 // we are not going to return to caller function anyway.
3628 PrepareForTailCall(actual, t0, t1, t2);
3629 }
3630
3631 Handle<JSFunction> known_function = hinstr->known_function();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003632 if (known_function.is_null()) {
3633 LPointerMap* pointers = instr->pointer_map();
3634 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3635 ParameterCount actual(instr->arity());
3636 InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
3637 __ InvokeFunction(a1, no_reg, actual, flag, generator);
3638 } else {
3639 CallKnownFunction(known_function, hinstr->formal_parameter_count(),
3640 instr->arity(), is_tail_call, instr);
3641 }
3642}
3643
3644
3645void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3646 DCHECK(ToRegister(instr->result()).is(v0));
3647
3648 if (instr->hydrogen()->IsTailCall()) {
3649 if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
3650
3651 if (instr->target()->IsConstantOperand()) {
3652 LConstantOperand* target = LConstantOperand::cast(instr->target());
3653 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3654 __ Jump(code, RelocInfo::CODE_TARGET);
3655 } else {
3656 DCHECK(instr->target()->IsRegister());
3657 Register target = ToRegister(instr->target());
3658 __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3659 __ Jump(target);
3660 }
3661 } else {
3662 LPointerMap* pointers = instr->pointer_map();
3663 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3664
3665 if (instr->target()->IsConstantOperand()) {
3666 LConstantOperand* target = LConstantOperand::cast(instr->target());
3667 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3668 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3669 __ Call(code, RelocInfo::CODE_TARGET);
3670 } else {
3671 DCHECK(instr->target()->IsRegister());
3672 Register target = ToRegister(instr->target());
3673 generator.BeforeCall(__ CallSize(target));
3674 __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3675 __ Call(target);
3676 }
3677 generator.AfterCall();
3678 }
3679}
3680
3681
3682void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3683 DCHECK(ToRegister(instr->context()).is(cp));
3684 DCHECK(ToRegister(instr->constructor()).is(a1));
3685 DCHECK(ToRegister(instr->result()).is(v0));
3686
3687 __ li(a0, Operand(instr->arity()));
3688 if (instr->arity() == 1) {
3689 // We only need the allocation site for the case we have a length argument.
3690 // The case may bail out to the runtime, which will determine the correct
3691 // elements kind with the site.
3692 __ li(a2, instr->hydrogen()->site());
3693 } else {
3694 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
3695 }
3696 ElementsKind kind = instr->hydrogen()->elements_kind();
3697 AllocationSiteOverrideMode override_mode =
3698 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
3699 ? DISABLE_ALLOCATION_SITES
3700 : DONT_OVERRIDE;
3701
3702 if (instr->arity() == 0) {
3703 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
3704 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3705 } else if (instr->arity() == 1) {
3706 Label done;
3707 if (IsFastPackedElementsKind(kind)) {
3708 Label packed_case;
3709 // A nonzero length argument produces an array with holes, so the holey
3710 // variant of the elements kind may be needed: look at the first argument.
3711 __ lw(t1, MemOperand(sp, 0));
3712 __ Branch(&packed_case, eq, t1, Operand(zero_reg));
3713
3714 ElementsKind holey_kind = GetHoleyElementsKind(kind);
3715 ArraySingleArgumentConstructorStub stub(isolate(),
3716 holey_kind,
3717 override_mode);
3718 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3719 __ jmp(&done);
3720 __ bind(&packed_case);
3721 }
3722
3723 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
3724 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3725 __ bind(&done);
3726 } else {
3727 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
3728 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3729 }
3730}
3731
3732
3733void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3734 CallRuntime(instr->function(), instr->arity(), instr);
3735}
3736
3737
3738void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3739 Register function = ToRegister(instr->function());
3740 Register code_object = ToRegister(instr->code_object());
3741 __ Addu(code_object, code_object,
3742 Operand(Code::kHeaderSize - kHeapObjectTag));
3743 __ sw(code_object,
3744 FieldMemOperand(function, JSFunction::kCodeEntryOffset));
3745}
3746
3747
3748void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3749 Register result = ToRegister(instr->result());
3750 Register base = ToRegister(instr->base_object());
3751 if (instr->offset()->IsConstantOperand()) {
3752 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
3753 __ Addu(result, base, Operand(ToInteger32(offset)));
3754 } else {
3755 Register offset = ToRegister(instr->offset());
3756 __ Addu(result, base, offset);
3757 }
3758}
3759
3760
3761void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3762 Representation representation = instr->representation();
3763
3764 Register object = ToRegister(instr->object());
3765 Register scratch = scratch0();
3766 HObjectAccess access = instr->hydrogen()->access();
3767 int offset = access.offset();
3768
3769 if (access.IsExternalMemory()) {
3770 Register value = ToRegister(instr->value());
3771 MemOperand operand = MemOperand(object, offset);
3772 __ Store(value, operand, representation);
3773 return;
3774 }
3775
3776 __ AssertNotSmi(object);
3777
3778 DCHECK(!representation.IsSmi() ||
3779 !instr->value()->IsConstantOperand() ||
3780 IsSmi(LConstantOperand::cast(instr->value())));
3781 if (representation.IsDouble()) {
3782 DCHECK(access.IsInobject());
3783 DCHECK(!instr->hydrogen()->has_transition());
3784 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
3785 DoubleRegister value = ToDoubleRegister(instr->value());
3786 __ sdc1(value, FieldMemOperand(object, offset));
3787 return;
3788 }
3789
3790 if (instr->hydrogen()->has_transition()) {
3791 Handle<Map> transition = instr->hydrogen()->transition_map();
3792 AddDeprecationDependency(transition);
3793 __ li(scratch, Operand(transition));
3794 __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3795 if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
3796 Register temp = ToRegister(instr->temp());
3797 // Update the write barrier for the map field.
3798 __ RecordWriteForMap(object,
3799 scratch,
3800 temp,
3801 GetRAState(),
3802 kSaveFPRegs);
3803 }
3804 }
3805
3806 // Do the store.
3807 Register value = ToRegister(instr->value());
3808 if (access.IsInobject()) {
3809 MemOperand operand = FieldMemOperand(object, offset);
3810 __ Store(value, operand, representation);
3811 if (instr->hydrogen()->NeedsWriteBarrier()) {
3812 // Update the write barrier for the object for in-object properties.
3813 __ RecordWriteField(object,
3814 offset,
3815 value,
3816 scratch,
3817 GetRAState(),
3818 kSaveFPRegs,
3819 EMIT_REMEMBERED_SET,
3820 instr->hydrogen()->SmiCheckForWriteBarrier(),
3821 instr->hydrogen()->PointersToHereCheckForValue());
3822 }
3823 } else {
3824 __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
3825 MemOperand operand = FieldMemOperand(scratch, offset);
3826 __ Store(value, operand, representation);
3827 if (instr->hydrogen()->NeedsWriteBarrier()) {
3828 // Update the write barrier for the properties array.
3829 // object is used as a scratch register.
3830 __ RecordWriteField(scratch,
3831 offset,
3832 value,
3833 object,
3834 GetRAState(),
3835 kSaveFPRegs,
3836 EMIT_REMEMBERED_SET,
3837 instr->hydrogen()->SmiCheckForWriteBarrier(),
3838 instr->hydrogen()->PointersToHereCheckForValue());
3839 }
3840 }
3841}
3842
3843
3844void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3845 DCHECK(ToRegister(instr->context()).is(cp));
3846 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
3847 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
3848
3849 if (instr->hydrogen()->HasVectorAndSlot()) {
3850 EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
3851 }
3852
3853 __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
3854 Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
3855 isolate(), instr->language_mode(),
3856 instr->hydrogen()->initialization_state()).code();
3857 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3858}
3859
3860
3861void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3862 Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
3863 Operand operand(0);
3864 Register reg;
3865 if (instr->index()->IsConstantOperand()) {
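    // A constant index has to be the Operand of the comparison, so compare
    // length against it and commute the condition.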
3866 operand = ToOperand(instr->index());
3867 reg = ToRegister(instr->length());
3868 cc = CommuteCondition(cc);
3869 } else {
3870 reg = ToRegister(instr->index());
3871 operand = ToOperand(instr->length());
3872 }
3873 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
3874 Label done;
3875 __ Branch(&done, NegateCondition(cc), reg, operand);
3876 __ stop("eliminated bounds check failed");
3877 __ bind(&done);
3878 } else {
3879 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand);
3880 }
3881}
3882
3883
3884void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
3885 Register external_pointer = ToRegister(instr->elements());
3886 Register key = no_reg;
3887 ElementsKind elements_kind = instr->elements_kind();
3888 bool key_is_constant = instr->key()->IsConstantOperand();
3889 int constant_key = 0;
3890 if (key_is_constant) {
3891 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3892 if (constant_key & 0xF0000000) {
3893 Abort(kArrayIndexConstantValueTooBig);
3894 }
3895 } else {
3896 key = ToRegister(instr->key());
3897 }
3898 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3899 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3900 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3901 int base_offset = instr->base_offset();
3902
3903 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
3904 Register address = scratch0();
3905 FPURegister value(ToDoubleRegister(instr->value()));
3906 if (key_is_constant) {
3907 if (constant_key != 0) {
3908 __ Addu(address, external_pointer,
3909 Operand(constant_key << element_size_shift));
3910 } else {
3911 address = external_pointer;
3912 }
3913 } else {
3914 __ Lsa(address, external_pointer, key, shift_size);
3915 }
3916
3917 if (elements_kind == FLOAT32_ELEMENTS) {
3918 __ cvt_s_d(double_scratch0(), value);
3919 __ swc1(double_scratch0(), MemOperand(address, base_offset));
3920 } else { // Storing doubles, not floats.
3921 __ sdc1(value, MemOperand(address, base_offset));
3922 }
3923 } else {
3924 Register value(ToRegister(instr->value()));
3925 MemOperand mem_operand = PrepareKeyedOperand(
3926 key, external_pointer, key_is_constant, constant_key,
3927 element_size_shift, shift_size,
3928 base_offset);
3929 switch (elements_kind) {
3930 case UINT8_ELEMENTS:
3931 case UINT8_CLAMPED_ELEMENTS:
3932 case INT8_ELEMENTS:
3933 __ sb(value, mem_operand);
3934 break;
3935 case INT16_ELEMENTS:
3936 case UINT16_ELEMENTS:
3937 __ sh(value, mem_operand);
3938 break;
3939 case INT32_ELEMENTS:
3940 case UINT32_ELEMENTS:
3941 __ sw(value, mem_operand);
3942 break;
3943 case FLOAT32_ELEMENTS:
3944 case FLOAT64_ELEMENTS:
3945 case FAST_DOUBLE_ELEMENTS:
3946 case FAST_ELEMENTS:
3947 case FAST_SMI_ELEMENTS:
3948 case FAST_HOLEY_DOUBLE_ELEMENTS:
3949 case FAST_HOLEY_ELEMENTS:
3950 case FAST_HOLEY_SMI_ELEMENTS:
3951 case DICTIONARY_ELEMENTS:
3952 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
3953 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
3954 case FAST_STRING_WRAPPER_ELEMENTS:
3955 case SLOW_STRING_WRAPPER_ELEMENTS:
3956 case NO_ELEMENTS:
3957 UNREACHABLE();
3958 break;
3959 }
3960 }
3961}
3962
3963
3964void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
3965 DoubleRegister value = ToDoubleRegister(instr->value());
3966 Register elements = ToRegister(instr->elements());
3967 Register scratch = scratch0();
3968 Register scratch_1 = scratch1();
3969 DoubleRegister double_scratch = double_scratch0();
3970 bool key_is_constant = instr->key()->IsConstantOperand();
3971 int base_offset = instr->base_offset();
3972 Label not_nan, done;
3973
3974 // Calculate the effective address of the slot in the array to store the
3975 // double value.
3976 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3977 if (key_is_constant) {
3978 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3979 if (constant_key & 0xF0000000) {
3980 Abort(kArrayIndexConstantValueTooBig);
3981 }
3982 __ Addu(scratch, elements,
3983 Operand((constant_key << element_size_shift) + base_offset));
3984 } else {
3985 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3986 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3987 __ Addu(scratch, elements, Operand(base_offset));
3988 __ sll(at, ToRegister(instr->key()), shift_size);
3989 __ Addu(scratch, scratch, at);
3990 }
3991
3992 if (instr->NeedsCanonicalization()) {
3993 Label is_nan;
3994 // Check for NaN. All NaNs must be canonicalized.
3995 __ BranchF(NULL, &is_nan, eq, value, value);
3996 __ Branch(&not_nan);
3997
3998 // Only load the canonical NaN if the comparison above detected NaN.
3999 __ bind(&is_nan);
4000 __ LoadRoot(scratch_1, Heap::kNanValueRootIndex);
4001 __ ldc1(double_scratch,
4002 FieldMemOperand(scratch_1, HeapNumber::kValueOffset));
4003 __ sdc1(double_scratch, MemOperand(scratch, 0));
4004 __ Branch(&done);
4005 }
4006
4007 __ bind(&not_nan);
4008 __ sdc1(value, MemOperand(scratch, 0));
4009 __ bind(&done);
4010}
4011
4012
4013void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4014 Register value = ToRegister(instr->value());
4015 Register elements = ToRegister(instr->elements());
4016 Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
4017 : no_reg;
4018 Register scratch = scratch0();
4019 Register store_base = scratch;
4020 int offset = instr->base_offset();
4021
4022 // Do the store.
4023 if (instr->key()->IsConstantOperand()) {
4024 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4025 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4026 offset += ToInteger32(const_operand) * kPointerSize;
4027 store_base = elements;
4028 } else {
4029 // Even though the HLoadKeyed instruction forces the input
4030 // representation for the key to be an integer, the input gets replaced
4031 // during bound check elimination with the index argument to the bounds
4032 // check, which can be tagged, so that case must be handled here, too.
4033 if (instr->hydrogen()->key()->representation().IsSmi()) {
4034 __ Lsa(scratch, elements, key, kPointerSizeLog2 - kSmiTagSize);
4035 } else {
4036 __ Lsa(scratch, elements, key, kPointerSizeLog2);
4037 }
4038 }
4039 __ sw(value, MemOperand(store_base, offset));
4040
4041 if (instr->hydrogen()->NeedsWriteBarrier()) {
4042 SmiCheck check_needed =
4043 instr->hydrogen()->value()->type().IsHeapObject()
4044 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4045 // Compute address of modified element and store it into key register.
4046 __ Addu(key, store_base, Operand(offset));
4047 __ RecordWrite(elements,
4048 key,
4049 value,
4050 GetRAState(),
4051 kSaveFPRegs,
4052 EMIT_REMEMBERED_SET,
4053 check_needed,
4054 instr->hydrogen()->PointersToHereCheckForValue());
4055 }
4056}
4057
4058
4059void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4060 // By cases: external, fast double
4061 if (instr->is_fixed_typed_array()) {
4062 DoStoreKeyedExternalArray(instr);
4063 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4064 DoStoreKeyedFixedDoubleArray(instr);
4065 } else {
4066 DoStoreKeyedFixedArray(instr);
4067 }
4068}
4069
4070
4071void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4072 DCHECK(ToRegister(instr->context()).is(cp));
4073 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4074 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4075 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4076
4077 if (instr->hydrogen()->HasVectorAndSlot()) {
4078 EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
4079 }
4080
4081 Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
4082 isolate(), instr->language_mode(),
4083 instr->hydrogen()->initialization_state()).code();
4084 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4085}
4086
4087
4088void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
4089 class DeferredMaybeGrowElements final : public LDeferredCode {
4090 public:
4091 DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
4092 : LDeferredCode(codegen), instr_(instr) {}
4093 void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
4094 LInstruction* instr() override { return instr_; }
4095
4096 private:
4097 LMaybeGrowElements* instr_;
4098 };
4099
4100 Register result = v0;
4101 DeferredMaybeGrowElements* deferred =
4102 new (zone()) DeferredMaybeGrowElements(this, instr);
4103 LOperand* key = instr->key();
4104 LOperand* current_capacity = instr->current_capacity();
4105
4106 DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
4107 DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
4108 DCHECK(key->IsConstantOperand() || key->IsRegister());
4109 DCHECK(current_capacity->IsConstantOperand() ||
4110 current_capacity->IsRegister());
4111
4112 if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
4113 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4114 int32_t constant_capacity =
4115 ToInteger32(LConstantOperand::cast(current_capacity));
4116 if (constant_key >= constant_capacity) {
4117 // Deferred case.
4118 __ jmp(deferred->entry());
4119 }
4120 } else if (key->IsConstantOperand()) {
4121 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4122 __ Branch(deferred->entry(), le, ToRegister(current_capacity),
4123 Operand(constant_key));
4124 } else if (current_capacity->IsConstantOperand()) {
4125 int32_t constant_capacity =
4126 ToInteger32(LConstantOperand::cast(current_capacity));
4127 __ Branch(deferred->entry(), ge, ToRegister(key),
4128 Operand(constant_capacity));
4129 } else {
4130 __ Branch(deferred->entry(), ge, ToRegister(key),
4131 Operand(ToRegister(current_capacity)));
4132 }
4133
4134 if (instr->elements()->IsRegister()) {
4135 __ mov(result, ToRegister(instr->elements()));
4136 } else {
4137 __ lw(result, ToMemOperand(instr->elements()));
4138 }
4139
4140 __ bind(deferred->exit());
4141}
4142
4143
4144void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
4145 // TODO(3095996): Get rid of this. For now, we need to make the
4146 // result register contain a valid pointer because it is already
4147 // contained in the register pointer map.
4148 Register result = v0;
4149 __ mov(result, zero_reg);
4150
4151 // We have to call a stub.
4152 {
4153 PushSafepointRegistersScope scope(this);
4154 if (instr->object()->IsRegister()) {
4155 __ mov(result, ToRegister(instr->object()));
4156 } else {
4157 __ lw(result, ToMemOperand(instr->object()));
4158 }
4159
4160 LOperand* key = instr->key();
4161 if (key->IsConstantOperand()) {
4162 __ li(a3, Operand(ToSmi(LConstantOperand::cast(key))));
4163 } else {
4164 __ mov(a3, ToRegister(key));
4165 __ SmiTag(a3);
4166 }
4167
4168 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
4169 instr->hydrogen()->kind());
4170 __ mov(a0, result);
4171 __ CallStub(&stub);
4172 RecordSafepointWithLazyDeopt(
4173 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4174 __ StoreToSafepointRegisterSlot(result, result);
4175 }
4176
4177 // Deopt on smi, which means the elements array changed to dictionary mode.
4178 __ SmiTst(result, at);
4179 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
4180}
4181
4182
4183void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4184 Register object_reg = ToRegister(instr->object());
4185 Register scratch = scratch0();
4186
4187 Handle<Map> from_map = instr->original_map();
4188 Handle<Map> to_map = instr->transitioned_map();
4189 ElementsKind from_kind = instr->from_kind();
4190 ElementsKind to_kind = instr->to_kind();
4191
4192 Label not_applicable;
4193 __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4194 __ Branch(&not_applicable, ne, scratch, Operand(from_map));
4195
4196 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4197 Register new_map_reg = ToRegister(instr->new_map_temp());
4198 __ li(new_map_reg, Operand(to_map));
4199 __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4200 // Write barrier.
4201 __ RecordWriteForMap(object_reg,
4202 new_map_reg,
4203 scratch,
4204 GetRAState(),
4205 kDontSaveFPRegs);
4206 } else {
4207 DCHECK(object_reg.is(a0));
4208 DCHECK(ToRegister(instr->context()).is(cp));
4209 PushSafepointRegistersScope scope(this);
4210 __ li(a1, Operand(to_map));
4211 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4212 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4213 __ CallStub(&stub);
4214 RecordSafepointWithRegisters(
4215 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
4216 }
4217 __ bind(&not_applicable);
4218}
4219
4220
4221void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4222 Register object = ToRegister(instr->object());
4223 Register temp = ToRegister(instr->temp());
4224 Label no_memento_found;
4225 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4226 DeoptimizeIf(al, instr);
4227 __ bind(&no_memento_found);
4228}
4229
4230
4231void LCodeGen::DoStringAdd(LStringAdd* instr) {
4232 DCHECK(ToRegister(instr->context()).is(cp));
4233 DCHECK(ToRegister(instr->left()).is(a1));
4234 DCHECK(ToRegister(instr->right()).is(a0));
4235 StringAddStub stub(isolate(),
4236 instr->hydrogen()->flags(),
4237 instr->hydrogen()->pretenure_flag());
4238 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4239}
4240
4241
4242void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4243 class DeferredStringCharCodeAt final : public LDeferredCode {
4244 public:
4245 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4246 : LDeferredCode(codegen), instr_(instr) { }
4247 void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
4248 LInstruction* instr() override { return instr_; }
4249
4250 private:
4251 LStringCharCodeAt* instr_;
4252 };
4253
4254 DeferredStringCharCodeAt* deferred =
4255 new(zone()) DeferredStringCharCodeAt(this, instr);
4256 StringCharLoadGenerator::Generate(masm(),
4257 ToRegister(instr->string()),
4258 ToRegister(instr->index()),
4259 ToRegister(instr->result()),
4260 deferred->entry());
4261 __ bind(deferred->exit());
4262}
4263
4264
4265void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4266 Register string = ToRegister(instr->string());
4267 Register result = ToRegister(instr->result());
4268 Register scratch = scratch0();
4269
4270 // TODO(3095996): Get rid of this. For now, we need to make the
4271 // result register contain a valid pointer because it is already
4272 // contained in the register pointer map.
4273 __ mov(result, zero_reg);
4274
4275 PushSafepointRegistersScope scope(this);
4276 __ push(string);
4277 // Push the index as a smi. This is safe because of the checks in
4278 // DoStringCharCodeAt above.
4279 if (instr->index()->IsConstantOperand()) {
4280 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4281 __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
4282 __ push(scratch);
4283 } else {
4284 Register index = ToRegister(instr->index());
4285 __ SmiTag(index);
4286 __ push(index);
4287 }
4288 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
4289 instr->context());
4290 __ AssertSmi(v0);
4291 __ SmiUntag(v0);
4292 __ StoreToSafepointRegisterSlot(v0, result);
4293}
4294
4295
4296void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4297 class DeferredStringCharFromCode final : public LDeferredCode {
4298 public:
4299 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4300 : LDeferredCode(codegen), instr_(instr) { }
4301 void Generate() override {
4302 codegen()->DoDeferredStringCharFromCode(instr_);
4303 }
4304 LInstruction* instr() override { return instr_; }
4305
4306 private:
4307 LStringCharFromCode* instr_;
4308 };
4309
4310 DeferredStringCharFromCode* deferred =
4311 new(zone()) DeferredStringCharFromCode(this, instr);
4312
4313 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4314 Register char_code = ToRegister(instr->char_code());
4315 Register result = ToRegister(instr->result());
4316 Register scratch = scratch0();
4317 DCHECK(!char_code.is(result));
4318
4319 __ Branch(deferred->entry(), hi,
4320 char_code, Operand(String::kMaxOneByteCharCode));
4321 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4322 __ Lsa(result, result, char_code, kPointerSizeLog2);
4323 __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4324 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4325 __ Branch(deferred->entry(), eq, result, Operand(scratch));
4326 __ bind(deferred->exit());
4327}
4328
4329
4330void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4331 Register char_code = ToRegister(instr->char_code());
4332 Register result = ToRegister(instr->result());
4333
4334 // TODO(3095996): Get rid of this. For now, we need to make the
4335 // result register contain a valid pointer because it is already
4336 // contained in the register pointer map.
4337 __ mov(result, zero_reg);
4338
4339 PushSafepointRegistersScope scope(this);
4340 __ SmiTag(char_code);
4341 __ push(char_code);
4342 CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
4343 instr->context());
4344 __ StoreToSafepointRegisterSlot(v0, result);
4345}
4346
4347
4348void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4349 LOperand* input = instr->value();
4350 DCHECK(input->IsRegister() || input->IsStackSlot());
4351 LOperand* output = instr->result();
4352 DCHECK(output->IsDoubleRegister());
4353 FPURegister single_scratch = double_scratch0().low();
4354 if (input->IsStackSlot()) {
4355 Register scratch = scratch0();
4356 __ lw(scratch, ToMemOperand(input));
4357 __ mtc1(scratch, single_scratch);
4358 } else {
4359 __ mtc1(ToRegister(input), single_scratch);
4360 }
4361 __ cvt_d_w(ToDoubleRegister(output), single_scratch);
4362}
4363
4364
4365void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4366 LOperand* input = instr->value();
4367 LOperand* output = instr->result();
4368
4369 __ Cvt_d_uw(ToDoubleRegister(output), ToRegister(input), f22);
4370}
4371
4372
4373void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4374 class DeferredNumberTagI final : public LDeferredCode {
4375 public:
4376 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4377 : LDeferredCode(codegen), instr_(instr) { }
4378 void Generate() override {
4379 codegen()->DoDeferredNumberTagIU(instr_,
4380 instr_->value(),
4381 instr_->temp1(),
4382 instr_->temp2(),
4383 SIGNED_INT32);
4384 }
4385 LInstruction* instr() override { return instr_; }
4386
4387 private:
4388 LNumberTagI* instr_;
4389 };
4390
4391 Register src = ToRegister(instr->value());
4392 Register dst = ToRegister(instr->result());
4393 Register overflow = scratch0();
4394
4395 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4396 __ SmiTagCheckOverflow(dst, src, overflow);
4397 __ BranchOnOverflow(deferred->entry(), overflow);
4398 __ bind(deferred->exit());
4399}
4400
4401
4402void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4403 class DeferredNumberTagU final : public LDeferredCode {
4404 public:
4405 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4406 : LDeferredCode(codegen), instr_(instr) { }
4407 void Generate() override {
4408 codegen()->DoDeferredNumberTagIU(instr_,
4409 instr_->value(),
4410 instr_->temp1(),
4411 instr_->temp2(),
4412 UNSIGNED_INT32);
4413 }
4414 LInstruction* instr() override { return instr_; }
4415
4416 private:
4417 LNumberTagU* instr_;
4418 };
4419
4420 Register input = ToRegister(instr->value());
4421 Register result = ToRegister(instr->result());
4422
4423 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4424 __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
4425 __ SmiTag(result, input);
4426 __ bind(deferred->exit());
4427}
4428
4429
4430void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4431 LOperand* value,
4432 LOperand* temp1,
4433 LOperand* temp2,
4434 IntegerSignedness signedness) {
4435 Label done, slow;
4436 Register src = ToRegister(value);
4437 Register dst = ToRegister(instr->result());
4438 Register tmp1 = scratch0();
4439 Register tmp2 = ToRegister(temp1);
4440 Register tmp3 = ToRegister(temp2);
4441 DoubleRegister dbl_scratch = double_scratch0();
4442
4443 if (signedness == SIGNED_INT32) {
4444 // There was overflow, so bits 30 and 31 of the original integer
4445 // disagree. Try to allocate a heap number in new space and store
4446 // the value in there. If that fails, call the runtime system.
4447 if (dst.is(src)) {
4448 __ SmiUntag(src, dst);
4449 __ Xor(src, src, Operand(0x80000000));
4450 }
4451 __ mtc1(src, dbl_scratch);
4452 __ cvt_d_w(dbl_scratch, dbl_scratch);
4453 } else {
4454 __ Cvt_d_uw(dbl_scratch, src, f22);
4455 }
4456
4457 if (FLAG_inline_new) {
4458 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4459 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
4460 __ Branch(&done);
4461 }
4462
4463 // Slow case: Call the runtime system to do the number allocation.
4464 __ bind(&slow);
4465 {
4466 // TODO(3095996): Put a valid pointer value in the stack slot where the
4467 // result register is stored, as this register is in the pointer map, but
4468 // contains an integer value.
4469 __ mov(dst, zero_reg);
4470
4471 // Preserve the value of all registers.
4472 PushSafepointRegistersScope scope(this);
4473
4474 // NumberTagI and NumberTagD use the context from the frame, rather than
4475 // the environment's HContext or HInlinedContext value.
4476 // They only call Runtime::kAllocateHeapNumber.
4477 // The corresponding HChange instructions are added in a phase that does
4478 // not have easy access to the local context.
4479 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4480 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4481 RecordSafepointWithRegisters(
4482 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4483 __ Subu(v0, v0, kHeapObjectTag);
4484 __ StoreToSafepointRegisterSlot(v0, dst);
4485 }
4486
4487
4488 // Done. Put the value in dbl_scratch into the value of the allocated heap
4489 // number.
4490 __ bind(&done);
4491 __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
4492 __ Addu(dst, dst, kHeapObjectTag);
4493}
4494
4495
4496void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4497 class DeferredNumberTagD final : public LDeferredCode {
4498 public:
4499 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4500 : LDeferredCode(codegen), instr_(instr) { }
4501 void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
4502 LInstruction* instr() override { return instr_; }
4503
4504 private:
4505 LNumberTagD* instr_;
4506 };
4507
4508 DoubleRegister input_reg = ToDoubleRegister(instr->value());
4509 Register scratch = scratch0();
4510 Register reg = ToRegister(instr->result());
4511 Register temp1 = ToRegister(instr->temp());
4512 Register temp2 = ToRegister(instr->temp2());
4513
4514 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4515 if (FLAG_inline_new) {
4516 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4517 // We want the untagged address first for performance
4518 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4519 DONT_TAG_RESULT);
4520 } else {
4521 __ Branch(deferred->entry());
4522 }
4523 __ bind(deferred->exit());
4524 __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
4525 // Now that we have finished with the object's real address tag it
4526 __ Addu(reg, reg, kHeapObjectTag);
4527}
4528
4529
4530void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4531 // TODO(3095996): Get rid of this. For now, we need to make the
4532 // result register contain a valid pointer because it is already
4533 // contained in the register pointer map.
4534 Register reg = ToRegister(instr->result());
4535 __ mov(reg, zero_reg);
4536
4537 PushSafepointRegistersScope scope(this);
4538 // NumberTagI and NumberTagD use the context from the frame, rather than
4539 // the environment's HContext or HInlinedContext value.
4540 // They only call Runtime::kAllocateHeapNumber.
4541 // The corresponding HChange instructions are added in a phase that does
4542 // not have easy access to the local context.
4543 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4544 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4545 RecordSafepointWithRegisters(
4546 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4547 __ Subu(v0, v0, kHeapObjectTag);
4548 __ StoreToSafepointRegisterSlot(v0, reg);
4549}
4550
4551
4552void LCodeGen::DoSmiTag(LSmiTag* instr) {
4553 HChange* hchange = instr->hydrogen();
4554 Register input = ToRegister(instr->value());
4555 Register output = ToRegister(instr->result());
4556 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4557 hchange->value()->CheckFlag(HValue::kUint32)) {
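    // A uint32 value fits in a smi only if bits 30 and 31 are clear,
    // i.e. the value is below 2^30.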
4558 __ And(at, input, Operand(0xc0000000));
4559 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
4560 }
4561 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4562 !hchange->value()->CheckFlag(HValue::kUint32)) {
4563 __ SmiTagCheckOverflow(output, input, at);
4564 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
4565 } else {
4566 __ SmiTag(output, input);
4567 }
4568}
4569
4570
4571void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4572 Register scratch = scratch0();
4573 Register input = ToRegister(instr->value());
4574 Register result = ToRegister(instr->result());
4575 if (instr->needs_check()) {
4576 STATIC_ASSERT(kHeapObjectTag == 1);
4577 // If the input is a HeapObject, the value of scratch won't be zero.
4578 __ And(scratch, input, Operand(kHeapObjectTag));
4579 __ SmiUntag(result, input);
4580 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg));
4581 } else {
4582 __ SmiUntag(result, input);
4583 }
4584}
4585
4586
4587void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4588 DoubleRegister result_reg,
4589 NumberUntagDMode mode) {
4590 bool can_convert_undefined_to_nan =
4591 instr->hydrogen()->can_convert_undefined_to_nan();
4592 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4593
4594 Register scratch = scratch0();
4595 Label convert, load_smi, done;
4596 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4597 // Smi check.
4598 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4599 // Heap number map check.
4600 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4601 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4602 if (can_convert_undefined_to_nan) {
4603 __ Branch(&convert, ne, scratch, Operand(at));
4604 } else {
4605 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch,
4606 Operand(at));
4607 }
4608 // Load heap number.
4609 __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4610 if (deoptimize_on_minus_zero) {
4611 __ mfc1(at, result_reg.low());
4612 __ Branch(&done, ne, at, Operand(zero_reg));
4613 __ Mfhc1(scratch, result_reg);
4614 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch,
4615 Operand(HeapNumber::kSignMask));
4616 }
4617 __ Branch(&done);
4618 if (can_convert_undefined_to_nan) {
4619 __ bind(&convert);
4620 // Convert undefined (and hole) to NaN.
4621 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4622 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
4623 Operand(at));
4624 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4625 __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4626 __ Branch(&done);
4627 }
4628 } else {
4629 __ SmiUntag(scratch, input_reg);
4630 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4631 }
4632 // Smi to double register conversion
4633 __ bind(&load_smi);
4634 // scratch: untagged value of input_reg
4635 __ mtc1(scratch, result_reg);
4636 __ cvt_d_w(result_reg, result_reg);
4637 __ bind(&done);
4638}
4639
4640
4641void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4642 Register input_reg = ToRegister(instr->value());
4643 Register scratch1 = scratch0();
4644 Register scratch2 = ToRegister(instr->temp());
4645 DoubleRegister double_scratch = double_scratch0();
4646 DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
4647
4648 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4649 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4650
4651 Label done;
4652
4653 // The input is a tagged HeapObject.
4654 // Heap number map check.
4655 __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4656 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4657 // This 'at' value and scratch1 map value are used for tests in both clauses
4658 // of the if.
4659
4660 if (instr->truncating()) {
4661 // Performs a truncating conversion of a floating point number as used by
4662 // the JS bitwise operations.
4663 Label no_heap_number, check_bools, check_false;
4664 // Check HeapNumber map.
4665 __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
4666 __ mov(scratch2, input_reg); // In delay slot.
4667 __ TruncateHeapNumberToI(input_reg, scratch2);
4668 __ Branch(&done);
4669
4670 // Check for Oddballs. Undefined/False is converted to zero and True to one
4671 // for truncating conversions.
4672 __ bind(&no_heap_number);
4673 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4674 __ Branch(&check_bools, ne, input_reg, Operand(at));
4675 DCHECK(ToRegister(instr->result()).is(input_reg));
4676 __ Branch(USE_DELAY_SLOT, &done);
4677 __ mov(input_reg, zero_reg); // In delay slot.
4678
4679 __ bind(&check_bools);
4680 __ LoadRoot(at, Heap::kTrueValueRootIndex);
4681 __ Branch(&check_false, ne, scratch2, Operand(at));
4682 __ Branch(USE_DELAY_SLOT, &done);
4683 __ li(input_reg, Operand(1)); // In delay slot.
4684
4685 __ bind(&check_false);
4686 __ LoadRoot(at, Heap::kFalseValueRootIndex);
4687 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean,
4688 scratch2, Operand(at));
4689 __ Branch(USE_DELAY_SLOT, &done);
4690 __ mov(input_reg, zero_reg); // In delay slot.
4691 } else {
4692 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1,
4693 Operand(at));
4694
4695 // Load the double value.
4696 __ ldc1(double_scratch,
4697 FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4698
4699 Register except_flag = scratch2;
4700 __ EmitFPUTruncate(kRoundToZero,
4701 input_reg,
4702 double_scratch,
4703 scratch1,
4704 double_scratch2,
4705 except_flag,
4706 kCheckForInexactConversion);
4707
4708 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
4709 Operand(zero_reg));
4710
4711 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4712 __ Branch(&done, ne, input_reg, Operand(zero_reg));
4713
4714 __ Mfhc1(scratch1, double_scratch);
4715 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4716 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
4717 Operand(zero_reg));
4718 }
4719 }
4720 __ bind(&done);
4721}
4722
4723
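// Fast path for tagged-to-int32: smis are untagged inline, everything else
// is handled by the deferred code above.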
4724void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4725 class DeferredTaggedToI final : public LDeferredCode {
4726 public:
4727 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4728 : LDeferredCode(codegen), instr_(instr) { }
4729 void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
4730 LInstruction* instr() override { return instr_; }
4731
4732 private:
4733 LTaggedToI* instr_;
4734 };
4735
4736 LOperand* input = instr->value();
4737 DCHECK(input->IsRegister());
4738 DCHECK(input->Equals(instr->result()));
4739
4740 Register input_reg = ToRegister(input);
4741
4742 if (instr->hydrogen()->value()->representation().IsSmi()) {
4743 __ SmiUntag(input_reg);
4744 } else {
4745 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4746
4747 // Let the deferred code handle the HeapObject case.
4748 __ JumpIfNotSmi(input_reg, deferred->entry());
4749
4750 // Smi to int32 conversion.
4751 __ SmiUntag(input_reg);
4752 __ bind(deferred->exit());
4753 }
4754}
4755
4756
4757void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4758 LOperand* input = instr->value();
4759 DCHECK(input->IsRegister());
4760 LOperand* result = instr->result();
4761 DCHECK(result->IsDoubleRegister());
4762
4763 Register input_reg = ToRegister(input);
4764 DoubleRegister result_reg = ToDoubleRegister(result);
4765
4766 HValue* value = instr->hydrogen()->value();
4767 NumberUntagDMode mode = value->representation().IsSmi()
4768 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4769
4770 EmitNumberUntagD(instr, input_reg, result_reg, mode);
4771}
4772
4773
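// Double to untagged int32. The non-truncating case checks the FPU inexact
// flag, so any value that is not already an integral int32 deopts, as does
// -0 when the instruction bails out on minus zero.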
4774void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4775 Register result_reg = ToRegister(instr->result());
4776 Register scratch1 = scratch0();
4777 DoubleRegister double_input = ToDoubleRegister(instr->value());
4778
4779 if (instr->truncating()) {
4780 __ TruncateDoubleToI(result_reg, double_input);
4781 } else {
4782 Register except_flag = LCodeGen::scratch1();
4783
4784 __ EmitFPUTruncate(kRoundToMinusInf,
4785 result_reg,
4786 double_input,
4787 scratch1,
4788 double_scratch0(),
4789 except_flag,
4790 kCheckForInexactConversion);
4791
4792 // Deopt if the operation did not succeed (except_flag != 0).
4793 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
4794 Operand(zero_reg));
4795
4796 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4797 Label done;
4798 __ Branch(&done, ne, result_reg, Operand(zero_reg));
4799 __ Mfhc1(scratch1, double_input);
4800 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4801 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
4802 Operand(zero_reg));
4803 __ bind(&done);
4804 }
4805 }
4806}
4807
4808
4809void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
4810 Register result_reg = ToRegister(instr->result());
4811 Register scratch1 = LCodeGen::scratch0();
4812 DoubleRegister double_input = ToDoubleRegister(instr->value());
4813
4814 if (instr->truncating()) {
4815 __ TruncateDoubleToI(result_reg, double_input);
4816 } else {
4817 Register except_flag = LCodeGen::scratch1();
4818
4819 __ EmitFPUTruncate(kRoundToMinusInf,
4820 result_reg,
4821 double_input,
4822 scratch1,
4823 double_scratch0(),
4824 except_flag,
4825 kCheckForInexactConversion);
4826
4827 // Deopt if the operation did not succeed (except_flag != 0).
4828 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
4829 Operand(zero_reg));
4830
4831 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4832 Label done;
4833 __ Branch(&done, ne, result_reg, Operand(zero_reg));
4834 __ Mfhc1(scratch1, double_input);
4835 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4836 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
4837 Operand(zero_reg));
4838 __ bind(&done);
4839 }
4840 }
4841 __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
4842 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch1, Operand(zero_reg));
4843}
4844
4845
4846void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4847 LOperand* input = instr->value();
4848 __ SmiTst(ToRegister(input), at);
4849 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg));
4850}
4851
4852
4853void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4854 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4855 LOperand* input = instr->value();
4856 __ SmiTst(ToRegister(input), at);
4857 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
4858 }
4859}
4860
4861
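// Deopts when the buffer backing a typed array or DataView has been
// neutered (detached), since its contents are no longer accessible.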
4862void LCodeGen::DoCheckArrayBufferNotNeutered(
4863 LCheckArrayBufferNotNeutered* instr) {
4864 Register view = ToRegister(instr->view());
4865 Register scratch = scratch0();
4866
4867 __ lw(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
4868 __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
4869 __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
4870 DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, at, Operand(zero_reg));
4871}
4872
4873
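// Checks the instance type of the value's map, either against a
// [first, last] interval or with a mask-and-tag test, depending on what the
// hydrogen instruction provides.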
4874void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4875 Register input = ToRegister(instr->value());
4876 Register scratch = scratch0();
4877
4878 __ GetObjectType(input, scratch, scratch);
4879
4880 if (instr->hydrogen()->is_interval_check()) {
4881 InstanceType first;
4882 InstanceType last;
4883 instr->hydrogen()->GetCheckInterval(&first, &last);
4884
4885 // If there is only one type in the interval, check for equality.
4886 if (first == last) {
4887 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
4888 Operand(first));
4889 } else {
4890 DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch,
4891 Operand(first));
4892 // Omit check for the last type.
4893 if (last != LAST_TYPE) {
4894 DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch,
4895 Operand(last));
4896 }
4897 }
4898 } else {
4899 uint8_t mask;
4900 uint8_t tag;
4901 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4902
4903 if (base::bits::IsPowerOfTwo32(mask)) {
4904 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
4905 __ And(at, scratch, mask);
4906 DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
4907 at, Operand(zero_reg));
4908 } else {
4909 __ And(scratch, scratch, Operand(mask));
4910 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
4911 Operand(tag));
4912 }
4913 }
4914}
4915
4916
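// Deopts unless the value is the expected object. New-space objects are
// compared through a Cell rather than being embedded in the code directly,
// presumably because they can move during GC.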
4917void LCodeGen::DoCheckValue(LCheckValue* instr) {
4918 Register reg = ToRegister(instr->value());
4919 Handle<HeapObject> object = instr->hydrogen()->object().handle();
4920 AllowDeferredHandleDereference smi_check;
4921 if (isolate()->heap()->InNewSpace(*object)) {
4922 Register reg = ToRegister(instr->value());
4923 Handle<Cell> cell = isolate()->factory()->NewCell(object);
4924 __ li(at, Operand(cell));
4925 __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
4926 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at));
4927 } else {
4928 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object));
4929 }
4930}
4931
4932
4933void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
4934 {
4935 PushSafepointRegistersScope scope(this);
4936 __ push(object);
4937 __ mov(cp, zero_reg);
4938 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
4939 RecordSafepointWithRegisters(
4940 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
4941 __ StoreToSafepointRegisterSlot(v0, scratch0());
4942 }
4943 __ SmiTst(scratch0(), at);
4944 DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at,
4945 Operand(zero_reg));
4946}
4947
4948
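// Checks that the object's map is one of the expected maps. A pure stability
// check emits no code and only registers map-stability dependencies; when a
// migration target exists, a failing check first tries instance migration in
// deferred code before deoptimizing.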
4949void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4950 class DeferredCheckMaps final : public LDeferredCode {
4951 public:
4952 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
4953 : LDeferredCode(codegen), instr_(instr), object_(object) {
4954 SetExit(check_maps());
4955 }
4956 void Generate() override {
4957 codegen()->DoDeferredInstanceMigration(instr_, object_);
4958 }
4959 Label* check_maps() { return &check_maps_; }
4960 LInstruction* instr() override { return instr_; }
4961
4962 private:
4963 LCheckMaps* instr_;
4964 Label check_maps_;
4965 Register object_;
4966 };
4967
4968 if (instr->hydrogen()->IsStabilityCheck()) {
4969 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
4970 for (int i = 0; i < maps->size(); ++i) {
4971 AddStabilityDependency(maps->at(i).handle());
4972 }
4973 return;
4974 }
4975
4976 Register map_reg = scratch0();
4977 LOperand* input = instr->value();
4978 DCHECK(input->IsRegister());
4979 Register reg = ToRegister(input);
4980 __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
4981
4982 DeferredCheckMaps* deferred = NULL;
4983 if (instr->hydrogen()->HasMigrationTarget()) {
4984 deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
4985 __ bind(deferred->check_maps());
4986 }
4987
4988 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
4989 Label success;
4990 for (int i = 0; i < maps->size() - 1; i++) {
4991 Handle<Map> map = maps->at(i).handle();
4992 __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
4993 }
4994 Handle<Map> map = maps->at(maps->size() - 1).handle();
4995 // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
4996 if (instr->hydrogen()->HasMigrationTarget()) {
4997 __ Branch(deferred->entry(), ne, map_reg, Operand(map));
4998 } else {
4999 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map));
5000 }
5001
5002 __ bind(&success);
5003}
5004
5005
5006void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5007 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
5008 Register result_reg = ToRegister(instr->result());
5009 DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5010 __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
5011}
5012
5013
5014void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5015 Register unclamped_reg = ToRegister(instr->unclamped());
5016 Register result_reg = ToRegister(instr->result());
5017 __ ClampUint8(result_reg, unclamped_reg);
5018}
5019
5020
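// Clamps a tagged value to the [0, 255] range: smis and heap numbers are
// clamped directly, undefined is clamped to zero, and anything else deopts.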
5021void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5022 Register scratch = scratch0();
5023 Register input_reg = ToRegister(instr->unclamped());
5024 Register result_reg = ToRegister(instr->result());
5025 DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5026 Label is_smi, done, heap_number;
5027
5028 // Both smi and heap number cases are handled.
5029 __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
5030
5031 // Check for heap number
5032 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5033 __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
5034
5035 // Check for undefined. Undefined is converted to zero for clamping
5036 // conversions.
5037 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
5038 Operand(factory()->undefined_value()));
5039 __ mov(result_reg, zero_reg);
5040 __ jmp(&done);
5041
5042 // Heap number
5043 __ bind(&heap_number);
5044 __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
5045 HeapNumber::kValueOffset));
5046 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
5047 __ jmp(&done);
5048
5049 __ bind(&is_smi);
5050 __ ClampUint8(result_reg, scratch);
5051
5052 __ bind(&done);
5053}
5054
5055
5056void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5057 DoubleRegister value_reg = ToDoubleRegister(instr->value());
5058 Register result_reg = ToRegister(instr->result());
5059 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5060 __ FmoveHigh(result_reg, value_reg);
5061 } else {
5062 __ FmoveLow(result_reg, value_reg);
5063 }
5064}
5065
5066
5067void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5068 Register hi_reg = ToRegister(instr->hi());
5069 Register lo_reg = ToRegister(instr->lo());
5070 DoubleRegister result_reg = ToDoubleRegister(instr->result());
5071 __ Move(result_reg, lo_reg, hi_reg);
5072}
5073
5074
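// Inline allocation with a runtime fallback in deferred code. When requested,
// the new object is prefilled with one-pointer filler maps, presumably so the
// heap stays iterable until the real field values are stored.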
5075void LCodeGen::DoAllocate(LAllocate* instr) {
5076 class DeferredAllocate final : public LDeferredCode {
5077 public:
5078 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5079 : LDeferredCode(codegen), instr_(instr) { }
5080 void Generate() override { codegen()->DoDeferredAllocate(instr_); }
5081 LInstruction* instr() override { return instr_; }
5082
5083 private:
5084 LAllocate* instr_;
5085 };
5086
5087 DeferredAllocate* deferred =
5088 new(zone()) DeferredAllocate(this, instr);
5089
5090 Register result = ToRegister(instr->result());
5091 Register scratch = ToRegister(instr->temp1());
5092 Register scratch2 = ToRegister(instr->temp2());
5093
5094 // Allocate memory for the object.
5095 AllocationFlags flags = TAG_OBJECT;
5096 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5097 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5098 }
5099 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5100 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5101 flags = static_cast<AllocationFlags>(flags | PRETENURE);
5102 }
5103 if (instr->size()->IsConstantOperand()) {
5104 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5105 CHECK(size <= Page::kMaxRegularHeapObjectSize);
5106 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5107 } else {
5108 Register size = ToRegister(instr->size());
5109 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5110 }
5111
5112 __ bind(deferred->exit());
5113
5114 if (instr->hydrogen()->MustPrefillWithFiller()) {
5115 STATIC_ASSERT(kHeapObjectTag == 1);
5116 if (instr->size()->IsConstantOperand()) {
5117 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5118 __ li(scratch, Operand(size - kHeapObjectTag));
5119 } else {
5120 __ Subu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
5121 }
5122 __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5123 Label loop;
5124 __ bind(&loop);
5125 __ Subu(scratch, scratch, Operand(kPointerSize));
5126 __ Addu(at, result, Operand(scratch));
5127 __ sw(scratch2, MemOperand(at));
5128 __ Branch(&loop, ge, scratch, Operand(zero_reg));
5129 }
5130}
5131
5132
5133void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5134 Register result = ToRegister(instr->result());
5135
5136 // TODO(3095996): Get rid of this. For now, we need to make the
5137 // result register contain a valid pointer because it is already
5138 // contained in the register pointer map.
5139 __ mov(result, zero_reg);
5140
5141 PushSafepointRegistersScope scope(this);
5142 if (instr->size()->IsRegister()) {
5143 Register size = ToRegister(instr->size());
5144 DCHECK(!size.is(result));
5145 __ SmiTag(size);
5146 __ push(size);
5147 } else {
5148 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5149 if (size >= 0 && size <= Smi::kMaxValue) {
5150 __ Push(Smi::FromInt(size));
5151 } else {
5152 // We should never get here at runtime => abort
5153 __ stop("invalid allocation size");
5154 return;
5155 }
5156 }
5157
5158 int flags = AllocateDoubleAlignFlag::encode(
5159 instr->hydrogen()->MustAllocateDoubleAligned());
5160 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5161 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5162 flags = AllocateTargetSpace::update(flags, OLD_SPACE);
5163 } else {
5164 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5165 }
5166 __ Push(Smi::FromInt(flags));
5167
5168 CallRuntimeFromDeferred(
5169 Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5170 __ StoreToSafepointRegisterSlot(v0, result);
5171}
5172
5173
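// typeof: smis are always "number", so that case is answered inline; all
// other values go through the TypeofStub.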
5174void LCodeGen::DoTypeof(LTypeof* instr) {
5175 DCHECK(ToRegister(instr->value()).is(a3));
5176 DCHECK(ToRegister(instr->result()).is(v0));
5177 Label end, do_call;
5178 Register value_register = ToRegister(instr->value());
5179 __ JumpIfNotSmi(value_register, &do_call);
5180 __ li(v0, Operand(isolate()->factory()->number_string()));
5181 __ jmp(&end);
5182 __ bind(&do_call);
5183 TypeofStub stub(isolate());
5184 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5185 __ bind(&end);
5186}
5187
5188
5189void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5190 Register input = ToRegister(instr->value());
5191
5192 Register cmp1 = no_reg;
5193 Operand cmp2 = Operand(no_reg);
5194
5195 Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
5196 instr->FalseLabel(chunk_),
5197 input,
5198 instr->type_literal(),
5199 &cmp1,
5200 &cmp2);
5201
5202 DCHECK(cmp1.is_valid());
5203 DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());
5204
5205 if (final_branch_condition != kNoCondition) {
5206 EmitBranch(instr, final_branch_condition, cmp1, cmp2);
5207 }
5208}
5209
5210
5211Condition LCodeGen::EmitTypeofIs(Label* true_label,
5212 Label* false_label,
5213 Register input,
5214 Handle<String> type_name,
5215 Register* cmp1,
5216 Operand* cmp2) {
5217 // This function utilizes the delay slot heavily. This is used to load
5218 // values that are always usable without depending on the type of the input
5219 // register.
5220 Condition final_branch_condition = kNoCondition;
5221 Register scratch = scratch0();
5222 Factory* factory = isolate()->factory();
5223 if (String::Equals(type_name, factory->number_string())) {
5224 __ JumpIfSmi(input, true_label);
5225 __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
5226 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
5227 *cmp1 = input;
5228 *cmp2 = Operand(at);
5229 final_branch_condition = eq;
5230
5231 } else if (String::Equals(type_name, factory->string_string())) {
5232 __ JumpIfSmi(input, false_label);
5233 __ GetObjectType(input, input, scratch);
5234 *cmp1 = scratch;
5235 *cmp2 = Operand(FIRST_NONSTRING_TYPE);
5236 final_branch_condition = lt;
5237
5238 } else if (String::Equals(type_name, factory->symbol_string())) {
5239 __ JumpIfSmi(input, false_label);
5240 __ GetObjectType(input, input, scratch);
5241 *cmp1 = scratch;
5242 *cmp2 = Operand(SYMBOL_TYPE);
5243 final_branch_condition = eq;
5244
5245 } else if (String::Equals(type_name, factory->boolean_string())) {
5246 __ LoadRoot(at, Heap::kTrueValueRootIndex);
5247 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5248 __ LoadRoot(at, Heap::kFalseValueRootIndex);
5249 *cmp1 = at;
5250 *cmp2 = Operand(input);
5251 final_branch_condition = eq;
5252
5253 } else if (String::Equals(type_name, factory->undefined_string())) {
5254 __ LoadRoot(at, Heap::kNullValueRootIndex);
5255 __ Branch(USE_DELAY_SLOT, false_label, eq, at, Operand(input));
5256 // The first instruction of JumpIfSmi is an And - it is safe in the delay
5257 // slot.
5258 __ JumpIfSmi(input, false_label);
5259 // Check for undetectable objects => true.
5260 __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
5261 __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
5262 __ And(at, at, 1 << Map::kIsUndetectable);
5263 *cmp1 = at;
5264 *cmp2 = Operand(zero_reg);
5265 final_branch_condition = ne;
5266
5267 } else if (String::Equals(type_name, factory->function_string())) {
5268 __ JumpIfSmi(input, false_label);
5269 __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5270 __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5271 __ And(scratch, scratch,
5272 Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5273 *cmp1 = scratch;
5274 *cmp2 = Operand(1 << Map::kIsCallable);
5275 final_branch_condition = eq;
5276
5277 } else if (String::Equals(type_name, factory->object_string())) {
5278 __ JumpIfSmi(input, false_label);
5279 __ LoadRoot(at, Heap::kNullValueRootIndex);
5280 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5281 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
5282 __ GetObjectType(input, scratch, scratch1());
5283 __ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE));
5284 // Check for callable or undetectable objects => false.
5285 __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5286 __ And(at, scratch,
5287 Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5288 *cmp1 = at;
5289 *cmp2 = Operand(zero_reg);
5290 final_branch_condition = eq;
5291
5292// clang-format off
5293#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
5294 } else if (String::Equals(type_name, factory->type##_string())) { \
5295 __ JumpIfSmi(input, false_label); \
5296 __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset)); \
5297 __ LoadRoot(at, Heap::k##Type##MapRootIndex); \
5298 *cmp1 = input; \
5299 *cmp2 = Operand(at); \
5300 final_branch_condition = eq;
5301 SIMD128_TYPES(SIMD128_TYPE)
5302#undef SIMD128_TYPE
5303 // clang-format on
5304
5305 } else {
5306 *cmp1 = at;
5307 *cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
5308 __ Branch(false_label);
5309 }
5310
5311 return final_branch_condition;
5312}
5313
5314
5315void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5316 if (info()->ShouldEnsureSpaceForLazyDeopt()) {
5317 // Ensure that we have enough space after the previous lazy-bailout
5318 // instruction for patching the code here.
5319 int current_pc = masm()->pc_offset();
5320 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5321 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5322 DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
5323 while (padding_size > 0) {
5324 __ nop();
5325 padding_size -= Assembler::kInstrSize;
5326 }
5327 }
5328 }
5329 last_lazy_deopt_pc_ = masm()->pc_offset();
5330}
5331
5332
5333void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5334 last_lazy_deopt_pc_ = masm()->pc_offset();
5335 DCHECK(instr->HasEnvironment());
5336 LEnvironment* env = instr->environment();
5337 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5338 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5339}
5340
5341
5342void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5343 Deoptimizer::BailoutType type = instr->hydrogen()->type();
5344 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5345 // needed return address), even though the implementation of LAZY and EAGER is
5346 // now identical. When LAZY is eventually completely folded into EAGER, remove
5347 // the special case below.
5348 if (info()->IsStub() && type == Deoptimizer::EAGER) {
5349 type = Deoptimizer::LAZY;
5350 }
5351
5352 DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg,
5353 Operand(zero_reg));
5354}
5355
5356
5357void LCodeGen::DoDummy(LDummy* instr) {
5358 // Nothing to see here, move on!
5359}
5360
5361
5362void LCodeGen::DoDummyUse(LDummyUse* instr) {
5363 // Nothing to see here, move on!
5364}
5365
5366
5367void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5368 PushSafepointRegistersScope scope(this);
5369 LoadContextFromDeferred(instr->context());
5370 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5371 RecordSafepointWithLazyDeopt(
5372 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5373 DCHECK(instr->HasEnvironment());
5374 LEnvironment* env = instr->environment();
5375 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5376}
5377
5378
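// Stack-limit check, emitted either at function entry (calling the
// StackCheck builtin inline) or at a backwards branch, where the runtime
// call is moved into deferred code so the loop back-edge stays cheap.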
5379void LCodeGen::DoStackCheck(LStackCheck* instr) {
5380 class DeferredStackCheck final : public LDeferredCode {
5381 public:
5382 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5383 : LDeferredCode(codegen), instr_(instr) { }
5384 void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
5385 LInstruction* instr() override { return instr_; }
5386
5387 private:
5388 LStackCheck* instr_;
5389 };
5390
5391 DCHECK(instr->HasEnvironment());
5392 LEnvironment* env = instr->environment();
5393 // There is no LLazyBailout instruction for stack-checks. We have to
5394 // prepare for lazy deoptimization explicitly here.
5395 if (instr->hydrogen()->is_function_entry()) {
5396 // Perform stack overflow check.
5397 Label done;
5398 __ LoadRoot(at, Heap::kStackLimitRootIndex);
5399 __ Branch(&done, hs, sp, Operand(at));
5400 DCHECK(instr->context()->IsRegister());
5401 DCHECK(ToRegister(instr->context()).is(cp));
5402 CallCode(isolate()->builtins()->StackCheck(),
5403 RelocInfo::CODE_TARGET,
5404 instr);
5405 __ bind(&done);
5406 } else {
5407 DCHECK(instr->hydrogen()->is_backwards_branch());
5408 // Perform stack overflow check if this goto needs it before jumping.
5409 DeferredStackCheck* deferred_stack_check =
5410 new(zone()) DeferredStackCheck(this, instr);
5411 __ LoadRoot(at, Heap::kStackLimitRootIndex);
5412 __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
5413 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5414 __ bind(instr->done_label());
5415 deferred_stack_check->SetExit(instr->done_label());
5416 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5417 // Don't record a deoptimization index for the safepoint here.
5418 // This will be done explicitly when emitting the call and the safepoint in
5419 // the deferred code.
5420 }
5421}
5422
5423
5424void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5425 // This is a pseudo-instruction that ensures that the environment here is
5426 // properly registered for deoptimization and records the assembler's PC
5427 // offset.
5428 LEnvironment* environment = instr->environment();
5429
5430 // If the environment were already registered, we would have no way of
5431 // backpatching it with the spill slot operands.
5432 DCHECK(!environment->HasBeenRegistered());
5433 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5434
5435 GenerateOsrPrologue();
5436}
5437
5438
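// Prepares for-in iteration: if the enum cache for the object and its
// prototype chain is usable the object's map is returned, otherwise the
// enumerable properties are collected by a runtime call.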
5439void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5440 Register result = ToRegister(instr->result());
5441 Register object = ToRegister(instr->object());
5442
5443 Label use_cache, call_runtime;
5444 DCHECK(object.is(a0));
5445 __ CheckEnumCache(&call_runtime);
5446
5447 __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
5448 __ Branch(&use_cache);
5449
5450 // Get the set of properties to enumerate.
5451 __ bind(&call_runtime);
5452 __ push(object);
5453 CallRuntime(Runtime::kForInEnumerate, instr);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005454 __ bind(&use_cache);
5455}
5456
5457
5458void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5459 Register map = ToRegister(instr->map());
5460 Register result = ToRegister(instr->result());
5461 Label load_cache, done;
5462 __ EnumLength(result, map);
5463 __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
5464 __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
5465 __ jmp(&done);
5466
5467 __ bind(&load_cache);
5468 __ LoadInstanceDescriptors(map, result);
5469 __ lw(result,
5470 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5471 __ lw(result,
5472 FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5473 DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg));
5474
5475 __ bind(&done);
5476}
5477
5478
5479void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5480 Register object = ToRegister(instr->value());
5481 Register map = ToRegister(instr->map());
5482 __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5483 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0()));
5484}
5485
5486
5487void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5488 Register result,
5489 Register object,
5490 Register index) {
5491 PushSafepointRegistersScope scope(this);
5492 __ Push(object, index);
5493 __ mov(cp, zero_reg);
5494 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5495 RecordSafepointWithRegisters(
5496 instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5497 __ StoreToSafepointRegisterSlot(v0, result);
5498}
5499
5500
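// Loads a field by the encoded index used by for-in: bit 0 of the index
// selects the deferred runtime path for mutable heap-number fields, the
// remaining bits give the field index, and a negative index addresses the
// out-of-object properties array instead of an in-object field.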
5501void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5502 class DeferredLoadMutableDouble final : public LDeferredCode {
5503 public:
5504 DeferredLoadMutableDouble(LCodeGen* codegen,
5505 LLoadFieldByIndex* instr,
5506 Register result,
5507 Register object,
5508 Register index)
5509 : LDeferredCode(codegen),
5510 instr_(instr),
5511 result_(result),
5512 object_(object),
5513 index_(index) {
5514 }
5515 void Generate() override {
5516 codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
5517 }
5518 LInstruction* instr() override { return instr_; }
5519
5520 private:
5521 LLoadFieldByIndex* instr_;
5522 Register result_;
5523 Register object_;
5524 Register index_;
5525 };
5526
5527 Register object = ToRegister(instr->object());
5528 Register index = ToRegister(instr->index());
5529 Register result = ToRegister(instr->result());
5530 Register scratch = scratch0();
5531
5532 DeferredLoadMutableDouble* deferred;
5533 deferred = new(zone()) DeferredLoadMutableDouble(
5534 this, instr, result, object, index);
5535
5536 Label out_of_object, done;
5537
5538 __ And(scratch, index, Operand(Smi::FromInt(1)));
5539 __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
5540 __ sra(index, index, 1);
5541
5542 __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
5543 __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize); // In delay slot.
5544
5545 STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
5546 __ Addu(scratch, object, scratch);
5547 __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5548
5549 __ Branch(&done);
5550
5551 __ bind(&out_of_object);
5552 __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5553 // Index is equal to negated out of object property index plus 1.
5554 __ Subu(scratch, result, scratch);
5555 __ lw(result, FieldMemOperand(scratch,
5556 FixedArray::kHeaderSize - kPointerSize));
5557 __ bind(deferred->exit());
5558 __ bind(&done);
5559}
5560
5561#undef __
5562
5563} // namespace internal
5564} // namespace v8