1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/crankshaft/arm/lithium-codegen-arm.h"
6
7#include "src/base/bits.h"
8#include "src/code-factory.h"
9#include "src/code-stubs.h"
10#include "src/crankshaft/arm/lithium-gap-resolver-arm.h"
11#include "src/crankshaft/hydrogen-osr.h"
12#include "src/ic/ic.h"
13#include "src/ic/stub-cache.h"
14#include "src/profiler/cpu-profiler.h"
15
16namespace v8 {
17namespace internal {
18
19
20class SafepointGenerator final : public CallWrapper {
21 public:
22 SafepointGenerator(LCodeGen* codegen,
23 LPointerMap* pointers,
24 Safepoint::DeoptMode mode)
25 : codegen_(codegen),
26 pointers_(pointers),
27 deopt_mode_(mode) { }
28 virtual ~SafepointGenerator() {}
29
30 void BeforeCall(int call_size) const override {}
31
32 void AfterCall() const override {
33 codegen_->RecordSafepoint(pointers_, deopt_mode_);
34 }
35
36 private:
37 LCodeGen* codegen_;
38 LPointerMap* pointers_;
39 Safepoint::DeoptMode deopt_mode_;
40};
41
42
43#define __ masm()->
44
45bool LCodeGen::GenerateCode() {
46 LPhase phase("Z_Code generation", chunk());
47 DCHECK(is_unused());
48 status_ = GENERATING;
49
50 // Open a frame scope to indicate that there is a frame on the stack. The
51 // NONE indicates that the scope shouldn't actually generate code to set up
52 // the frame (that is done in GeneratePrologue).
53 FrameScope frame_scope(masm_, StackFrame::NONE);
54
55 return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
56 GenerateJumpTable() && GenerateSafepointTable();
57}
58
59
60void LCodeGen::FinishCode(Handle<Code> code) {
61 DCHECK(is_done());
62 code->set_stack_slots(GetTotalFrameSlotCount());
63 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
64 PopulateDeoptimizationData(code);
65}
66
67
68void LCodeGen::SaveCallerDoubles() {
69 DCHECK(info()->saves_caller_doubles());
70 DCHECK(NeedsEagerFrame());
71 Comment(";;; Save clobbered callee double registers");
72 int count = 0;
73 BitVector* doubles = chunk()->allocated_double_registers();
74 BitVector::Iterator save_iterator(doubles);
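  // The allocated double registers are stored to consecutive kDoubleSize
  // stack slots in ascending register-code order; RestoreCallerDoubles below
  // reloads them in the same order.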
75 while (!save_iterator.Done()) {
76 __ vstr(DoubleRegister::from_code(save_iterator.Current()),
77 MemOperand(sp, count * kDoubleSize));
78 save_iterator.Advance();
79 count++;
80 }
81}
82
83
84void LCodeGen::RestoreCallerDoubles() {
85 DCHECK(info()->saves_caller_doubles());
86 DCHECK(NeedsEagerFrame());
87 Comment(";;; Restore clobbered callee double registers");
88 BitVector* doubles = chunk()->allocated_double_registers();
89 BitVector::Iterator save_iterator(doubles);
90 int count = 0;
91 while (!save_iterator.Done()) {
92 __ vldr(DoubleRegister::from_code(save_iterator.Current()),
93 MemOperand(sp, count * kDoubleSize));
94 save_iterator.Advance();
95 count++;
96 }
97}
98
99
100bool LCodeGen::GeneratePrologue() {
101 DCHECK(is_generating());
102
103 if (info()->IsOptimizing()) {
104 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
105
106 // r1: Callee's JS function.
107 // cp: Callee's context.
108 // pp: Callee's constant pool pointer (if enabled)
109 // fp: Caller's frame pointer.
110 // lr: Caller's pc.
111 }
112
113 info()->set_prologue_offset(masm_->pc_offset());
114 if (NeedsEagerFrame()) {
115 if (info()->IsStub()) {
116 __ StubPrologue();
117 } else {
118 __ Prologue(info()->GeneratePreagedPrologue());
119 }
120 frame_is_built_ = true;
121 }
122
123 // Reserve space for the stack slots needed by the code.
124 int slots = GetStackSlotCount();
125 if (slots > 0) {
126 if (FLAG_debug_code) {
127 __ sub(sp, sp, Operand(slots * kPointerSize));
128 __ push(r0);
129 __ push(r1);
130 __ add(r0, sp, Operand(slots * kPointerSize));
131 __ mov(r1, Operand(kSlotsZapValue));
132 Label loop;
133 __ bind(&loop);
134 __ sub(r0, r0, Operand(kPointerSize));
135 __ str(r1, MemOperand(r0, 2 * kPointerSize));
136 __ cmp(r0, sp);
137 __ b(ne, &loop);
138 __ pop(r1);
139 __ pop(r0);
140 } else {
141 __ sub(sp, sp, Operand(slots * kPointerSize));
142 }
143 }
144
145 if (info()->saves_caller_doubles()) {
146 SaveCallerDoubles();
147 }
148 return !is_aborted();
149}
150
151
152void LCodeGen::DoPrologue(LPrologue* instr) {
153 Comment(";;; Prologue begin");
154
155 // Possibly allocate a local context.
156 if (info()->scope()->num_heap_slots() > 0) {
157 Comment(";;; Allocate local context");
158 bool need_write_barrier = true;
159 // Argument to NewContext is the function, which is in r1.
160 int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
161 Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
162 if (info()->scope()->is_script_scope()) {
163 __ push(r1);
164 __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
165 __ CallRuntime(Runtime::kNewScriptContext);
166 deopt_mode = Safepoint::kLazyDeopt;
167 } else if (slots <= FastNewContextStub::kMaximumSlots) {
168 FastNewContextStub stub(isolate(), slots);
169 __ CallStub(&stub);
170 // Result of FastNewContextStub is always in new space.
171 need_write_barrier = false;
172 } else {
173 __ push(r1);
174 __ CallRuntime(Runtime::kNewFunctionContext);
175 }
176 RecordSafepoint(deopt_mode);
177
178 // Context is returned in both r0 and cp. It replaces the context
179 // passed to us. It's saved in the stack and kept live in cp.
180 __ mov(cp, r0);
181 __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
182 // Copy any necessary parameters into the context.
183 int num_parameters = scope()->num_parameters();
184 int first_parameter = scope()->has_this_declaration() ? -1 : 0;
185 for (int i = first_parameter; i < num_parameters; i++) {
186 Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
187 if (var->IsContextSlot()) {
188 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
189 (num_parameters - 1 - i) * kPointerSize;
190 // Load parameter from stack.
191 __ ldr(r0, MemOperand(fp, parameter_offset));
192 // Store it in the context.
193 MemOperand target = ContextMemOperand(cp, var->index());
194 __ str(r0, target);
195 // Update the write barrier. This clobbers r3 and r0.
196 if (need_write_barrier) {
197 __ RecordWriteContextSlot(
198 cp,
199 target.offset(),
200 r0,
201 r3,
202 GetLinkRegisterState(),
203 kSaveFPRegs);
204 } else if (FLAG_debug_code) {
205 Label done;
206 __ JumpIfInNewSpace(cp, r0, &done);
207 __ Abort(kExpectedNewSpaceObject);
208 __ bind(&done);
209 }
210 }
211 }
212 Comment(";;; End allocate local context");
213 }
214
215 Comment(";;; Prologue end");
216}
217
218
219void LCodeGen::GenerateOsrPrologue() {
220 // Generate the OSR entry prologue at the first unknown OSR value, or if there
221 // are none, at the OSR entrypoint instruction.
222 if (osr_pc_offset_ >= 0) return;
223
224 osr_pc_offset_ = masm()->pc_offset();
225
226 // Adjust the frame size, subsuming the unoptimized frame into the
227 // optimized frame.
228 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
229 DCHECK(slots >= 0);
230 __ sub(sp, sp, Operand(slots * kPointerSize));
231}
232
233
234void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
235 if (instr->IsCall()) {
236 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
237 }
238 if (!instr->IsLazyBailout() && !instr->IsGap()) {
239 safepoints_.BumpLastLazySafepointIndex();
240 }
241}
242
243
244bool LCodeGen::GenerateDeferredCode() {
245 DCHECK(is_generating());
246 if (deferred_.length() > 0) {
247 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
248 LDeferredCode* code = deferred_[i];
249
250 HValue* value =
251 instructions_->at(code->instruction_index())->hydrogen_value();
252 RecordAndWritePosition(
253 chunk()->graph()->SourcePositionToScriptPosition(value->position()));
254
255 Comment(";;; <@%d,#%d> "
256 "-------------------- Deferred %s --------------------",
257 code->instruction_index(),
258 code->instr()->hydrogen_value()->id(),
259 code->instr()->Mnemonic());
260 __ bind(code->entry());
261 if (NeedsDeferredFrame()) {
262 Comment(";;; Build frame");
263 DCHECK(!frame_is_built_);
264 DCHECK(info()->IsStub());
265 frame_is_built_ = true;
266 __ PushFixedFrame();
267 __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
268 __ push(scratch0());
269 __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
270 Comment(";;; Deferred code");
271 }
272 code->Generate();
273 if (NeedsDeferredFrame()) {
274 Comment(";;; Destroy frame");
275 DCHECK(frame_is_built_);
276 __ pop(ip);
277 __ PopFixedFrame();
278 frame_is_built_ = false;
279 }
280 __ jmp(code->exit());
281 }
282 }
283
284 // Force constant pool emission at the end of the deferred code to make
285 // sure that no constant pools are emitted after.
286 masm()->CheckConstPool(true, false);
287
288 return !is_aborted();
289}
290
291
292bool LCodeGen::GenerateJumpTable() {
293 // Check that the jump table is accessible from everywhere in the function
294 // code, i.e. that offsets to the table can be encoded in the 24bit signed
295 // immediate of a branch instruction.
296 // To simplify we consider the code size from the first instruction to the
297 // end of the jump table. We also don't consider the pc load delta.
298 // Each entry in the jump table generates one instruction and inlines one
299 // 32bit data after it.
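 // (The factor of 7 below appears to be a conservative per-entry upper bound
 // on the words emitted, covering the offset move, the branch and the entry's
 // constant-pool data.)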
300 if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
301 jump_table_.length() * 7)) {
302 Abort(kGeneratedCodeIsTooLarge);
303 }
304
305 if (jump_table_.length() > 0) {
306 Label needs_frame, call_deopt_entry;
307
308 Comment(";;; -------------------- Jump table --------------------");
309 Address base = jump_table_[0].address;
310
311 Register entry_offset = scratch0();
312
313 int length = jump_table_.length();
314 for (int i = 0; i < length; i++) {
315 Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
316 __ bind(&table_entry->label);
317
318 DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
319 Address entry = table_entry->address;
320 DeoptComment(table_entry->deopt_info);
321
322 // Second-level deopt table entries are contiguous and small, so instead
323 // of loading the full, absolute address of each one, load an immediate
324 // offset which will be added to the base address later.
325 __ mov(entry_offset, Operand(entry - base));
326
327 if (table_entry->needs_frame) {
328 DCHECK(!info()->saves_caller_doubles());
329 Comment(";;; call deopt with frame");
330 __ PushFixedFrame();
331 __ bl(&needs_frame);
332 } else {
333 __ bl(&call_deopt_entry);
334 }
335 info()->LogDeoptCallPosition(masm()->pc_offset(),
336 table_entry->deopt_info.inlining_id);
337 masm()->CheckConstPool(false, false);
338 }
339
340 if (needs_frame.is_linked()) {
341 __ bind(&needs_frame);
342 // This variant of deopt can only be used with stubs. Since we don't
343 // have a function pointer to install in the stack frame that we're
344 // building, install a special marker there instead.
345 DCHECK(info()->IsStub());
346 __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
347 __ push(ip);
348 __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
349 }
350
351 Comment(";;; call deopt");
352 __ bind(&call_deopt_entry);
353
354 if (info()->saves_caller_doubles()) {
355 DCHECK(info()->IsStub());
356 RestoreCallerDoubles();
357 }
358
359 // Add the base address to the offset previously loaded in entry_offset.
360 __ add(entry_offset, entry_offset,
361 Operand(ExternalReference::ForDeoptEntry(base)));
362 __ bx(entry_offset);
363 }
364
365 // Force constant pool emission at the end of the deopt jump table to make
366 // sure that no constant pools are emitted after.
367 masm()->CheckConstPool(true, false);
368
369 // The deoptimization jump table is the last part of the instruction
370 // sequence. Mark the generated code as done unless we bailed out.
371 if (!is_aborted()) status_ = DONE;
372 return !is_aborted();
373}
374
375
376bool LCodeGen::GenerateSafepointTable() {
377 DCHECK(is_done());
378 safepoints_.Emit(masm(), GetTotalFrameSlotCount());
379 return !is_aborted();
380}
381
382
383Register LCodeGen::ToRegister(int code) const {
384 return Register::from_code(code);
385}
386
387
388DwVfpRegister LCodeGen::ToDoubleRegister(int code) const {
389 return DwVfpRegister::from_code(code);
390}
391
392
393Register LCodeGen::ToRegister(LOperand* op) const {
394 DCHECK(op->IsRegister());
395 return ToRegister(op->index());
396}
397
398
399Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
400 if (op->IsRegister()) {
401 return ToRegister(op->index());
402 } else if (op->IsConstantOperand()) {
403 LConstantOperand* const_op = LConstantOperand::cast(op);
404 HConstant* constant = chunk_->LookupConstant(const_op);
405 Handle<Object> literal = constant->handle(isolate());
406 Representation r = chunk_->LookupLiteralRepresentation(const_op);
407 if (r.IsInteger32()) {
408 AllowDeferredHandleDereference get_number;
409 DCHECK(literal->IsNumber());
410 __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
411 } else if (r.IsDouble()) {
412 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
413 } else {
414 DCHECK(r.IsSmiOrTagged());
415 __ Move(scratch, literal);
416 }
417 return scratch;
418 } else if (op->IsStackSlot()) {
419 __ ldr(scratch, ToMemOperand(op));
420 return scratch;
421 }
422 UNREACHABLE();
423 return scratch;
424}
425
426
427DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
428 DCHECK(op->IsDoubleRegister());
429 return ToDoubleRegister(op->index());
430}
431
432
433DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
434 SwVfpRegister flt_scratch,
435 DwVfpRegister dbl_scratch) {
436 if (op->IsDoubleRegister()) {
437 return ToDoubleRegister(op->index());
438 } else if (op->IsConstantOperand()) {
439 LConstantOperand* const_op = LConstantOperand::cast(op);
440 HConstant* constant = chunk_->LookupConstant(const_op);
441 Handle<Object> literal = constant->handle(isolate());
442 Representation r = chunk_->LookupLiteralRepresentation(const_op);
443 if (r.IsInteger32()) {
444 DCHECK(literal->IsNumber());
445 __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
446 __ vmov(flt_scratch, ip);
447 __ vcvt_f64_s32(dbl_scratch, flt_scratch);
448 return dbl_scratch;
449 } else if (r.IsDouble()) {
450 Abort(kUnsupportedDoubleImmediate);
451 } else if (r.IsTagged()) {
452 Abort(kUnsupportedTaggedImmediate);
453 }
454 } else if (op->IsStackSlot()) {
455 // TODO(regis): Why is vldr not taking a MemOperand?
456 // __ vldr(dbl_scratch, ToMemOperand(op));
457 MemOperand mem_op = ToMemOperand(op);
458 __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
459 return dbl_scratch;
460 }
461 UNREACHABLE();
462 return dbl_scratch;
463}
464
465
466Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
467 HConstant* constant = chunk_->LookupConstant(op);
468 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
469 return constant->handle(isolate());
470}
471
472
473bool LCodeGen::IsInteger32(LConstantOperand* op) const {
474 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
475}
476
477
478bool LCodeGen::IsSmi(LConstantOperand* op) const {
479 return chunk_->LookupLiteralRepresentation(op).IsSmi();
480}
481
482
483int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
484 return ToRepresentation(op, Representation::Integer32());
485}
486
487
488int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
489 const Representation& r) const {
490 HConstant* constant = chunk_->LookupConstant(op);
491 int32_t value = constant->Integer32Value();
492 if (r.IsInteger32()) return value;
493 DCHECK(r.IsSmiOrTagged());
494 return reinterpret_cast<int32_t>(Smi::FromInt(value));
495}
496
497
498Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
499 HConstant* constant = chunk_->LookupConstant(op);
500 return Smi::FromInt(constant->Integer32Value());
501}
502
503
504double LCodeGen::ToDouble(LConstantOperand* op) const {
505 HConstant* constant = chunk_->LookupConstant(op);
506 DCHECK(constant->HasDoubleValue());
507 return constant->DoubleValue();
508}
509
510
511Operand LCodeGen::ToOperand(LOperand* op) {
512 if (op->IsConstantOperand()) {
513 LConstantOperand* const_op = LConstantOperand::cast(op);
514 HConstant* constant = chunk()->LookupConstant(const_op);
515 Representation r = chunk_->LookupLiteralRepresentation(const_op);
516 if (r.IsSmi()) {
517 DCHECK(constant->HasSmiValue());
518 return Operand(Smi::FromInt(constant->Integer32Value()));
519 } else if (r.IsInteger32()) {
520 DCHECK(constant->HasInteger32Value());
521 return Operand(constant->Integer32Value());
522 } else if (r.IsDouble()) {
523 Abort(kToOperandUnsupportedDoubleImmediate);
524 }
525 DCHECK(r.IsTagged());
526 return Operand(constant->handle(isolate()));
527 } else if (op->IsRegister()) {
528 return Operand(ToRegister(op));
529 } else if (op->IsDoubleRegister()) {
530 Abort(kToOperandIsDoubleRegisterUnimplemented);
531 return Operand::Zero();
532 }
533 // Stack slots not implemented, use ToMemOperand instead.
534 UNREACHABLE();
535 return Operand::Zero();
536}
537
538
539static int ArgumentsOffsetWithoutFrame(int index) {
540 DCHECK(index < 0);
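  // Without a frame, index -1 maps to sp + 0, index -2 to sp + kPointerSize,
  // and so on.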
541 return -(index + 1) * kPointerSize;
542}
543
544
545MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
546 DCHECK(!op->IsRegister());
547 DCHECK(!op->IsDoubleRegister());
548 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
549 if (NeedsEagerFrame()) {
550 return MemOperand(fp, FrameSlotToFPOffset(op->index()));
551 } else {
552 // Retrieve the parameter without an eager stack frame, relative to the
553 // stack pointer.
554 return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
555 }
556}
557
558
559MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
560 DCHECK(op->IsDoubleStackSlot());
561 if (NeedsEagerFrame()) {
562 return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
563 } else {
564 // Retrieve the parameter without an eager stack frame, relative to the
565 // stack pointer.
566 return MemOperand(
567 sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
568 }
569}
570
571
572void LCodeGen::WriteTranslation(LEnvironment* environment,
573 Translation* translation) {
574 if (environment == NULL) return;
575
576 // The translation includes one command per value in the environment.
577 int translation_size = environment->translation_size();
578
579 WriteTranslation(environment->outer(), translation);
580 WriteTranslationFrame(environment, translation);
581
582 int object_index = 0;
583 int dematerialized_index = 0;
584 for (int i = 0; i < translation_size; ++i) {
585 LOperand* value = environment->values()->at(i);
586 AddToTranslation(
587 environment, translation, value, environment->HasTaggedValueAt(i),
588 environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
589 }
590}
591
592
593void LCodeGen::AddToTranslation(LEnvironment* environment,
594 Translation* translation,
595 LOperand* op,
596 bool is_tagged,
597 bool is_uint32,
598 int* object_index_pointer,
599 int* dematerialized_index_pointer) {
600 if (op == LEnvironment::materialization_marker()) {
601 int object_index = (*object_index_pointer)++;
602 if (environment->ObjectIsDuplicateAt(object_index)) {
603 int dupe_of = environment->ObjectDuplicateOfAt(object_index);
604 translation->DuplicateObject(dupe_of);
605 return;
606 }
607 int object_length = environment->ObjectLengthAt(object_index);
608 if (environment->ObjectIsArgumentsAt(object_index)) {
609 translation->BeginArgumentsObject(object_length);
610 } else {
611 translation->BeginCapturedObject(object_length);
612 }
613 int dematerialized_index = *dematerialized_index_pointer;
614 int env_offset = environment->translation_size() + dematerialized_index;
615 *dematerialized_index_pointer += object_length;
616 for (int i = 0; i < object_length; ++i) {
617 LOperand* value = environment->values()->at(env_offset + i);
618 AddToTranslation(environment,
619 translation,
620 value,
621 environment->HasTaggedValueAt(env_offset + i),
622 environment->HasUint32ValueAt(env_offset + i),
623 object_index_pointer,
624 dematerialized_index_pointer);
625 }
626 return;
627 }
628
629 if (op->IsStackSlot()) {
630 int index = op->index();
631 if (is_tagged) {
632 translation->StoreStackSlot(index);
633 } else if (is_uint32) {
634 translation->StoreUint32StackSlot(index);
635 } else {
636 translation->StoreInt32StackSlot(index);
637 }
638 } else if (op->IsDoubleStackSlot()) {
639 int index = op->index();
640 translation->StoreDoubleStackSlot(index);
641 } else if (op->IsRegister()) {
642 Register reg = ToRegister(op);
643 if (is_tagged) {
644 translation->StoreRegister(reg);
645 } else if (is_uint32) {
646 translation->StoreUint32Register(reg);
647 } else {
648 translation->StoreInt32Register(reg);
649 }
650 } else if (op->IsDoubleRegister()) {
651 DoubleRegister reg = ToDoubleRegister(op);
652 translation->StoreDoubleRegister(reg);
653 } else if (op->IsConstantOperand()) {
654 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
655 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
656 translation->StoreLiteral(src_index);
657 } else {
658 UNREACHABLE();
659 }
660}
661
662
663int LCodeGen::CallCodeSize(Handle<Code> code, RelocInfo::Mode mode) {
664 int size = masm()->CallSize(code, mode);
665 if (code->kind() == Code::BINARY_OP_IC ||
666 code->kind() == Code::COMPARE_IC) {
667 size += Assembler::kInstrSize; // extra nop() added in CallCodeGeneric.
668 }
669 return size;
670}
671
672
673void LCodeGen::CallCode(Handle<Code> code,
674 RelocInfo::Mode mode,
675 LInstruction* instr,
676 TargetAddressStorageMode storage_mode) {
677 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
678}
679
680
681void LCodeGen::CallCodeGeneric(Handle<Code> code,
682 RelocInfo::Mode mode,
683 LInstruction* instr,
684 SafepointMode safepoint_mode,
685 TargetAddressStorageMode storage_mode) {
686 DCHECK(instr != NULL);
687 // Block literal pool emission to ensure nop indicating no inlined smi code
688 // is in the correct position.
689 Assembler::BlockConstPoolScope block_const_pool(masm());
690 __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
691 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
692
693 // Signal that we don't inline smi code before these stubs in the
694 // optimizing code generator.
695 if (code->kind() == Code::BINARY_OP_IC ||
696 code->kind() == Code::COMPARE_IC) {
697 __ nop();
698 }
699}
700
701
702void LCodeGen::CallRuntime(const Runtime::Function* function,
703 int num_arguments,
704 LInstruction* instr,
705 SaveFPRegsMode save_doubles) {
706 DCHECK(instr != NULL);
707
708 __ CallRuntime(function, num_arguments, save_doubles);
709
710 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
711}
712
713
714void LCodeGen::LoadContextFromDeferred(LOperand* context) {
715 if (context->IsRegister()) {
716 __ Move(cp, ToRegister(context));
717 } else if (context->IsStackSlot()) {
718 __ ldr(cp, ToMemOperand(context));
719 } else if (context->IsConstantOperand()) {
720 HConstant* constant =
721 chunk_->LookupConstant(LConstantOperand::cast(context));
722 __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
723 } else {
724 UNREACHABLE();
725 }
726}
727
728
729void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
730 int argc,
731 LInstruction* instr,
732 LOperand* context) {
733 LoadContextFromDeferred(context);
734 __ CallRuntimeSaveDoubles(id);
735 RecordSafepointWithRegisters(
736 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
737}
738
739
740void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
741 Safepoint::DeoptMode mode) {
742 environment->set_has_been_used();
743 if (!environment->HasBeenRegistered()) {
744 // Physical stack frame layout:
745 // -x ............. -4 0 ..................................... y
746 // [incoming arguments] [spill slots] [pushed outgoing arguments]
747
748 // Layout of the environment:
749 // 0 ..................................................... size-1
750 // [parameters] [locals] [expression stack including arguments]
751
752 // Layout of the translation:
753 // 0 ........................................................ size - 1 + 4
754 // [expression stack including arguments] [locals] [4 words] [parameters]
755 // |>------------ translation_size ------------<|
756
757 int frame_count = 0;
758 int jsframe_count = 0;
759 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
760 ++frame_count;
761 if (e->frame_type() == JS_FUNCTION) {
762 ++jsframe_count;
763 }
764 }
765 Translation translation(&translations_, frame_count, jsframe_count, zone());
766 WriteTranslation(environment, &translation);
767 int deoptimization_index = deoptimizations_.length();
768 int pc_offset = masm()->pc_offset();
769 environment->Register(deoptimization_index,
770 translation.index(),
771 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
772 deoptimizations_.Add(environment, zone());
773 }
774}
775
776
777void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
778 Deoptimizer::DeoptReason deopt_reason,
779 Deoptimizer::BailoutType bailout_type) {
780 LEnvironment* environment = instr->environment();
781 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
782 DCHECK(environment->HasBeenRegistered());
783 int id = environment->deoptimization_index();
784 Address entry =
785 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
786 if (entry == NULL) {
787 Abort(kBailoutWasNotPrepared);
788 return;
789 }
790
791 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
792 Register scratch = scratch0();
793 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
794
795 // Store the condition on the stack if necessary
796 if (condition != al) {
797 __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition));
798 __ mov(scratch, Operand(1), LeaveCC, condition);
799 __ push(scratch);
800 }
801
802 __ push(r1);
803 __ mov(scratch, Operand(count));
804 __ ldr(r1, MemOperand(scratch));
805 __ sub(r1, r1, Operand(1), SetCC);
806 __ mov(r1, Operand(FLAG_deopt_every_n_times), LeaveCC, eq);
807 __ str(r1, MemOperand(scratch));
808 __ pop(r1);
809
810 if (condition != al) {
811 // Clean up the stack before the deoptimizer call
812 __ pop(scratch);
813 }
814
815 __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq);
816
817 // 'Restore' the condition in a slightly hacky way. (It would be better
818 // to use 'msr' and 'mrs' instructions here, but they are not supported by
819 // our ARM simulator).
820 if (condition != al) {
821 condition = ne;
822 __ cmp(scratch, Operand::Zero());
823 }
824 }
825
826 if (info()->ShouldTrapOnDeopt()) {
827 __ stop("trap_on_deopt", condition);
828 }
829
830 Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
831
832 DCHECK(info()->IsStub() || frame_is_built_);
833 // Go through jump table if we need to handle condition, build frame, or
834 // restore caller doubles.
835 if (condition == al && frame_is_built_ &&
836 !info()->saves_caller_doubles()) {
837 DeoptComment(deopt_info);
838 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
839 info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
840 } else {
841 Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
842 !frame_is_built_);
843 // We often have several deopts to the same entry, reuse the last
844 // jump entry if this is the case.
845 if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
846 jump_table_.is_empty() ||
847 !table_entry.IsEquivalentTo(jump_table_.last())) {
848 jump_table_.Add(table_entry, zone());
849 }
850 __ b(condition, &jump_table_.last().label);
851 }
852}
853
854
855void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
856 Deoptimizer::DeoptReason deopt_reason) {
857 Deoptimizer::BailoutType bailout_type = info()->IsStub()
858 ? Deoptimizer::LAZY
859 : Deoptimizer::EAGER;
860 DeoptimizeIf(condition, instr, deopt_reason, bailout_type);
861}
862
863
864void LCodeGen::RecordSafepointWithLazyDeopt(
865 LInstruction* instr, SafepointMode safepoint_mode) {
866 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
867 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
868 } else {
869 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
870 RecordSafepointWithRegisters(
871 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
872 }
873}
874
875
876void LCodeGen::RecordSafepoint(
877 LPointerMap* pointers,
878 Safepoint::Kind kind,
879 int arguments,
880 Safepoint::DeoptMode deopt_mode) {
881 DCHECK(expected_safepoint_kind_ == kind);
882
883 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
884 Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
885 kind, arguments, deopt_mode);
886 for (int i = 0; i < operands->length(); i++) {
887 LOperand* pointer = operands->at(i);
888 if (pointer->IsStackSlot()) {
889 safepoint.DefinePointerSlot(pointer->index(), zone());
890 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
891 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
892 }
893 }
894}
895
896
897void LCodeGen::RecordSafepoint(LPointerMap* pointers,
898 Safepoint::DeoptMode deopt_mode) {
899 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
900}
901
902
903void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
904 LPointerMap empty_pointers(zone());
905 RecordSafepoint(&empty_pointers, deopt_mode);
906}
907
908
909void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
910 int arguments,
911 Safepoint::DeoptMode deopt_mode) {
912 RecordSafepoint(
913 pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
914}
915
916
917void LCodeGen::RecordAndWritePosition(int position) {
918 if (position == RelocInfo::kNoPosition) return;
919 masm()->positions_recorder()->RecordPosition(position);
920 masm()->positions_recorder()->WriteRecordedPositions();
921}
922
923
924static const char* LabelType(LLabel* label) {
925 if (label->is_loop_header()) return " (loop header)";
926 if (label->is_osr_entry()) return " (OSR entry)";
927 return "";
928}
929
930
931void LCodeGen::DoLabel(LLabel* label) {
932 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
933 current_instruction_,
934 label->hydrogen_value()->id(),
935 label->block_id(),
936 LabelType(label));
937 __ bind(label->label());
938 current_block_ = label->block_id();
939 DoGap(label);
940}
941
942
943void LCodeGen::DoParallelMove(LParallelMove* move) {
944 resolver_.Resolve(move);
945}
946
947
948void LCodeGen::DoGap(LGap* gap) {
949 for (int i = LGap::FIRST_INNER_POSITION;
950 i <= LGap::LAST_INNER_POSITION;
951 i++) {
952 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
953 LParallelMove* move = gap->GetParallelMove(inner_pos);
954 if (move != NULL) DoParallelMove(move);
955 }
956}
957
958
959void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
960 DoGap(instr);
961}
962
963
964void LCodeGen::DoParameter(LParameter* instr) {
965 // Nothing to do.
966}
967
968
969void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
970 GenerateOsrPrologue();
971}
972
973
974void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
975 Register dividend = ToRegister(instr->dividend());
976 int32_t divisor = instr->divisor();
977 DCHECK(dividend.is(ToRegister(instr->result())));
978
979 // Theoretically, a variation of the branch-free code for integer division by
980 // a power of 2 (calculating the remainder via an additional multiplication
981 // (which gets simplified to an 'and') and subtraction) should be faster, and
982 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
983 // indicate that positive dividends are heavily favored, so the branching
984 // version performs better.
985 HMod* hmod = instr->hydrogen();
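  // |divisor| is a power of two, so |divisor| - 1 is an all-ones mask that
  // extracts the magnitude of the remainder; e.g. divisor == -8 yields
  // mask == 7.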
986 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
987 Label dividend_is_not_negative, done;
988 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
989 __ cmp(dividend, Operand::Zero());
990 __ b(pl, &dividend_is_not_negative);
991 // Note that this is correct even for kMinInt operands.
992 __ rsb(dividend, dividend, Operand::Zero());
993 __ and_(dividend, dividend, Operand(mask));
994 __ rsb(dividend, dividend, Operand::Zero(), SetCC);
995 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
996 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
997 }
998 __ b(&done);
999 }
1000
1001 __ bind(&dividend_is_not_negative);
1002 __ and_(dividend, dividend, Operand(mask));
1003 __ bind(&done);
1004}
1005
1006
1007void LCodeGen::DoModByConstI(LModByConstI* instr) {
1008 Register dividend = ToRegister(instr->dividend());
1009 int32_t divisor = instr->divisor();
1010 Register result = ToRegister(instr->result());
1011 DCHECK(!dividend.is(result));
1012
1013 if (divisor == 0) {
1014 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
1015 return;
1016 }
1017
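  // Compute dividend - truncate(dividend / |divisor|) * |divisor|. As in C,
  // the remainder takes the sign of the dividend, e.g. -7 % 3 == -1 and
  // 7 % -3 == 1.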
1018 __ TruncatingDiv(result, dividend, Abs(divisor));
1019 __ mov(ip, Operand(Abs(divisor)));
1020 __ smull(result, ip, result, ip);
1021 __ sub(result, dividend, result, SetCC);
1022
1023 // Check for negative zero.
1024 HMod* hmod = instr->hydrogen();
1025 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1026 Label remainder_not_zero;
1027 __ b(ne, &remainder_not_zero);
1028 __ cmp(dividend, Operand::Zero());
1029 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
1030 __ bind(&remainder_not_zero);
1031 }
1032}
1033
1034
1035void LCodeGen::DoModI(LModI* instr) {
1036 HMod* hmod = instr->hydrogen();
1037 if (CpuFeatures::IsSupported(SUDIV)) {
1038 CpuFeatureScope scope(masm(), SUDIV);
1039
1040 Register left_reg = ToRegister(instr->left());
1041 Register right_reg = ToRegister(instr->right());
1042 Register result_reg = ToRegister(instr->result());
1043
1044 Label done;
1045 // Check for x % 0, sdiv might signal an exception. We have to deopt in this
1046 // case because we can't return a NaN.
1047 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1048 __ cmp(right_reg, Operand::Zero());
1049 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
1050 }
1051
1052 // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
1053 // want. We have to deopt if we care about -0, because we can't return that.
1054 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1055 Label no_overflow_possible;
1056 __ cmp(left_reg, Operand(kMinInt));
1057 __ b(ne, &no_overflow_possible);
1058 __ cmp(right_reg, Operand(-1));
1059 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1060 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1061 } else {
1062 __ b(ne, &no_overflow_possible);
1063 __ mov(result_reg, Operand::Zero());
1064 __ jmp(&done);
1065 }
1066 __ bind(&no_overflow_possible);
1067 }
1068
1069 // For 'r3 = r1 % r2' we can have the following ARM code:
1070 // sdiv r3, r1, r2
1071 // mls r3, r3, r2, r1
1072
1073 __ sdiv(result_reg, left_reg, right_reg);
1074 __ Mls(result_reg, result_reg, right_reg, left_reg);
1075
1076 // If we care about -0, test if the dividend is <0 and the result is 0.
1077 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1078 __ cmp(result_reg, Operand::Zero());
1079 __ b(ne, &done);
1080 __ cmp(left_reg, Operand::Zero());
1081 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
1082 }
1083 __ bind(&done);
1084
1085 } else {
1086 // General case, without any SDIV support.
1087 Register left_reg = ToRegister(instr->left());
1088 Register right_reg = ToRegister(instr->right());
1089 Register result_reg = ToRegister(instr->result());
1090 Register scratch = scratch0();
1091 DCHECK(!scratch.is(left_reg));
1092 DCHECK(!scratch.is(right_reg));
1093 DCHECK(!scratch.is(result_reg));
1094 DwVfpRegister dividend = ToDoubleRegister(instr->temp());
1095 DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
1096 DCHECK(!divisor.is(dividend));
1097 LowDwVfpRegister quotient = double_scratch0();
1098 DCHECK(!quotient.is(dividend));
1099 DCHECK(!quotient.is(divisor));
1100
1101 Label done;
1102 // Check for x % 0, we have to deopt in this case because we can't return a
1103 // NaN.
1104 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1105 __ cmp(right_reg, Operand::Zero());
1106 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
1107 }
1108
1109 __ Move(result_reg, left_reg);
1110 // Load the arguments in VFP registers. The divisor value is preloaded
1111 // before. Be careful that 'right_reg' is only live on entry.
1112 // TODO(svenpanne) The last comment seems to be wrong nowadays.
1113 __ vmov(double_scratch0().low(), left_reg);
1114 __ vcvt_f64_s32(dividend, double_scratch0().low());
1115 __ vmov(double_scratch0().low(), right_reg);
1116 __ vcvt_f64_s32(divisor, double_scratch0().low());
1117
1118 // We do not care about the sign of the divisor. Note that we still handle
1119 // the kMinInt % -1 case correctly, though.
1120 __ vabs(divisor, divisor);
1121 // Compute the quotient and round it to a 32bit integer.
1122 __ vdiv(quotient, dividend, divisor);
1123 __ vcvt_s32_f64(quotient.low(), quotient);
1124 __ vcvt_f64_s32(quotient, quotient.low());
1125
1126 // Compute the remainder in result.
1127 __ vmul(double_scratch0(), divisor, quotient);
1128 __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
1129 __ vmov(scratch, double_scratch0().low());
1130 __ sub(result_reg, left_reg, scratch, SetCC);
1131
1132 // If we care about -0, test if the dividend is <0 and the result is 0.
1133 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1134 __ b(ne, &done);
1135 __ cmp(left_reg, Operand::Zero());
1136 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
1137 }
1138 __ bind(&done);
1139 }
1140}
1141
1142
1143void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1144 Register dividend = ToRegister(instr->dividend());
1145 int32_t divisor = instr->divisor();
1146 Register result = ToRegister(instr->result());
1147 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1148 DCHECK(!result.is(dividend));
1149
1150 // Check for (0 / -x) that will produce negative zero.
1151 HDiv* hdiv = instr->hydrogen();
1152 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1153 __ cmp(dividend, Operand::Zero());
1154 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1155 }
1156 // Check for (kMinInt / -1).
1157 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1158 __ cmp(dividend, Operand(kMinInt));
1159 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
1160 }
1161 // Deoptimize if remainder will not be 0.
1162 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1163 divisor != 1 && divisor != -1) {
1164 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1165 __ tst(dividend, Operand(mask));
1166 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
1167 }
1168
1169 if (divisor == -1) { // Nice shortcut, not needed for correctness.
1170 __ rsb(result, dividend, Operand(0));
1171 return;
1172 }
1173 int32_t shift = WhichPowerOf2Abs(divisor);
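  // For negative dividends, add |divisor| - 1 before shifting so that the
  // arithmetic shift rounds toward zero, e.g. -7 / 4: (-7 + 3) >> 2 == -1.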
1174 if (shift == 0) {
1175 __ mov(result, dividend);
1176 } else if (shift == 1) {
1177 __ add(result, dividend, Operand(dividend, LSR, 31));
1178 } else {
1179 __ mov(result, Operand(dividend, ASR, 31));
1180 __ add(result, dividend, Operand(result, LSR, 32 - shift));
1181 }
1182 if (shift > 0) __ mov(result, Operand(result, ASR, shift));
1183 if (divisor < 0) __ rsb(result, result, Operand(0));
1184}
1185
1186
1187void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1188 Register dividend = ToRegister(instr->dividend());
1189 int32_t divisor = instr->divisor();
1190 Register result = ToRegister(instr->result());
1191 DCHECK(!dividend.is(result));
1192
1193 if (divisor == 0) {
1194 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
1195 return;
1196 }
1197
1198 // Check for (0 / -x) that will produce negative zero.
1199 HDiv* hdiv = instr->hydrogen();
1200 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1201 __ cmp(dividend, Operand::Zero());
1202 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1203 }
1204
1205 __ TruncatingDiv(result, dividend, Abs(divisor));
1206 if (divisor < 0) __ rsb(result, result, Operand::Zero());
1207
1208 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1209 __ mov(ip, Operand(divisor));
1210 __ smull(scratch0(), ip, result, ip);
1211 __ sub(scratch0(), scratch0(), dividend, SetCC);
1212 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
1213 }
1214}
1215
1216
1217// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1218void LCodeGen::DoDivI(LDivI* instr) {
1219 HBinaryOperation* hdiv = instr->hydrogen();
1220 Register dividend = ToRegister(instr->dividend());
1221 Register divisor = ToRegister(instr->divisor());
1222 Register result = ToRegister(instr->result());
1223
1224 // Check for x / 0.
1225 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1226 __ cmp(divisor, Operand::Zero());
1227 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
1228 }
1229
1230 // Check for (0 / -x) that will produce negative zero.
1231 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1232 Label positive;
1233 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
1234 // Do the test only if it hadn't been done above.
1235 __ cmp(divisor, Operand::Zero());
1236 }
1237 __ b(pl, &positive);
1238 __ cmp(dividend, Operand::Zero());
1239 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1240 __ bind(&positive);
1241 }
1242
1243 // Check for (kMinInt / -1).
1244 if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1245 (!CpuFeatures::IsSupported(SUDIV) ||
1246 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
1247 // We don't need to check for overflow when truncating with sdiv
1248 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
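 // The second cmp below is predicated on eq, so the deopt only triggers when
 // dividend == kMinInt and divisor == -1.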
1249 __ cmp(dividend, Operand(kMinInt));
1250 __ cmp(divisor, Operand(-1), eq);
1251 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
1252 }
1253
1254 if (CpuFeatures::IsSupported(SUDIV)) {
1255 CpuFeatureScope scope(masm(), SUDIV);
1256 __ sdiv(result, dividend, divisor);
1257 } else {
1258 DoubleRegister vleft = ToDoubleRegister(instr->temp());
1259 DoubleRegister vright = double_scratch0();
1260 __ vmov(double_scratch0().low(), dividend);
1261 __ vcvt_f64_s32(vleft, double_scratch0().low());
1262 __ vmov(double_scratch0().low(), divisor);
1263 __ vcvt_f64_s32(vright, double_scratch0().low());
1264 __ vdiv(vleft, vleft, vright); // vleft now contains the result.
1265 __ vcvt_s32_f64(double_scratch0().low(), vleft);
1266 __ vmov(result, double_scratch0().low());
1267 }
1268
1269 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1270 // Compute remainder and deopt if it's not zero.
1271 Register remainder = scratch0();
1272 __ Mls(remainder, result, divisor, dividend);
1273 __ cmp(remainder, Operand::Zero());
1274 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
1275 }
1276}
1277
1278
1279void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1280 DwVfpRegister addend = ToDoubleRegister(instr->addend());
1281 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
1282 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1283
1284 // This is computed in-place.
1285 DCHECK(addend.is(ToDoubleRegister(instr->result())));
1286
1287 __ vmla(addend, multiplier, multiplicand);
1288}
1289
1290
1291void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
1292 DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
1293 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
1294 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1295
1296 // This is computed in-place.
1297 DCHECK(minuend.is(ToDoubleRegister(instr->result())));
1298
1299 __ vmls(minuend, multiplier, multiplicand);
1300}
1301
1302
1303void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1304 Register dividend = ToRegister(instr->dividend());
1305 Register result = ToRegister(instr->result());
1306 int32_t divisor = instr->divisor();
1307
1308 // If the divisor is 1, return the dividend.
1309 if (divisor == 1) {
1310 __ Move(result, dividend);
1311 return;
1312 }
1313
1314 // If the divisor is positive, things are easy: There can be no deopts and we
1315 // can simply do an arithmetic right shift.
1316 int32_t shift = WhichPowerOf2Abs(divisor);
1317 if (divisor > 1) {
1318 __ mov(result, Operand(dividend, ASR, shift));
1319 return;
1320 }
1321
1322 // If the divisor is negative, we have to negate and handle edge cases.
1323 __ rsb(result, dividend, Operand::Zero(), SetCC);
1324 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1325 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1326 }
1327
1328 // Dividing by -1 is basically negation, unless we overflow.
1329 if (divisor == -1) {
1330 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1331 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
1332 }
1333 return;
1334 }
1335
1336 // If the negation could not overflow, simply shifting is OK.
1337 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1338 __ mov(result, Operand(result, ASR, shift));
1339 return;
1340 }
1341
1342 __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
1343 __ mov(result, Operand(result, ASR, shift), LeaveCC, vc);
1344}
1345
1346
1347void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1348 Register dividend = ToRegister(instr->dividend());
1349 int32_t divisor = instr->divisor();
1350 Register result = ToRegister(instr->result());
1351 DCHECK(!dividend.is(result));
1352
1353 if (divisor == 0) {
1354 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
1355 return;
1356 }
1357
1358 // Check for (0 / -x) that will produce negative zero.
1359 HMathFloorOfDiv* hdiv = instr->hydrogen();
1360 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1361 __ cmp(dividend, Operand::Zero());
1362 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1363 }
1364
1365 // Easy case: We need no dynamic check for the dividend and the flooring
1366 // division is the same as the truncating division.
1367 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1368 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1369 __ TruncatingDiv(result, dividend, Abs(divisor));
1370 if (divisor < 0) __ rsb(result, result, Operand::Zero());
1371 return;
1372 }
1373
1374 // In the general case we may need to adjust before and after the truncating
1375 // division to get a flooring division.
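 // E.g. -7 / 2: truncating division of (-7 + 1) gives -3, and subtracting 1
 // yields floor(-7 / 2) == -4.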
1376 Register temp = ToRegister(instr->temp());
1377 DCHECK(!temp.is(dividend) && !temp.is(result));
1378 Label needs_adjustment, done;
1379 __ cmp(dividend, Operand::Zero());
1380 __ b(divisor > 0 ? lt : gt, &needs_adjustment);
1381 __ TruncatingDiv(result, dividend, Abs(divisor));
1382 if (divisor < 0) __ rsb(result, result, Operand::Zero());
1383 __ jmp(&done);
1384 __ bind(&needs_adjustment);
1385 __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
1386 __ TruncatingDiv(result, temp, Abs(divisor));
1387 if (divisor < 0) __ rsb(result, result, Operand::Zero());
1388 __ sub(result, result, Operand(1));
1389 __ bind(&done);
1390}
1391
1392
1393// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1394void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1395 HBinaryOperation* hdiv = instr->hydrogen();
1396 Register left = ToRegister(instr->dividend());
1397 Register right = ToRegister(instr->divisor());
1398 Register result = ToRegister(instr->result());
1399
1400 // Check for x / 0.
1401 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1402 __ cmp(right, Operand::Zero());
1403 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
1404 }
1405
1406 // Check for (0 / -x) that will produce negative zero.
1407 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1408 Label positive;
1409 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
1410 // Do the test only if it hadn't been done above.
1411 __ cmp(right, Operand::Zero());
1412 }
1413 __ b(pl, &positive);
1414 __ cmp(left, Operand::Zero());
1415 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1416 __ bind(&positive);
1417 }
1418
1419 // Check for (kMinInt / -1).
1420 if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1421 (!CpuFeatures::IsSupported(SUDIV) ||
1422 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
1423 // We don't need to check for overflow when truncating with sdiv
1424 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
1425 __ cmp(left, Operand(kMinInt));
1426 __ cmp(right, Operand(-1), eq);
1427 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
1428 }
1429
1430 if (CpuFeatures::IsSupported(SUDIV)) {
1431 CpuFeatureScope scope(masm(), SUDIV);
1432 __ sdiv(result, left, right);
1433 } else {
1434 DoubleRegister vleft = ToDoubleRegister(instr->temp());
1435 DoubleRegister vright = double_scratch0();
1436 __ vmov(double_scratch0().low(), left);
1437 __ vcvt_f64_s32(vleft, double_scratch0().low());
1438 __ vmov(double_scratch0().low(), right);
1439 __ vcvt_f64_s32(vright, double_scratch0().low());
1440 __ vdiv(vleft, vleft, vright); // vleft now contains the result.
1441 __ vcvt_s32_f64(double_scratch0().low(), vleft);
1442 __ vmov(result, double_scratch0().low());
1443 }
1444
1445 Label done;
1446 Register remainder = scratch0();
1447 __ Mls(remainder, result, right, left);
1448 __ cmp(remainder, Operand::Zero());
1449 __ b(eq, &done);
1450 __ eor(remainder, remainder, Operand(right));
1451 __ add(result, result, Operand(remainder, ASR, 31));
1452 __ bind(&done);
1453}
1454
1455
1456void LCodeGen::DoMulI(LMulI* instr) {
1457 Register result = ToRegister(instr->result());
1458 // Note that result may alias left.
1459 Register left = ToRegister(instr->left());
1460 LOperand* right_op = instr->right();
1461
1462 bool bailout_on_minus_zero =
1463 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1464 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1465
1466 if (right_op->IsConstantOperand()) {
1467 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1468
1469 if (bailout_on_minus_zero && (constant < 0)) {
1470 // The case of a zero constant will be handled separately.
1471 // If the constant is negative and left is zero, the result should be -0.
1472 __ cmp(left, Operand::Zero());
1473 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1474 }
1475
1476 switch (constant) {
1477 case -1:
1478 if (overflow) {
1479 __ rsb(result, left, Operand::Zero(), SetCC);
1480 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
1481 } else {
1482 __ rsb(result, left, Operand::Zero());
1483 }
1484 break;
1485 case 0:
1486 if (bailout_on_minus_zero) {
1487 // If left is strictly negative and the constant is zero, the
1488 // result is -0. Deoptimize if required, otherwise return 0.
1489 __ cmp(left, Operand::Zero());
1490 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
1491 }
1492 __ mov(result, Operand::Zero());
1493 break;
1494 case 1:
1495 __ Move(result, left);
1496 break;
1497 default:
1498 // Multiplying by powers of two and powers of two plus or minus
1499 // one can be done faster with shifted operands.
1500 // For other constants we emit standard code.
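 // E.g. x * 5 becomes add(result, x, x LSL 2), and x * 7 becomes
 // rsb(result, x, x LSL 3), i.e. (x << 3) - x.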
1501 int32_t mask = constant >> 31;
1502 uint32_t constant_abs = (constant + mask) ^ mask;
1503
1504 if (base::bits::IsPowerOfTwo32(constant_abs)) {
1505 int32_t shift = WhichPowerOf2(constant_abs);
1506 __ mov(result, Operand(left, LSL, shift));
1507 // Correct the sign of the result if the constant is negative.
1508 if (constant < 0) __ rsb(result, result, Operand::Zero());
1509 } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
1510 int32_t shift = WhichPowerOf2(constant_abs - 1);
1511 __ add(result, left, Operand(left, LSL, shift));
1512 // Correct the sign of the result if the constant is negative.
1513 if (constant < 0) __ rsb(result, result, Operand::Zero());
1514 } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
1515 int32_t shift = WhichPowerOf2(constant_abs + 1);
1516 __ rsb(result, left, Operand(left, LSL, shift));
1517 // Correct the sign of the result if the constant is negative.
1518 if (constant < 0) __ rsb(result, result, Operand::Zero());
1519 } else {
1520 // Generate standard code.
1521 __ mov(ip, Operand(constant));
1522 __ mul(result, left, ip);
1523 }
1524 }
1525
1526 } else {
1527 DCHECK(right_op->IsRegister());
1528 Register right = ToRegister(right_op);
1529
1530 if (overflow) {
1531 Register scratch = scratch0();
1532 // scratch:result = left * right.
1533 if (instr->hydrogen()->representation().IsSmi()) {
1534 __ SmiUntag(result, left);
1535 __ smull(result, scratch, result, right);
1536 } else {
1537 __ smull(result, scratch, left, right);
1538 }
1539 __ cmp(scratch, Operand(result, ASR, 31));
1540 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
1541 } else {
1542 if (instr->hydrogen()->representation().IsSmi()) {
1543 __ SmiUntag(result, left);
1544 __ mul(result, result, right);
1545 } else {
1546 __ mul(result, left, right);
1547 }
1548 }
1549
1550 if (bailout_on_minus_zero) {
1551 Label done;
1552 __ teq(left, Operand(right));
1553 __ b(pl, &done);
1554 // Bail out if the result is minus zero.
1555 __ cmp(result, Operand::Zero());
1556 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1557 __ bind(&done);
1558 }
1559 }
1560}
1561
1562
1563void LCodeGen::DoBitI(LBitI* instr) {
1564 LOperand* left_op = instr->left();
1565 LOperand* right_op = instr->right();
1566 DCHECK(left_op->IsRegister());
1567 Register left = ToRegister(left_op);
1568 Register result = ToRegister(instr->result());
1569 Operand right(no_reg);
1570
1571 if (right_op->IsStackSlot()) {
1572 right = Operand(EmitLoadRegister(right_op, ip));
1573 } else {
1574 DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
1575 right = ToOperand(right_op);
1576 }
1577
1578 switch (instr->op()) {
1579 case Token::BIT_AND:
1580 __ and_(result, left, right);
1581 break;
1582 case Token::BIT_OR:
1583 __ orr(result, left, right);
1584 break;
1585 case Token::BIT_XOR:
1586 if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
1587 __ mvn(result, Operand(left));
1588 } else {
1589 __ eor(result, left, right);
1590 }
1591 break;
1592 default:
1593 UNREACHABLE();
1594 break;
1595 }
1596}
1597
1598
1599void LCodeGen::DoShiftI(LShiftI* instr) {
1600 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1601 // result may alias either of them.
1602 LOperand* right_op = instr->right();
1603 Register left = ToRegister(instr->left());
1604 Register result = ToRegister(instr->result());
1605 Register scratch = scratch0();
1606 if (right_op->IsRegister()) {
1607 // Mask the right_op operand.
1608 __ and_(scratch, ToRegister(right_op), Operand(0x1F));
1609 switch (instr->op()) {
1610 case Token::ROR:
1611 __ mov(result, Operand(left, ROR, scratch));
1612 break;
1613 case Token::SAR:
1614 __ mov(result, Operand(left, ASR, scratch));
1615 break;
1616 case Token::SHR:
1617 if (instr->can_deopt()) {
1618 __ mov(result, Operand(left, LSR, scratch), SetCC);
1619 DeoptimizeIf(mi, instr, Deoptimizer::kNegativeValue);
1620 } else {
1621 __ mov(result, Operand(left, LSR, scratch));
1622 }
1623 break;
1624 case Token::SHL:
1625 __ mov(result, Operand(left, LSL, scratch));
1626 break;
1627 default:
1628 UNREACHABLE();
1629 break;
1630 }
1631 } else {
1632 // Mask the right_op operand.
1633 int value = ToInteger32(LConstantOperand::cast(right_op));
1634 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1635 switch (instr->op()) {
1636 case Token::ROR:
1637 if (shift_count != 0) {
1638 __ mov(result, Operand(left, ROR, shift_count));
1639 } else {
1640 __ Move(result, left);
1641 }
1642 break;
1643 case Token::SAR:
1644 if (shift_count != 0) {
1645 __ mov(result, Operand(left, ASR, shift_count));
1646 } else {
1647 __ Move(result, left);
1648 }
1649 break;
1650 case Token::SHR:
1651 if (shift_count != 0) {
1652 __ mov(result, Operand(left, LSR, shift_count));
1653 } else {
1654 if (instr->can_deopt()) {
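 // A zero shift leaves the value unchanged, so deopt if the sign bit is set:
 // such a uint32 value cannot be represented as a signed int32.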
1655 __ tst(left, Operand(0x80000000));
1656 DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
1657 }
1658 __ Move(result, left);
1659 }
1660 break;
1661 case Token::SHL:
1662 if (shift_count != 0) {
1663 if (instr->hydrogen_value()->representation().IsSmi() &&
1664 instr->can_deopt()) {
1665 if (shift_count != 1) {
1666 __ mov(result, Operand(left, LSL, shift_count - 1));
1667 __ SmiTag(result, result, SetCC);
1668 } else {
1669 __ SmiTag(result, left, SetCC);
1670 }
1671 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
1672 } else {
1673 __ mov(result, Operand(left, LSL, shift_count));
1674 }
1675 } else {
1676 __ Move(result, left);
1677 }
1678 break;
1679 default:
1680 UNREACHABLE();
1681 break;
1682 }
1683 }
1684}
1685
1686
1687void LCodeGen::DoSubI(LSubI* instr) {
1688 LOperand* left = instr->left();
1689 LOperand* right = instr->right();
1690 LOperand* result = instr->result();
1691 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1692 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1693
1694 if (right->IsStackSlot()) {
1695 Register right_reg = EmitLoadRegister(right, ip);
1696 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1697 } else {
1698 DCHECK(right->IsRegister() || right->IsConstantOperand());
1699 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1700 }
1701
1702 if (can_overflow) {
1703 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
1704 }
1705}
1706
1707
1708void LCodeGen::DoRSubI(LRSubI* instr) {
1709 LOperand* left = instr->left();
1710 LOperand* right = instr->right();
1711 LOperand* result = instr->result();
1712 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1713 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1714
1715 if (right->IsStackSlot()) {
1716 Register right_reg = EmitLoadRegister(right, ip);
1717 __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1718 } else {
1719 DCHECK(right->IsRegister() || right->IsConstantOperand());
1720 __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1721 }
1722
1723 if (can_overflow) {
1724 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
1725 }
1726}
1727
1728
1729void LCodeGen::DoConstantI(LConstantI* instr) {
1730 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1731}
1732
1733
1734void LCodeGen::DoConstantS(LConstantS* instr) {
1735 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1736}
1737
1738
1739void LCodeGen::DoConstantD(LConstantD* instr) {
1740 DCHECK(instr->result()->IsDoubleRegister());
1741 DwVfpRegister result = ToDoubleRegister(instr->result());
1742#if V8_HOST_ARCH_IA32
1743 // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
1744 // builds.
1745 uint64_t bits = instr->bits();
1746 if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
1747 V8_UINT64_C(0x7FF0000000000000)) {
1748 uint32_t lo = static_cast<uint32_t>(bits);
1749 uint32_t hi = static_cast<uint32_t>(bits >> 32);
1750 __ mov(ip, Operand(lo));
1751 __ mov(scratch0(), Operand(hi));
1752 __ vmov(result, ip, scratch0());
1753 return;
1754 }
1755#endif
1756 double v = instr->value();
1757 __ Vmov(result, v, scratch0());
1758}
1759
1760
1761void LCodeGen::DoConstantE(LConstantE* instr) {
1762 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1763}
1764
1765
1766void LCodeGen::DoConstantT(LConstantT* instr) {
1767 Handle<Object> object = instr->value(isolate());
1768 AllowDeferredHandleDereference smi_check;
1769 __ Move(ToRegister(instr->result()), object);
1770}
1771
1772
1773MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1774 LOperand* index,
1775 String::Encoding encoding) {
1776 if (index->IsConstantOperand()) {
1777 int offset = ToInteger32(LConstantOperand::cast(index));
1778 if (encoding == String::TWO_BYTE_ENCODING) {
1779 offset *= kUC16Size;
1780 }
1781 STATIC_ASSERT(kCharSize == 1);
1782 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1783 }
1784 Register scratch = scratch0();
1785 DCHECK(!scratch.is(string));
1786 DCHECK(!scratch.is(ToRegister(index)));
1787 if (encoding == String::ONE_BYTE_ENCODING) {
1788 __ add(scratch, string, Operand(ToRegister(index)));
1789 } else {
1790 STATIC_ASSERT(kUC16Size == 2);
1791 __ add(scratch, string, Operand(ToRegister(index), LSL, 1));
1792 }
1793 return FieldMemOperand(scratch, SeqString::kHeaderSize);
1794}
1795
1796
1797void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1798 String::Encoding encoding = instr->hydrogen()->encoding();
1799 Register string = ToRegister(instr->string());
1800 Register result = ToRegister(instr->result());
1801
1802 if (FLAG_debug_code) {
1803 Register scratch = scratch0();
1804 __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1805 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1806
1807 __ and_(scratch, scratch,
1808 Operand(kStringRepresentationMask | kStringEncodingMask));
1809 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1810 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1811 __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
1812 ? one_byte_seq_type : two_byte_seq_type));
1813 __ Check(eq, kUnexpectedStringType);
1814 }
1815
1816 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1817 if (encoding == String::ONE_BYTE_ENCODING) {
1818 __ ldrb(result, operand);
1819 } else {
1820 __ ldrh(result, operand);
1821 }
1822}
1823
1824
1825void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1826 String::Encoding encoding = instr->hydrogen()->encoding();
1827 Register string = ToRegister(instr->string());
1828 Register value = ToRegister(instr->value());
1829
1830 if (FLAG_debug_code) {
1831 Register index = ToRegister(instr->index());
1832 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1833 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1834 int encoding_mask =
1835 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1836 ? one_byte_seq_type : two_byte_seq_type;
1837 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1838 }
1839
1840 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1841 if (encoding == String::ONE_BYTE_ENCODING) {
1842 __ strb(value, operand);
1843 } else {
1844 __ strh(value, operand);
1845 }
1846}
1847
1848
1849void LCodeGen::DoAddI(LAddI* instr) {
1850 LOperand* left = instr->left();
1851 LOperand* right = instr->right();
1852 LOperand* result = instr->result();
1853 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1854 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1855
1856 if (right->IsStackSlot()) {
1857 Register right_reg = EmitLoadRegister(right, ip);
1858 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1859 } else {
1860 DCHECK(right->IsRegister() || right->IsConstantOperand());
1861 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1862 }
1863
1864 if (can_overflow) {
1865 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
1866 }
1867}
1868
1869
1870void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1871 LOperand* left = instr->left();
1872 LOperand* right = instr->right();
1873 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1874 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1875 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
1876 Register left_reg = ToRegister(left);
1877 Operand right_op = (right->IsRegister() || right->IsConstantOperand())
1878 ? ToOperand(right)
1879 : Operand(EmitLoadRegister(right, ip));
1880 Register result_reg = ToRegister(instr->result());
1881 __ cmp(left_reg, right_op);
1882 __ Move(result_reg, left_reg, condition);
1883 __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
1884 } else {
1885 DCHECK(instr->hydrogen()->representation().IsDouble());
1886 DwVfpRegister left_reg = ToDoubleRegister(left);
1887 DwVfpRegister right_reg = ToDoubleRegister(right);
1888 DwVfpRegister result_reg = ToDoubleRegister(instr->result());
1889 Label result_is_nan, return_left, return_right, check_zero, done;
1890 __ VFPCompareAndSetFlags(left_reg, right_reg);
1891 if (operation == HMathMinMax::kMathMin) {
1892 __ b(mi, &return_left);
1893 __ b(gt, &return_right);
1894 } else {
1895 __ b(mi, &return_right);
1896 __ b(gt, &return_left);
1897 }
1898 __ b(vs, &result_is_nan);
1899 // Left equals right => check for -0.
1900 __ VFPCompareAndSetFlags(left_reg, 0.0);
1901 if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
1902 __ b(ne, &done); // left == right != 0.
1903 } else {
1904 __ b(ne, &return_left); // left == right != 0.
1905 }
1906 // At this point, both left and right are either 0 or -0.
1907 if (operation == HMathMinMax::kMathMin) {
1908 // We could use a single 'vorr' instruction here if we had NEON support.
1909 // The algorithm is: -((-L) + (-R)), which in case of L and R being
1910 // different registers is most efficiently expressed as -((-L) - R).
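      // IEEE-754 addition yields -0 only when both addends are -0, so after
      // negating the inputs the negated sum is -0 exactly when at least one
      // original input was -0, which is the correct minimum over signed zeros.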
1911 __ vneg(left_reg, left_reg);
1912 if (left_reg.is(right_reg)) {
1913 __ vadd(result_reg, left_reg, right_reg);
1914 } else {
1915 __ vsub(result_reg, left_reg, right_reg);
1916 }
1917 __ vneg(result_reg, result_reg);
1918 } else {
1919 // Since we operate on +0 and/or -0, vadd and vand have the same effect;
1920 // the decision for vadd is easy because vand is a NEON instruction.
1921 __ vadd(result_reg, left_reg, right_reg);
1922 }
1923 __ b(&done);
1924
1925 __ bind(&result_is_nan);
1926 __ vadd(result_reg, left_reg, right_reg);
1927 __ b(&done);
1928
1929 __ bind(&return_right);
1930 __ Move(result_reg, right_reg);
1931 if (!left_reg.is(result_reg)) {
1932 __ b(&done);
1933 }
1934
1935 __ bind(&return_left);
1936 __ Move(result_reg, left_reg);
1937
1938 __ bind(&done);
1939 }
1940}
1941
1942
1943void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1944 DwVfpRegister left = ToDoubleRegister(instr->left());
1945 DwVfpRegister right = ToDoubleRegister(instr->right());
1946 DwVfpRegister result = ToDoubleRegister(instr->result());
1947 switch (instr->op()) {
1948 case Token::ADD:
1949 __ vadd(result, left, right);
1950 break;
1951 case Token::SUB:
1952 __ vsub(result, left, right);
1953 break;
1954 case Token::MUL:
1955 __ vmul(result, left, right);
1956 break;
1957 case Token::DIV:
1958 __ vdiv(result, left, right);
1959 break;
1960 case Token::MOD: {
1961 __ PrepareCallCFunction(0, 2, scratch0());
1962 __ MovToFloatParameters(left, right);
1963 __ CallCFunction(
1964 ExternalReference::mod_two_doubles_operation(isolate()),
1965 0, 2);
1966 // Move the result into the double result register.
1967 __ MovFromFloatResult(result);
1968 break;
1969 }
1970 default:
1971 UNREACHABLE();
1972 break;
1973 }
1974}
1975
1976
1977void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1978 DCHECK(ToRegister(instr->context()).is(cp));
1979 DCHECK(ToRegister(instr->left()).is(r1));
1980 DCHECK(ToRegister(instr->right()).is(r0));
1981 DCHECK(ToRegister(instr->result()).is(r0));
1982
1983 Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
1984 // Block literal pool emission to ensure nop indicating no inlined smi code
1985 // is in the correct position.
1986 Assembler::BlockConstPoolScope block_const_pool(masm());
1987 CallCode(code, RelocInfo::CODE_TARGET, instr);
1988}
1989
1990
1991template<class InstrType>
1992void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
1993 int left_block = instr->TrueDestination(chunk_);
1994 int right_block = instr->FalseDestination(chunk_);
1995
1996 int next_block = GetNextEmittedBlock();
1997
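  // Prefer fall-through: when either destination is the next emitted block,
  // branch only for the other one; otherwise emit both branches.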
1998 if (right_block == left_block || condition == al) {
1999 EmitGoto(left_block);
2000 } else if (left_block == next_block) {
2001 __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block));
2002 } else if (right_block == next_block) {
2003 __ b(condition, chunk_->GetAssemblyLabel(left_block));
2004 } else {
2005 __ b(condition, chunk_->GetAssemblyLabel(left_block));
2006 __ b(chunk_->GetAssemblyLabel(right_block));
2007 }
2008}
2009
2010
2011template <class InstrType>
2012void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition) {
2013 int true_block = instr->TrueDestination(chunk_);
2014 __ b(condition, chunk_->GetAssemblyLabel(true_block));
2015}
2016
2017
2018template <class InstrType>
2019void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
2020 int false_block = instr->FalseDestination(chunk_);
2021 __ b(condition, chunk_->GetAssemblyLabel(false_block));
2022}
2023
2024
2025void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2026 __ stop("LBreak");
2027}
2028
2029
2030void LCodeGen::DoBranch(LBranch* instr) {
2031 Representation r = instr->hydrogen()->value()->representation();
2032 if (r.IsInteger32() || r.IsSmi()) {
2033 DCHECK(!info()->IsStub());
2034 Register reg = ToRegister(instr->value());
2035 __ cmp(reg, Operand::Zero());
2036 EmitBranch(instr, ne);
2037 } else if (r.IsDouble()) {
2038 DCHECK(!info()->IsStub());
2039 DwVfpRegister reg = ToDoubleRegister(instr->value());
2040 // Test the double value. Zero and NaN are false.
2041 __ VFPCompareAndSetFlags(reg, 0.0);
2042 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
2043 EmitBranch(instr, ne);
2044 } else {
2045 DCHECK(r.IsTagged());
2046 Register reg = ToRegister(instr->value());
2047 HType type = instr->hydrogen()->value()->type();
2048 if (type.IsBoolean()) {
2049 DCHECK(!info()->IsStub());
2050 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2051 EmitBranch(instr, eq);
2052 } else if (type.IsSmi()) {
2053 DCHECK(!info()->IsStub());
2054 __ cmp(reg, Operand::Zero());
2055 EmitBranch(instr, ne);
2056 } else if (type.IsJSArray()) {
2057 DCHECK(!info()->IsStub());
2058 EmitBranch(instr, al);
2059 } else if (type.IsHeapNumber()) {
2060 DCHECK(!info()->IsStub());
2061 DwVfpRegister dbl_scratch = double_scratch0();
2062 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2063 // Test the double value. Zero and NaN are false.
2064 __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2065 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
2066 EmitBranch(instr, ne);
2067 } else if (type.IsString()) {
2068 DCHECK(!info()->IsStub());
2069 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
2070 __ cmp(ip, Operand::Zero());
2071 EmitBranch(instr, ne);
2072 } else {
2073 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2074 // Avoid deopts in the case where we've never executed this path before.
2075 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2076
2077 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2078 // undefined -> false.
2079 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2080 __ b(eq, instr->FalseLabel(chunk_));
2081 }
2082 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2083 // Boolean -> its value.
2084 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2085 __ b(eq, instr->TrueLabel(chunk_));
2086 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2087 __ b(eq, instr->FalseLabel(chunk_));
2088 }
2089 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2090 // 'null' -> false.
2091 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2092 __ b(eq, instr->FalseLabel(chunk_));
2093 }
2094
2095 if (expected.Contains(ToBooleanStub::SMI)) {
2096 // Smis: 0 -> false, all other -> true.
2097 __ cmp(reg, Operand::Zero());
2098 __ b(eq, instr->FalseLabel(chunk_));
2099 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2100 } else if (expected.NeedsMap()) {
2101 // If we need a map later and have a Smi -> deopt.
2102 __ SmiTst(reg);
2103 DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
2104 }
2105
2106 const Register map = scratch0();
2107 if (expected.NeedsMap()) {
2108 __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2109
2110 if (expected.CanBeUndetectable()) {
2111 // Undetectable -> false.
2112 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
2113 __ tst(ip, Operand(1 << Map::kIsUndetectable));
2114 __ b(ne, instr->FalseLabel(chunk_));
2115 }
2116 }
2117
2118 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2119 // spec object -> true.
2120 __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
2121 __ b(ge, instr->TrueLabel(chunk_));
2122 }
2123
2124 if (expected.Contains(ToBooleanStub::STRING)) {
2125 // String value -> false iff empty.
2126 Label not_string;
2127 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2128 __ b(ge, &not_string);
2129 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
2130 __ cmp(ip, Operand::Zero());
2131 __ b(ne, instr->TrueLabel(chunk_));
2132 __ b(instr->FalseLabel(chunk_));
2133 __ bind(&not_string);
2134 }
2135
2136 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2137 // Symbol value -> true.
2138 __ CompareInstanceType(map, ip, SYMBOL_TYPE);
2139 __ b(eq, instr->TrueLabel(chunk_));
2140 }
2141
2142 if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
2143 // SIMD value -> true.
2144 __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
2145 __ b(eq, instr->TrueLabel(chunk_));
2146 }
2147
2148 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2149 // heap number -> false iff +0, -0, or NaN.
2150 DwVfpRegister dbl_scratch = double_scratch0();
2151 Label not_heap_number;
2152 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2153 __ b(ne, &not_heap_number);
2154 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2155 __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2156 __ cmp(r0, r0, vs); // NaN -> false.
2157 __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false.
2158 __ b(instr->TrueLabel(chunk_));
2159 __ bind(&not_heap_number);
2160 }
2161
2162 if (!expected.IsGeneric()) {
2163 // We've seen something for the first time -> deopt.
2164 // This can only happen if we are not generic already.
2165 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
2166 }
2167 }
2168 }
2169}
2170
2171
2172void LCodeGen::EmitGoto(int block) {
2173 if (!IsNextEmittedBlock(block)) {
2174 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2175 }
2176}
2177
2178
2179void LCodeGen::DoGoto(LGoto* instr) {
2180 EmitGoto(instr->block_id());
2181}
2182
2183
2184Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2185 Condition cond = kNoCondition;
2186 switch (op) {
2187 case Token::EQ:
2188 case Token::EQ_STRICT:
2189 cond = eq;
2190 break;
2191 case Token::NE:
2192 case Token::NE_STRICT:
2193 cond = ne;
2194 break;
2195 case Token::LT:
2196 cond = is_unsigned ? lo : lt;
2197 break;
2198 case Token::GT:
2199 cond = is_unsigned ? hi : gt;
2200 break;
2201 case Token::LTE:
2202 cond = is_unsigned ? ls : le;
2203 break;
2204 case Token::GTE:
2205 cond = is_unsigned ? hs : ge;
2206 break;
2207 case Token::IN:
2208 case Token::INSTANCEOF:
2209 default:
2210 UNREACHABLE();
2211 }
2212 return cond;
2213}
2214
2215
2216void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2217 LOperand* left = instr->left();
2218 LOperand* right = instr->right();
2219 bool is_unsigned =
2220 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2221 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2222 Condition cond = TokenToCondition(instr->op(), is_unsigned);
2223
2224 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2225 // We can statically evaluate the comparison.
2226 double left_val = ToDouble(LConstantOperand::cast(left));
2227 double right_val = ToDouble(LConstantOperand::cast(right));
2228 int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
2229 ? instr->TrueDestination(chunk_)
2230 : instr->FalseDestination(chunk_);
2231 EmitGoto(next_block);
2232 } else {
2233 if (instr->is_double()) {
2234 // Compare left and right operands as doubles and load the
2235 // resulting flags into the normal status register.
2236 __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
2237 // If a NaN is involved, i.e. the result is unordered (V set),
2238 // jump to false block label.
2239 __ b(vs, instr->FalseLabel(chunk_));
2240 } else {
2241 if (right->IsConstantOperand()) {
2242 int32_t value = ToInteger32(LConstantOperand::cast(right));
2243 if (instr->hydrogen_value()->representation().IsSmi()) {
2244 __ cmp(ToRegister(left), Operand(Smi::FromInt(value)));
2245 } else {
2246 __ cmp(ToRegister(left), Operand(value));
2247 }
2248 } else if (left->IsConstantOperand()) {
2249 int32_t value = ToInteger32(LConstantOperand::cast(left));
2250 if (instr->hydrogen_value()->representation().IsSmi()) {
2251 __ cmp(ToRegister(right), Operand(Smi::FromInt(value)));
2252 } else {
2253 __ cmp(ToRegister(right), Operand(value));
2254 }
2255 // We commuted the operands, so commute the condition.
2256 cond = CommuteCondition(cond);
2257 } else {
2258 __ cmp(ToRegister(left), ToRegister(right));
2259 }
2260 }
2261 EmitBranch(instr, cond);
2262 }
2263}
2264
2265
2266void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2267 Register left = ToRegister(instr->left());
2268 Register right = ToRegister(instr->right());
2269
2270 __ cmp(left, Operand(right));
2271 EmitBranch(instr, eq);
2272}
2273
2274
2275void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2276 if (instr->hydrogen()->representation().IsTagged()) {
2277 Register input_reg = ToRegister(instr->object());
2278 __ mov(ip, Operand(factory()->the_hole_value()));
2279 __ cmp(input_reg, ip);
2280 EmitBranch(instr, eq);
2281 return;
2282 }
2283
2284 DwVfpRegister input_reg = ToDoubleRegister(instr->object());
2285 __ VFPCompareAndSetFlags(input_reg, input_reg);
2286 EmitFalseBranch(instr, vc);
2287
2288 Register scratch = scratch0();
2289 __ VmovHigh(scratch, input_reg);
2290 __ cmp(scratch, Operand(kHoleNanUpper32));
2291 EmitBranch(instr, eq);
2292}
2293
2294
2295Condition LCodeGen::EmitIsString(Register input,
2296 Register temp1,
2297 Label* is_not_string,
2298 SmiCheck check_needed = INLINE_SMI_CHECK) {
2299 if (check_needed == INLINE_SMI_CHECK) {
2300 __ JumpIfSmi(input, is_not_string);
2301 }
2302 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
2303
2304 return lt;
2305}
2306
2307
2308void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2309 Register reg = ToRegister(instr->value());
2310 Register temp1 = ToRegister(instr->temp());
2311
2312 SmiCheck check_needed =
2313 instr->hydrogen()->value()->type().IsHeapObject()
2314 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2315 Condition true_cond =
2316 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2317
2318 EmitBranch(instr, true_cond);
2319}
2320
2321
2322void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2323 Register input_reg = EmitLoadRegister(instr->value(), ip);
2324 __ SmiTst(input_reg);
2325 EmitBranch(instr, eq);
2326}
2327
2328
2329void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2330 Register input = ToRegister(instr->value());
2331 Register temp = ToRegister(instr->temp());
2332
2333 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2334 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2335 }
2336 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2337 __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2338 __ tst(temp, Operand(1 << Map::kIsUndetectable));
2339 EmitBranch(instr, ne);
2340}
2341
2342
2343static Condition ComputeCompareCondition(Token::Value op) {
2344 switch (op) {
2345 case Token::EQ_STRICT:
2346 case Token::EQ:
2347 return eq;
2348 case Token::LT:
2349 return lt;
2350 case Token::GT:
2351 return gt;
2352 case Token::LTE:
2353 return le;
2354 case Token::GTE:
2355 return ge;
2356 default:
2357 UNREACHABLE();
2358 return kNoCondition;
2359 }
2360}
2361
2362
2363void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2364 DCHECK(ToRegister(instr->context()).is(cp));
2365 DCHECK(ToRegister(instr->left()).is(r1));
2366 DCHECK(ToRegister(instr->right()).is(r0));
2367
2368 Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
2369 CallCode(code, RelocInfo::CODE_TARGET, instr);
2370 __ cmp(r0, Operand::Zero());
2371
2372 EmitBranch(instr, ComputeCompareCondition(instr->op()));
2373}
2374
2375
2376static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2377 InstanceType from = instr->from();
2378 InstanceType to = instr->to();
2379 if (from == FIRST_TYPE) return to;
2380 DCHECK(from == to || to == LAST_TYPE);
2381 return from;
2382}
2383
2384
2385static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2386 InstanceType from = instr->from();
2387 InstanceType to = instr->to();
2388 if (from == to) return eq;
2389 if (to == LAST_TYPE) return hs;
2390 if (from == FIRST_TYPE) return ls;
2391 UNREACHABLE();
2392 return eq;
2393}
2394
2395
2396void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2397 Register scratch = scratch0();
2398 Register input = ToRegister(instr->value());
2399
2400 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2401 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2402 }
2403
2404 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2405 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2406}
2407
2408
2409void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2410 Register input = ToRegister(instr->value());
2411 Register result = ToRegister(instr->result());
2412
2413 __ AssertString(input);
2414
2415 __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
2416 __ IndexFromHash(result, result);
2417}
2418
2419
2420void LCodeGen::DoHasCachedArrayIndexAndBranch(
2421 LHasCachedArrayIndexAndBranch* instr) {
2422 Register input = ToRegister(instr->value());
2423 Register scratch = scratch0();
2424
2425 __ ldr(scratch,
2426 FieldMemOperand(input, String::kHashFieldOffset));
2427 __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
2428 EmitBranch(instr, eq);
2429}
2430
2431
2432// Branches to a label or falls through with the answer in flags. Trashes
2433// the temp registers, but not the input.
2434void LCodeGen::EmitClassOfTest(Label* is_true,
2435 Label* is_false,
2436 Handle<String> class_name,
2437 Register input,
2438 Register temp,
2439 Register temp2) {
2440 DCHECK(!input.is(temp));
2441 DCHECK(!input.is(temp2));
2442 DCHECK(!temp.is(temp2));
2443
2444 __ JumpIfSmi(input, is_false);
2445
2446 __ CompareObjectType(input, temp, temp2, JS_FUNCTION_TYPE);
2447 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2448 __ b(eq, is_true);
2449 } else {
2450 __ b(eq, is_false);
2451 }
2452
2453 // Check if the constructor in the map is a function.
2454 Register instance_type = ip;
2455 __ GetMapConstructor(temp, temp, temp2, instance_type);
2456
2457 // Objects with a non-function constructor have class 'Object'.
2458 __ cmp(instance_type, Operand(JS_FUNCTION_TYPE));
2459 if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
2460 __ b(ne, is_true);
2461 } else {
2462 __ b(ne, is_false);
2463 }
2464
2465 // temp now contains the constructor function. Grab the
2466 // instance class name from there.
2467 __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2468 __ ldr(temp, FieldMemOperand(temp,
2469 SharedFunctionInfo::kInstanceClassNameOffset));
2470 // The class name we are testing against is internalized since it's a literal.
2471 // The name in the constructor is internalized because of the way the context
2472 // is booted. This routine isn't expected to work for random API-created
2473 // classes and it doesn't have to because you can't access it with natives
2474 // syntax. Since both sides are internalized it is sufficient to use an
2475 // identity comparison.
2476 __ cmp(temp, Operand(class_name));
2477 // End with the answer in flags.
2478}
2479
2480
2481void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2482 Register input = ToRegister(instr->value());
2483 Register temp = scratch0();
2484 Register temp2 = ToRegister(instr->temp());
2485 Handle<String> class_name = instr->hydrogen()->class_name();
2486
2487 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2488 class_name, input, temp, temp2);
2489
2490 EmitBranch(instr, eq);
2491}
2492
2493
2494void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2495 Register reg = ToRegister(instr->value());
2496 Register temp = ToRegister(instr->temp());
2497
2498 __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2499 __ cmp(temp, Operand(instr->map()));
2500 EmitBranch(instr, eq);
2501}
2502
2503
2504void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2505 DCHECK(ToRegister(instr->context()).is(cp));
2506 DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
2507 DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
2508 DCHECK(ToRegister(instr->result()).is(r0));
2509 InstanceOfStub stub(isolate());
2510 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2511}
2512
2513
2514void LCodeGen::DoHasInPrototypeChainAndBranch(
2515 LHasInPrototypeChainAndBranch* instr) {
2516 Register const object = ToRegister(instr->object());
2517 Register const object_map = scratch0();
2518 Register const object_instance_type = ip;
2519 Register const object_prototype = object_map;
2520 Register const prototype = ToRegister(instr->prototype());
2521
2522 // The {object} must be a spec object. It's sufficient to know that {object}
2523 // is not a smi, since all other non-spec objects have {null} prototypes and
2524 // will be ruled out below.
2525 if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
2526 __ SmiTst(object);
2527 EmitFalseBranch(instr, eq);
2528 }
2529
2530 // Loop through the {object}s prototype chain looking for the {prototype}.
2531 __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
2532 Label loop;
2533 __ bind(&loop);
2534
2535 // Deoptimize if the object needs to be access checked.
2536 __ ldrb(object_instance_type,
2537 FieldMemOperand(object_map, Map::kBitFieldOffset));
2538 __ tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded));
2539 DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck);
2540 // Deoptimize for proxies.
2541 __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
2542 DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
2543
2544 __ ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
2545 __ cmp(object_prototype, prototype);
2546 EmitTrueBranch(instr, eq);
2547 __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
2548 EmitFalseBranch(instr, eq);
2549 __ ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
2550 __ b(&loop);
2551}
2552
2553
2554void LCodeGen::DoCmpT(LCmpT* instr) {
2555 DCHECK(ToRegister(instr->context()).is(cp));
2556 Token::Value op = instr->op();
2557
2558 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2559 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2560 // This instruction also signals no smi code inlined.
2561 __ cmp(r0, Operand::Zero());
2562
2563 Condition condition = ComputeCompareCondition(op);
2564 __ LoadRoot(ToRegister(instr->result()),
2565 Heap::kTrueValueRootIndex,
2566 condition);
2567 __ LoadRoot(ToRegister(instr->result()),
2568 Heap::kFalseValueRootIndex,
2569 NegateCondition(condition));
2570}
2571
2572
2573void LCodeGen::DoReturn(LReturn* instr) {
2574 if (FLAG_trace && info()->IsOptimizing()) {
2575 // Push the return value on the stack as the parameter.
2576 // Runtime::TraceExit returns its parameter in r0. We're leaving the code
2577 // managed by the register allocator and tearing down the frame, it's
2578 // safe to write to the context register.
2579 __ push(r0);
2580 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2581 __ CallRuntime(Runtime::kTraceExit);
2582 }
2583 if (info()->saves_caller_doubles()) {
2584 RestoreCallerDoubles();
2585 }
2586 if (NeedsEagerFrame()) {
2587 masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
2588 }
2589 { ConstantPoolUnavailableScope constant_pool_unavailable(masm());
2590 if (instr->has_constant_parameter_count()) {
2591 int parameter_count = ToInteger32(instr->constant_parameter_count());
2592 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2593 if (sp_delta != 0) {
2594 __ add(sp, sp, Operand(sp_delta));
2595 }
2596 } else {
2597 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2598 Register reg = ToRegister(instr->parameter_count());
2599 // The argument count parameter is a smi
2600 __ SmiUntag(reg);
2601 __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
2602 }
2603
2604 __ Jump(lr);
2605 }
2606}
2607
2608
2609template <class T>
2610void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2611 Register vector_register = ToRegister(instr->temp_vector());
2612 Register slot_register = LoadDescriptor::SlotRegister();
2613 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
2614 DCHECK(slot_register.is(r0));
2615
2616 AllowDeferredHandleDereference vector_structure_check;
2617 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2618 __ Move(vector_register, vector);
2619 // No need to allocate this register.
2620 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2621 int index = vector->GetIndex(slot);
2622 __ mov(slot_register, Operand(Smi::FromInt(index)));
2623}
2624
2625
2626template <class T>
2627void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
2628 Register vector_register = ToRegister(instr->temp_vector());
2629 Register slot_register = ToRegister(instr->temp_slot());
2630
2631 AllowDeferredHandleDereference vector_structure_check;
2632 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2633 __ Move(vector_register, vector);
2634 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2635 int index = vector->GetIndex(slot);
2636 __ mov(slot_register, Operand(Smi::FromInt(index)));
2637}
2638
2639
2640void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2641 DCHECK(ToRegister(instr->context()).is(cp));
2642 DCHECK(ToRegister(instr->global_object())
2643 .is(LoadDescriptor::ReceiverRegister()));
2644 DCHECK(ToRegister(instr->result()).is(r0));
2645
2646 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
2647 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2648 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
2649 isolate(), instr->typeof_mode(), PREMONOMORPHIC)
2650 .code();
2651 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2652}
2653
2654
2655void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2656 Register context = ToRegister(instr->context());
2657 Register result = ToRegister(instr->result());
2658 __ ldr(result, ContextMemOperand(context, instr->slot_index()));
2659 if (instr->hydrogen()->RequiresHoleCheck()) {
2660 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2661 __ cmp(result, ip);
2662 if (instr->hydrogen()->DeoptimizesOnHole()) {
2663 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
2664 } else {
2665 __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
2666 }
2667 }
2668}
2669
2670
2671void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2672 Register context = ToRegister(instr->context());
2673 Register value = ToRegister(instr->value());
2674 Register scratch = scratch0();
2675 MemOperand target = ContextMemOperand(context, instr->slot_index());
2676
2677 Label skip_assignment;
2678
2679 if (instr->hydrogen()->RequiresHoleCheck()) {
2680 __ ldr(scratch, target);
2681 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2682 __ cmp(scratch, ip);
2683 if (instr->hydrogen()->DeoptimizesOnHole()) {
2684 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
2685 } else {
2686 __ b(ne, &skip_assignment);
2687 }
2688 }
2689
2690 __ str(value, target);
2691 if (instr->hydrogen()->NeedsWriteBarrier()) {
2692 SmiCheck check_needed =
2693 instr->hydrogen()->value()->type().IsHeapObject()
2694 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2695 __ RecordWriteContextSlot(context,
2696 target.offset(),
2697 value,
2698 scratch,
2699 GetLinkRegisterState(),
2700 kSaveFPRegs,
2701 EMIT_REMEMBERED_SET,
2702 check_needed);
2703 }
2704
2705 __ bind(&skip_assignment);
2706}
2707
2708
2709void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2710 HObjectAccess access = instr->hydrogen()->access();
2711 int offset = access.offset();
2712 Register object = ToRegister(instr->object());
2713
2714 if (access.IsExternalMemory()) {
2715 Register result = ToRegister(instr->result());
2716 MemOperand operand = MemOperand(object, offset);
2717 __ Load(result, operand, access.representation());
2718 return;
2719 }
2720
2721 if (instr->hydrogen()->representation().IsDouble()) {
2722 DwVfpRegister result = ToDoubleRegister(instr->result());
2723 __ vldr(result, FieldMemOperand(object, offset));
2724 return;
2725 }
2726
2727 Register result = ToRegister(instr->result());
2728 if (!access.IsInobject()) {
2729 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2730 object = result;
2731 }
2732 MemOperand operand = FieldMemOperand(object, offset);
2733 __ Load(result, operand, access.representation());
2734}
2735
2736
2737void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2738 DCHECK(ToRegister(instr->context()).is(cp));
2739 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
2740 DCHECK(ToRegister(instr->result()).is(r0));
2741
2742 // Name is always in r2.
2743 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
2744 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
2745 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
2746 isolate(), NOT_INSIDE_TYPEOF,
2747 instr->hydrogen()->initialization_state())
2748 .code();
2749 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
2750}
2751
2752
2753void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2754 Register scratch = scratch0();
2755 Register function = ToRegister(instr->function());
2756 Register result = ToRegister(instr->result());
2757
2758 // Get the prototype or initial map from the function.
2759 __ ldr(result,
2760 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2761
2762 // Check that the function has a prototype or an initial map.
2763 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2764 __ cmp(result, ip);
2765 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
2766
2767 // If the function does not have an initial map, we're done.
2768 Label done;
2769 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
2770 __ b(ne, &done);
2771
2772 // Get the prototype from the initial map.
2773 __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
2774
2775 // All done.
2776 __ bind(&done);
2777}
2778
2779
2780void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2781 Register result = ToRegister(instr->result());
2782 __ LoadRoot(result, instr->index());
2783}
2784
2785
2786void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2787 Register arguments = ToRegister(instr->arguments());
2788 Register result = ToRegister(instr->result());
2789 // There are two words between the frame pointer and the last argument.
2790 // Subtracting the index from the length accounts for one of them; add one more.
2791 if (instr->length()->IsConstantOperand()) {
2792 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
2793 if (instr->index()->IsConstantOperand()) {
2794 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2795 int index = (const_length - const_index) + 1;
2796 __ ldr(result, MemOperand(arguments, index * kPointerSize));
2797 } else {
2798 Register index = ToRegister(instr->index());
2799 __ rsb(result, index, Operand(const_length + 1));
2800 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
2801 }
2802 } else if (instr->index()->IsConstantOperand()) {
2803 Register length = ToRegister(instr->length());
2804 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2805 int loc = const_index - 1;
2806 if (loc != 0) {
2807 __ sub(result, length, Operand(loc));
2808 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
2809 } else {
2810 __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
2811 }
2812 } else {
2813 Register length = ToRegister(instr->length());
2814 Register index = ToRegister(instr->index());
2815 __ sub(result, length, index);
2816 __ add(result, result, Operand(1));
2817 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
2818 }
2819}
2820
2821
2822void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
2823 Register external_pointer = ToRegister(instr->elements());
2824 Register key = no_reg;
2825 ElementsKind elements_kind = instr->elements_kind();
2826 bool key_is_constant = instr->key()->IsConstantOperand();
2827 int constant_key = 0;
2828 if (key_is_constant) {
2829 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2830 if (constant_key & 0xF0000000) {
2831 Abort(kArrayIndexConstantValueTooBig);
2832 }
2833 } else {
2834 key = ToRegister(instr->key());
2835 }
2836 int element_size_shift = ElementsKindToShiftSize(elements_kind);
2837 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
2838 ? (element_size_shift - kSmiTagSize) : element_size_shift;
2839 int base_offset = instr->base_offset();
2840
2841 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
2842 DwVfpRegister result = ToDoubleRegister(instr->result());
2843 Operand operand = key_is_constant
2844 ? Operand(constant_key << element_size_shift)
2845 : Operand(key, LSL, shift_size);
2846 __ add(scratch0(), external_pointer, operand);
2847 if (elements_kind == FLOAT32_ELEMENTS) {
2848 __ vldr(double_scratch0().low(), scratch0(), base_offset);
2849 __ vcvt_f64_f32(result, double_scratch0().low());
2850 } else { // i.e. elements_kind == FLOAT64_ELEMENTS
2851 __ vldr(result, scratch0(), base_offset);
2852 }
2853 } else {
2854 Register result = ToRegister(instr->result());
2855 MemOperand mem_operand = PrepareKeyedOperand(
2856 key, external_pointer, key_is_constant, constant_key,
2857 element_size_shift, shift_size, base_offset);
2858 switch (elements_kind) {
2859 case INT8_ELEMENTS:
2860 __ ldrsb(result, mem_operand);
2861 break;
2862 case UINT8_ELEMENTS:
2863 case UINT8_CLAMPED_ELEMENTS:
2864 __ ldrb(result, mem_operand);
2865 break;
2866 case INT16_ELEMENTS:
2867 __ ldrsh(result, mem_operand);
2868 break;
2869 case UINT16_ELEMENTS:
2870 __ ldrh(result, mem_operand);
2871 break;
2872 case INT32_ELEMENTS:
2873 __ ldr(result, mem_operand);
2874 break;
2875 case UINT32_ELEMENTS:
2876 __ ldr(result, mem_operand);
2877 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
2878 __ cmp(result, Operand(0x80000000));
2879 DeoptimizeIf(cs, instr, Deoptimizer::kNegativeValue);
2880 }
2881 break;
2882 case FLOAT32_ELEMENTS:
2883 case FLOAT64_ELEMENTS:
2884 case FAST_HOLEY_DOUBLE_ELEMENTS:
2885 case FAST_HOLEY_ELEMENTS:
2886 case FAST_HOLEY_SMI_ELEMENTS:
2887 case FAST_DOUBLE_ELEMENTS:
2888 case FAST_ELEMENTS:
2889 case FAST_SMI_ELEMENTS:
2890 case DICTIONARY_ELEMENTS:
2891 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
2892 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
2893 case FAST_STRING_WRAPPER_ELEMENTS:
2894 case SLOW_STRING_WRAPPER_ELEMENTS:
2895 case NO_ELEMENTS:
2896 UNREACHABLE();
2897 break;
2898 }
2899 }
2900}
2901
2902
2903void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
2904 Register elements = ToRegister(instr->elements());
2905 bool key_is_constant = instr->key()->IsConstantOperand();
2906 Register key = no_reg;
2907 DwVfpRegister result = ToDoubleRegister(instr->result());
2908 Register scratch = scratch0();
2909
2910 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
2911
2912 int base_offset = instr->base_offset();
2913 if (key_is_constant) {
2914 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2915 if (constant_key & 0xF0000000) {
2916 Abort(kArrayIndexConstantValueTooBig);
2917 }
2918 base_offset += constant_key * kDoubleSize;
2919 }
2920 __ add(scratch, elements, Operand(base_offset));
2921
2922 if (!key_is_constant) {
2923 key = ToRegister(instr->key());
2924 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
2925 ? (element_size_shift - kSmiTagSize) : element_size_shift;
2926 __ add(scratch, scratch, Operand(key, LSL, shift_size));
2927 }
2928
2929 __ vldr(result, scratch, 0);
2930
2931 if (instr->hydrogen()->RequiresHoleCheck()) {
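    // The hole is a NaN with a distinguished upper word, so it suffices to
    // load the upper 32 bits of the value (sizeof(kHoleNanLower32) bytes past
    // the lower word on little-endian ARM) and compare against kHoleNanUpper32.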
2932 __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
2933 __ cmp(scratch, Operand(kHoleNanUpper32));
2934 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
2935 }
2936}
2937
2938
2939void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
2940 Register elements = ToRegister(instr->elements());
2941 Register result = ToRegister(instr->result());
2942 Register scratch = scratch0();
2943 Register store_base = scratch;
2944 int offset = instr->base_offset();
2945
2946 if (instr->key()->IsConstantOperand()) {
2947 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
2948 offset += ToInteger32(const_operand) * kPointerSize;
2949 store_base = elements;
2950 } else {
2951 Register key = ToRegister(instr->key());
2952 // Even though the HLoadKeyed instruction forces the input
2953 // representation for the key to be an integer, the input gets replaced
2954 // during bounds check elimination with the index argument to the bounds
2955 // check, which can be tagged, so that case must be handled here, too.
2956 if (instr->hydrogen()->key()->representation().IsSmi()) {
2957 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
2958 } else {
2959 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
2960 }
2961 }
2962 __ ldr(result, MemOperand(store_base, offset));
2963
2964 // Check for the hole value.
2965 if (instr->hydrogen()->RequiresHoleCheck()) {
2966 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
2967 __ SmiTst(result);
2968 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
2969 } else {
2970 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2971 __ cmp(result, scratch);
2972 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
2973 }
2974 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
2975 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
2976 Label done;
2977 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2978 __ cmp(result, scratch);
2979 __ b(ne, &done);
2980 if (info()->IsStub()) {
2981 // A stub can safely convert the hole to undefined only if the array
2982 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
2983 // it needs to bail out.
2984 __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
2985 __ ldr(result, FieldMemOperand(result, Cell::kValueOffset));
2986 __ cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
2987 DeoptimizeIf(ne, instr, Deoptimizer::kHole);
2988 }
2989 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2990 __ bind(&done);
2991 }
2992}
2993
2994
2995void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
2996 if (instr->is_fixed_typed_array()) {
2997 DoLoadKeyedExternalArray(instr);
2998 } else if (instr->hydrogen()->representation().IsDouble()) {
2999 DoLoadKeyedFixedDoubleArray(instr);
3000 } else {
3001 DoLoadKeyedFixedArray(instr);
3002 }
3003}
3004
3005
3006MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3007 Register base,
3008 bool key_is_constant,
3009 int constant_key,
3010 int element_size,
3011 int shift_size,
3012 int base_offset) {
3013 if (key_is_constant) {
3014 return MemOperand(base, (constant_key << element_size) + base_offset);
3015 }
3016
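  // A negative shift_size (-1) means the key is a smi but the elements are
  // byte-sized: the tagged key is shifted right by one to untag it instead of
  // being scaled up.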
3017 if (base_offset == 0) {
3018 if (shift_size >= 0) {
3019 return MemOperand(base, key, LSL, shift_size);
3020 } else {
3021 DCHECK_EQ(-1, shift_size);
3022 return MemOperand(base, key, LSR, 1);
3023 }
3024 }
3025
3026 if (shift_size >= 0) {
3027 __ add(scratch0(), base, Operand(key, LSL, shift_size));
3028 return MemOperand(scratch0(), base_offset);
3029 } else {
3030 DCHECK_EQ(-1, shift_size);
3031 __ add(scratch0(), base, Operand(key, ASR, 1));
3032 return MemOperand(scratch0(), base_offset);
3033 }
3034}
3035
3036
3037void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3038 DCHECK(ToRegister(instr->context()).is(cp));
3039 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3040 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3041
3042 if (instr->hydrogen()->HasVectorAndSlot()) {
3043 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3044 }
3045
3046 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
3047 isolate(), instr->hydrogen()->initialization_state())
3048 .code();
3049 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3050}
3051
3052
3053void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3054 Register scratch = scratch0();
3055 Register result = ToRegister(instr->result());
3056
3057 if (instr->hydrogen()->from_inlined()) {
3058 __ sub(result, sp, Operand(2 * kPointerSize));
3059 } else {
3060 // Check if the calling frame is an arguments adaptor frame.
3061 Label done, adapted;
3062 __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3063 __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
3064 __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3065
3066 // Result is the frame pointer of this frame if it is not adapted, or of
3067 // the real frame below the adaptor frame if it is.
3068 __ mov(result, fp, LeaveCC, ne);
3069 __ mov(result, scratch, LeaveCC, eq);
3070 }
3071}
3072
3073
3074void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3075 Register elem = ToRegister(instr->elements());
3076 Register result = ToRegister(instr->result());
3077
3078 Label done;
3079
3080 // If no arguments adaptor frame the number of arguments is fixed.
3081 __ cmp(fp, elem);
3082 __ mov(result, Operand(scope()->num_parameters()));
3083 __ b(eq, &done);
3084
3085 // Arguments adaptor frame present. Get argument length from there.
3086 __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3087 __ ldr(result,
3088 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3089 __ SmiUntag(result);
3090
3091 // Argument length is in result register.
3092 __ bind(&done);
3093}
3094
3095
3096void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3097 Register receiver = ToRegister(instr->receiver());
3098 Register function = ToRegister(instr->function());
3099 Register result = ToRegister(instr->result());
3100 Register scratch = scratch0();
3101
3102 // If the receiver is null or undefined, we have to pass the global
3103 // object as a receiver to normal functions. Values have to be
3104 // passed unchanged to builtins and strict-mode functions.
3105 Label global_object, result_in_receiver;
3106
3107 if (!instr->hydrogen()->known_function()) {
3108 // Do not transform the receiver to object for strict mode
3109 // functions.
3110 __ ldr(scratch,
3111 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3112 __ ldr(scratch,
3113 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
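    // The compiler hints are held in a smi field, hence the extra kSmiTagSize
    // in the bit positions tested below.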
3114 int mask = 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
3115 __ tst(scratch, Operand(mask));
3116 __ b(ne, &result_in_receiver);
3117
3118 // Do not transform the receiver to object for builtins.
3119 __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
3120 __ b(ne, &result_in_receiver);
3121 }
3122
3123 // Normal function. Replace undefined or null with global receiver.
3124 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3125 __ cmp(receiver, scratch);
3126 __ b(eq, &global_object);
3127 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3128 __ cmp(receiver, scratch);
3129 __ b(eq, &global_object);
3130
3131 // Deoptimize if the receiver is not a JS object.
3132 __ SmiTst(receiver);
3133 DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
3134 __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
3135 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
3136
3137 __ b(&result_in_receiver);
3138 __ bind(&global_object);
3139 __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
3140 __ ldr(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
3141 __ ldr(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
3142
3143 if (result.is(receiver)) {
3144 __ bind(&result_in_receiver);
3145 } else {
3146 Label result_ok;
3147 __ b(&result_ok);
3148 __ bind(&result_in_receiver);
3149 __ mov(result, receiver);
3150 __ bind(&result_ok);
3151 }
3152}
3153
3154
3155void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3156 Register receiver = ToRegister(instr->receiver());
3157 Register function = ToRegister(instr->function());
3158 Register length = ToRegister(instr->length());
3159 Register elements = ToRegister(instr->elements());
3160 Register scratch = scratch0();
3161 DCHECK(receiver.is(r0)); // Used for parameter count.
3162 DCHECK(function.is(r1)); // Required by InvokeFunction.
3163 DCHECK(ToRegister(instr->result()).is(r0));
3164
3165 // Copy the arguments to this function possibly from the
3166 // adaptor frame below it.
3167 const uint32_t kArgumentsLimit = 1 * KB;
3168 __ cmp(length, Operand(kArgumentsLimit));
3169 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
3170
3171 // Push the receiver and use the register to keep the original
3172 // number of arguments.
3173 __ push(receiver);
3174 __ mov(receiver, length);
3175 // The arguments start one pointer size past elements.
3176 __ add(elements, elements, Operand(1 * kPointerSize));
3177
3178 // Loop through the arguments pushing them onto the execution
3179 // stack.
3180 Label invoke, loop;
3181 // length is a small non-negative integer, due to the test above.
3182 __ cmp(length, Operand::Zero());
3183 __ b(eq, &invoke);
3184 __ bind(&loop);
3185 __ ldr(scratch, MemOperand(elements, length, LSL, 2));
3186 __ push(scratch);
3187 __ sub(length, length, Operand(1), SetCC);
3188 __ b(ne, &loop);
3189
3190 __ bind(&invoke);
3191 DCHECK(instr->HasPointerMap());
3192 LPointerMap* pointers = instr->pointer_map();
3193 SafepointGenerator safepoint_generator(
3194 this, pointers, Safepoint::kLazyDeopt);
3195 // The number of arguments is stored in receiver which is r0, as expected
3196 // by InvokeFunction.
3197 ParameterCount actual(receiver);
3198 __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
3199 safepoint_generator);
3200}
3201
3202
3203void LCodeGen::DoPushArgument(LPushArgument* instr) {
3204 LOperand* argument = instr->value();
3205 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3206 Abort(kDoPushArgumentNotImplementedForDoubleType);
3207 } else {
3208 Register argument_reg = EmitLoadRegister(argument, ip);
3209 __ push(argument_reg);
3210 }
3211}
3212
3213
3214void LCodeGen::DoDrop(LDrop* instr) {
3215 __ Drop(instr->count());
3216}
3217
3218
3219void LCodeGen::DoThisFunction(LThisFunction* instr) {
3220 Register result = ToRegister(instr->result());
3221 __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3222}
3223
3224
3225void LCodeGen::DoContext(LContext* instr) {
3226 // If there is a non-return use, the context must be moved to a register.
3227 Register result = ToRegister(instr->result());
3228 if (info()->IsOptimizing()) {
3229 __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3230 } else {
3231 // If there is no frame, the context must be in cp.
3232 DCHECK(result.is(cp));
3233 }
3234}
3235
3236
3237void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3238 DCHECK(ToRegister(instr->context()).is(cp));
3239 __ Move(scratch0(), instr->hydrogen()->pairs());
3240 __ push(scratch0());
3241 __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3242 __ push(scratch0());
3243 CallRuntime(Runtime::kDeclareGlobals, instr);
3244}
3245
3246
3247void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3248 int formal_parameter_count, int arity,
3249 LInstruction* instr) {
3250 bool dont_adapt_arguments =
3251 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3252 bool can_invoke_directly =
3253 dont_adapt_arguments || formal_parameter_count == arity;
3254
3255 Register function_reg = r1;
3256
3257 LPointerMap* pointers = instr->pointer_map();
3258
3259 if (can_invoke_directly) {
3260 // Change context.
3261 __ ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
3262
3263 // Always initialize new target and number of actual arguments.
3264 __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
3265 __ mov(r0, Operand(arity));
3266
3267 // Invoke function.
3268 __ ldr(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
3269 __ Call(ip);
3270
3271 // Set up deoptimization.
3272 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3273 } else {
3274 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3275 ParameterCount count(arity);
3276 ParameterCount expected(formal_parameter_count);
3277 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
3278 }
3279}
3280
3281
3282void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3283 DCHECK(instr->context() != NULL);
3284 DCHECK(ToRegister(instr->context()).is(cp));
3285 Register input = ToRegister(instr->value());
3286 Register result = ToRegister(instr->result());
3287 Register scratch = scratch0();
3288
3289 // Deoptimize if not a heap number.
3290 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3291 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3292 __ cmp(scratch, Operand(ip));
3293 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
3294
3295 Label done;
3296 Register exponent = scratch0();
3297 scratch = no_reg;
3298 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3299 // Check the sign of the argument. If the argument is positive, just
3300 // return it.
3301 __ tst(exponent, Operand(HeapNumber::kSignMask));
3302 // Move the input to the result if necessary.
3303 __ Move(result, input);
3304 __ b(eq, &done);
3305
3306 // Input is negative. Reverse its sign.
3307 // Preserve the value of all registers.
3308 {
3309 PushSafepointRegistersScope scope(this);
3310
3311 // Registers were saved at the safepoint, so we can use
3312 // many scratch registers.
3313 Register tmp1 = input.is(r1) ? r0 : r1;
3314 Register tmp2 = input.is(r2) ? r0 : r2;
3315 Register tmp3 = input.is(r3) ? r0 : r3;
3316 Register tmp4 = input.is(r4) ? r0 : r4;
3317
3318 // exponent: floating point exponent value.
3319
3320 Label allocated, slow;
3321 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3322 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3323 __ b(&allocated);
3324
3325 // Slow case: Call the runtime system to do the number allocation.
3326 __ bind(&slow);
3327
3328 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3329 instr->context());
3330    // Set the pointer to the new heap number in tmp1.
3331 if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
3332 // Restore input_reg after call to runtime.
3333 __ LoadFromSafepointRegisterSlot(input, input);
3334 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3335
3336 __ bind(&allocated);
3337 // exponent: floating point exponent value.
3338 // tmp1: allocated heap number.
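        // The absolute value is computed by clearing the sign bit in the
        // exponent word and copying the mantissa word unchanged.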
3339 __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
3340 __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3341 __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3342 __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3343
3344 __ StoreToSafepointRegisterSlot(tmp1, result);
3345 }
3346
3347 __ bind(&done);
3348}
3349
3350
3351void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3352 Register input = ToRegister(instr->value());
3353 Register result = ToRegister(instr->result());
3354 __ cmp(input, Operand::Zero());
3355 __ Move(result, input, pl);
3356 // We can make rsb conditional because the previous cmp instruction
3357 // will clear the V (overflow) flag and rsb won't set this flag
3358 // if input is positive.
3359 __ rsb(result, input, Operand::Zero(), SetCC, mi);
3360 // Deoptimize on overflow.
3361 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
3362}
3363
3364
3365void LCodeGen::DoMathAbs(LMathAbs* instr) {
3366 // Class for deferred case.
3367 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3368 public:
3369 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3370 : LDeferredCode(codegen), instr_(instr) { }
3371 void Generate() override {
3372 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3373 }
3374 LInstruction* instr() override { return instr_; }
3375
3376 private:
3377 LMathAbs* instr_;
3378 };
3379
3380 Representation r = instr->hydrogen()->value()->representation();
3381 if (r.IsDouble()) {
3382 DwVfpRegister input = ToDoubleRegister(instr->value());
3383 DwVfpRegister result = ToDoubleRegister(instr->result());
3384 __ vabs(result, input);
3385 } else if (r.IsSmiOrInteger32()) {
3386 EmitIntegerMathAbs(instr);
3387 } else {
3388 // Representation is tagged.
3389 DeferredMathAbsTaggedHeapNumber* deferred =
3390 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3391 Register input = ToRegister(instr->value());
3392 // Smi check.
3393 __ JumpIfNotSmi(input, deferred->entry());
3394 // If smi, handle it directly.
3395 EmitIntegerMathAbs(instr);
3396 __ bind(deferred->exit());
3397 }
3398}
3399
3400
3401void LCodeGen::DoMathFloor(LMathFloor* instr) {
3402 DwVfpRegister input = ToDoubleRegister(instr->value());
3403 Register result = ToRegister(instr->result());
3404 Register input_high = scratch0();
3405 Label done, exact;
3406
3407 __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
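      // TryInt32Floor only falls through when the conversion fails (lost
      // precision or NaN), so this deopt is unconditional.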
3408 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
3409
3410 __ bind(&exact);
3411 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3412 // Test for -0.
3413 __ cmp(result, Operand::Zero());
3414 __ b(ne, &done);
3415 __ cmp(input_high, Operand::Zero());
3416 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
3417 }
3418 __ bind(&done);
3419}
3420
3421
3422void LCodeGen::DoMathRound(LMathRound* instr) {
3423 DwVfpRegister input = ToDoubleRegister(instr->value());
3424 Register result = ToRegister(instr->result());
3425 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
3426 DwVfpRegister input_plus_dot_five = double_scratch1;
3427 Register input_high = scratch0();
3428 DwVfpRegister dot_five = double_scratch0();
3429 Label convert, done;
3430
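      // Math.round(x) is computed as floor(x + 0.5) once |x| exceeds 0.5; the
      // cases with |x| <= 0.5 are handled separately below so that -0 and the
      // +0.5 boundary produce the required results.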
3431 __ Vmov(dot_five, 0.5, scratch0());
3432 __ vabs(double_scratch1, input);
3433 __ VFPCompareAndSetFlags(double_scratch1, dot_five);
3434 // If input is in [-0.5, -0], the result is -0.
3435 // If input is in [+0, +0.5[, the result is +0.
3436 // If the input is +0.5, the result is 1.
3437 __ b(hi, &convert); // Out of [-0.5, +0.5].
3438 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3439 __ VmovHigh(input_high, input);
3440 __ cmp(input_high, Operand::Zero());
3441 // [-0.5, -0].
3442 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
3443 }
3444 __ VFPCompareAndSetFlags(input, dot_five);
3445 __ mov(result, Operand(1), LeaveCC, eq); // +0.5.
3446 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
3447 // flag kBailoutOnMinusZero.
3448 __ mov(result, Operand::Zero(), LeaveCC, ne);
3449 __ b(&done);
3450
3451 __ bind(&convert);
3452 __ vadd(input_plus_dot_five, input, dot_five);
3453 // Reuse dot_five (double_scratch0) as we no longer need this value.
3454 __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
3455 &done, &done);
3456 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
3457 __ bind(&done);
3458}
3459
3460
3461void LCodeGen::DoMathFround(LMathFround* instr) {
3462 DwVfpRegister input_reg = ToDoubleRegister(instr->value());
3463 DwVfpRegister output_reg = ToDoubleRegister(instr->result());
3464 LowDwVfpRegister scratch = double_scratch0();
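      // Rounding to float32 precision is done by narrowing to single
      // precision and widening back to double.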
3465 __ vcvt_f32_f64(scratch.low(), input_reg);
3466 __ vcvt_f64_f32(output_reg, scratch.low());
3467}
3468
3469
3470void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3471 DwVfpRegister input = ToDoubleRegister(instr->value());
3472 DwVfpRegister result = ToDoubleRegister(instr->result());
3473 __ vsqrt(result, input);
3474}
3475
3476
3477void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3478 DwVfpRegister input = ToDoubleRegister(instr->value());
3479 DwVfpRegister result = ToDoubleRegister(instr->result());
3480 DwVfpRegister temp = double_scratch0();
3481
3482 // Note that according to ECMA-262 15.8.2.13:
3483 // Math.pow(-Infinity, 0.5) == Infinity
3484 // Math.sqrt(-Infinity) == NaN
3485 Label done;
3486 __ vmov(temp, -V8_INFINITY, scratch0());
3487 __ VFPCompareAndSetFlags(input, temp);
3488 __ vneg(result, temp, eq);
3489 __ b(&done, eq);
3490
3491 // Add +0 to convert -0 to +0.
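      // (IEEE-754 sqrt(-0) is -0, but Math.pow(-0, 0.5) must return +0.)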
3492 __ vadd(result, input, kDoubleRegZero);
3493 __ vsqrt(result, result);
3494 __ bind(&done);
3495}
3496
3497
3498void LCodeGen::DoPower(LPower* instr) {
3499 Representation exponent_type = instr->hydrogen()->right()->representation();
3500 // Having marked this as a call, we can use any registers.
3501 // Just make sure that the input/output registers are the expected ones.
3502 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3503 DCHECK(!instr->right()->IsDoubleRegister() ||
3504 ToDoubleRegister(instr->right()).is(d1));
3505 DCHECK(!instr->right()->IsRegister() ||
3506 ToRegister(instr->right()).is(tagged_exponent));
3507 DCHECK(ToDoubleRegister(instr->left()).is(d0));
3508 DCHECK(ToDoubleRegister(instr->result()).is(d2));
3509
3510 if (exponent_type.IsSmi()) {
3511 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3512 __ CallStub(&stub);
3513 } else if (exponent_type.IsTagged()) {
3514 Label no_deopt;
3515 __ JumpIfSmi(tagged_exponent, &no_deopt);
3516 DCHECK(!r6.is(tagged_exponent));
3517 __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3518 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3519 __ cmp(r6, Operand(ip));
3520 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
3521 __ bind(&no_deopt);
3522 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3523 __ CallStub(&stub);
3524 } else if (exponent_type.IsInteger32()) {
3525 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3526 __ CallStub(&stub);
3527 } else {
3528 DCHECK(exponent_type.IsDouble());
3529 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3530 __ CallStub(&stub);
3531 }
3532}
3533
3534
3535void LCodeGen::DoMathExp(LMathExp* instr) {
3536 DwVfpRegister input = ToDoubleRegister(instr->value());
3537 DwVfpRegister result = ToDoubleRegister(instr->result());
3538 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3539 DwVfpRegister double_scratch2 = double_scratch0();
3540 Register temp1 = ToRegister(instr->temp1());
3541 Register temp2 = ToRegister(instr->temp2());
3542
3543 MathExpGenerator::EmitMathExp(
3544 masm(), input, result, double_scratch1, double_scratch2,
3545 temp1, temp2, scratch0());
3546}
3547
3548
3549void LCodeGen::DoMathLog(LMathLog* instr) {
3550 __ PrepareCallCFunction(0, 1, scratch0());
3551 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3552 __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
3553 0, 1);
3554 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3555}
3556
3557
3558void LCodeGen::DoMathClz32(LMathClz32* instr) {
3559 Register input = ToRegister(instr->value());
3560 Register result = ToRegister(instr->result());
3561 __ clz(result, input);
3562}
3563
3564
3565void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3566 DCHECK(ToRegister(instr->context()).is(cp));
3567 DCHECK(ToRegister(instr->function()).is(r1));
3568 DCHECK(instr->HasPointerMap());
3569
3570 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3571 if (known_function.is_null()) {
3572 LPointerMap* pointers = instr->pointer_map();
3573 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3574 ParameterCount count(instr->arity());
3575 __ InvokeFunction(r1, no_reg, count, CALL_FUNCTION, generator);
3576 } else {
3577 CallKnownFunction(known_function,
3578 instr->hydrogen()->formal_parameter_count(),
3579 instr->arity(), instr);
3580 }
3581}
3582
3583
3584void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3585 DCHECK(ToRegister(instr->result()).is(r0));
3586
3587 if (instr->hydrogen()->IsTailCall()) {
3588 if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
3589
3590 if (instr->target()->IsConstantOperand()) {
3591 LConstantOperand* target = LConstantOperand::cast(instr->target());
3592 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3593 __ Jump(code, RelocInfo::CODE_TARGET);
3594 } else {
3595 DCHECK(instr->target()->IsRegister());
3596 Register target = ToRegister(instr->target());
3597      // Make sure we don't emit any additional entries in the constant
3598      // pool before the call, to ensure that CallCodeSize() calculated
3599      // the correct number of instructions for the
3600      // constant pool load.
3601 {
3602 ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
3603 __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3604 }
3605 __ Jump(target);
3606 }
3607 } else {
3608 LPointerMap* pointers = instr->pointer_map();
3609 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3610
3611 if (instr->target()->IsConstantOperand()) {
3612 LConstantOperand* target = LConstantOperand::cast(instr->target());
3613 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3614 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3615 PlatformInterfaceDescriptor* call_descriptor =
3616 instr->descriptor().platform_specific_descriptor();
3617 if (call_descriptor != NULL) {
3618 __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
3619 call_descriptor->storage_mode());
3620 } else {
3621 __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al);
3622 }
3623 } else {
3624 DCHECK(instr->target()->IsRegister());
3625 Register target = ToRegister(instr->target());
3626 generator.BeforeCall(__ CallSize(target));
3627      // Make sure we don't emit any additional entries in the constant
3628      // pool before the call, to ensure that CallCodeSize() calculated
3629      // the correct number of instructions for the
3630      // constant pool load.
3631 {
3632 ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
3633 __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3634 }
3635 __ Call(target);
3636 }
3637 generator.AfterCall();
3638 }
3639}
3640
3641
3642void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3643 DCHECK(ToRegister(instr->function()).is(r1));
3644 DCHECK(ToRegister(instr->result()).is(r0));
3645
3646 // Change context.
3647 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
3648
3649 // Always initialize new target and number of actual arguments.
3650 __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
3651 __ mov(r0, Operand(instr->arity()));
3652
3653 // Load the code entry address
3654 __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
3655 __ Call(ip);
3656
3657 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3658}
3659
3660
3661void LCodeGen::DoCallFunction(LCallFunction* instr) {
3662  HCallFunction* hinstr = instr->hydrogen();
3663  DCHECK(ToRegister(instr->context()).is(cp));
3664 DCHECK(ToRegister(instr->function()).is(r1));
3665 DCHECK(ToRegister(instr->result()).is(r0));
3666
3667 int arity = instr->arity();
3668  ConvertReceiverMode mode = hinstr->convert_mode();
3669 if (hinstr->HasVectorAndSlot()) {
3670    Register slot_register = ToRegister(instr->temp_slot());
3671 Register vector_register = ToRegister(instr->temp_vector());
3672 DCHECK(slot_register.is(r3));
3673 DCHECK(vector_register.is(r2));
3674
3675 AllowDeferredHandleDereference vector_structure_check;
3676    Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
3677 int index = vector->GetIndex(hinstr->slot());
3678
3679 __ Move(vector_register, vector);
3680 __ mov(slot_register, Operand(Smi::FromInt(index)));
3681
3682 Handle<Code> ic =
3683 CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
3684 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3685 } else {
3686 __ mov(r0, Operand(arity));
3687 CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
3688 }
3689}
3690
3691
3692void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3693 DCHECK(ToRegister(instr->context()).is(cp));
3694 DCHECK(ToRegister(instr->constructor()).is(r1));
3695 DCHECK(ToRegister(instr->result()).is(r0));
3696
3697 __ mov(r0, Operand(instr->arity()));
3698 if (instr->arity() == 1) {
3699    // We only need the allocation site when there is a length argument.
3700    // That case may bail out to the runtime, which will determine the correct
3701    // elements kind with the site.
3702 __ Move(r2, instr->hydrogen()->site());
3703 } else {
3704 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
3705 }
3706 ElementsKind kind = instr->hydrogen()->elements_kind();
3707 AllocationSiteOverrideMode override_mode =
3708 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
3709 ? DISABLE_ALLOCATION_SITES
3710 : DONT_OVERRIDE;
3711
3712 if (instr->arity() == 0) {
3713 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
3714 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3715 } else if (instr->arity() == 1) {
3716 Label done;
3717 if (IsFastPackedElementsKind(kind)) {
3718 Label packed_case;
3719      // Look at the single (length) argument: if it is zero, the packed
3720      // constructor stub suffices; otherwise use the holey variant.
3721 __ ldr(r5, MemOperand(sp, 0));
3722 __ cmp(r5, Operand::Zero());
3723 __ b(eq, &packed_case);
3724
3725 ElementsKind holey_kind = GetHoleyElementsKind(kind);
3726 ArraySingleArgumentConstructorStub stub(isolate(),
3727 holey_kind,
3728 override_mode);
3729 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3730 __ jmp(&done);
3731 __ bind(&packed_case);
3732 }
3733
3734 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
3735 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3736 __ bind(&done);
3737 } else {
3738 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
3739 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3740 }
3741}
3742
3743
3744void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3745 CallRuntime(instr->function(), instr->arity(), instr);
3746}
3747
3748
3749void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3750 Register function = ToRegister(instr->function());
3751 Register code_object = ToRegister(instr->code_object());
3752 __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
3753 __ str(code_object,
3754 FieldMemOperand(function, JSFunction::kCodeEntryOffset));
3755}
3756
3757
3758void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3759 Register result = ToRegister(instr->result());
3760 Register base = ToRegister(instr->base_object());
3761 if (instr->offset()->IsConstantOperand()) {
3762 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
3763 __ add(result, base, Operand(ToInteger32(offset)));
3764 } else {
3765 Register offset = ToRegister(instr->offset());
3766 __ add(result, base, offset);
3767 }
3768}
3769
3770
3771void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3772 Representation representation = instr->representation();
3773
3774 Register object = ToRegister(instr->object());
3775 Register scratch = scratch0();
3776 HObjectAccess access = instr->hydrogen()->access();
3777 int offset = access.offset();
3778
3779 if (access.IsExternalMemory()) {
3780 Register value = ToRegister(instr->value());
3781 MemOperand operand = MemOperand(object, offset);
3782 __ Store(value, operand, representation);
3783 return;
3784 }
3785
3786 __ AssertNotSmi(object);
3787
3788 DCHECK(!representation.IsSmi() ||
3789 !instr->value()->IsConstantOperand() ||
3790 IsSmi(LConstantOperand::cast(instr->value())));
3791 if (representation.IsDouble()) {
3792 DCHECK(access.IsInobject());
3793 DCHECK(!instr->hydrogen()->has_transition());
3794 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
3795 DwVfpRegister value = ToDoubleRegister(instr->value());
3796 __ vstr(value, FieldMemOperand(object, offset));
3797 return;
3798 }
3799
3800 if (instr->hydrogen()->has_transition()) {
3801 Handle<Map> transition = instr->hydrogen()->transition_map();
3802 AddDeprecationDependency(transition);
3803 __ mov(scratch, Operand(transition));
3804 __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3805 if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
3806 Register temp = ToRegister(instr->temp());
3807 // Update the write barrier for the map field.
3808 __ RecordWriteForMap(object,
3809 scratch,
3810 temp,
3811 GetLinkRegisterState(),
3812 kSaveFPRegs);
3813 }
3814 }
3815
3816 // Do the store.
3817 Register value = ToRegister(instr->value());
3818 if (access.IsInobject()) {
3819 MemOperand operand = FieldMemOperand(object, offset);
3820 __ Store(value, operand, representation);
3821 if (instr->hydrogen()->NeedsWriteBarrier()) {
3822 // Update the write barrier for the object for in-object properties.
3823 __ RecordWriteField(object,
3824 offset,
3825 value,
3826 scratch,
3827 GetLinkRegisterState(),
3828 kSaveFPRegs,
3829 EMIT_REMEMBERED_SET,
3830 instr->hydrogen()->SmiCheckForWriteBarrier(),
3831 instr->hydrogen()->PointersToHereCheckForValue());
3832 }
3833 } else {
3834 __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
3835 MemOperand operand = FieldMemOperand(scratch, offset);
3836 __ Store(value, operand, representation);
3837 if (instr->hydrogen()->NeedsWriteBarrier()) {
3838 // Update the write barrier for the properties array.
3839 // object is used as a scratch register.
3840 __ RecordWriteField(scratch,
3841 offset,
3842 value,
3843 object,
3844 GetLinkRegisterState(),
3845 kSaveFPRegs,
3846 EMIT_REMEMBERED_SET,
3847 instr->hydrogen()->SmiCheckForWriteBarrier(),
3848 instr->hydrogen()->PointersToHereCheckForValue());
3849 }
3850 }
3851}
3852
3853
3854void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3855 DCHECK(ToRegister(instr->context()).is(cp));
3856 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
3857 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
3858
3859 if (instr->hydrogen()->HasVectorAndSlot()) {
3860 EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
3861 }
3862
3863 __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
3864 Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
3865 isolate(), instr->language_mode(),
3866 instr->hydrogen()->initialization_state()).code();
3867 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3868}
3869
3870
3871void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3872 Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
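      // The check fails (and we deoptimize) when index >= length, or when
      // index > length if equality is allowed.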
3873 if (instr->index()->IsConstantOperand()) {
3874 Operand index = ToOperand(instr->index());
3875 Register length = ToRegister(instr->length());
3876 __ cmp(length, index);
3877 cc = CommuteCondition(cc);
3878 } else {
3879 Register index = ToRegister(instr->index());
3880 Operand length = ToOperand(instr->length());
3881 __ cmp(index, length);
3882 }
3883 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
3884 Label done;
3885 __ b(NegateCondition(cc), &done);
3886 __ stop("eliminated bounds check failed");
3887 __ bind(&done);
3888 } else {
3889 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
3890 }
3891}
3892
3893
3894void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
3895 Register external_pointer = ToRegister(instr->elements());
3896 Register key = no_reg;
3897 ElementsKind elements_kind = instr->elements_kind();
3898 bool key_is_constant = instr->key()->IsConstantOperand();
3899 int constant_key = 0;
3900 if (key_is_constant) {
3901 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3902 if (constant_key & 0xF0000000) {
3903 Abort(kArrayIndexConstantValueTooBig);
3904 }
3905 } else {
3906 key = ToRegister(instr->key());
3907 }
3908 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3909 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3910 ? (element_size_shift - kSmiTagSize) : element_size_shift;
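      // Smi keys carry an implicit shift by kSmiTagSize, so the scale used
      // for addressing is reduced accordingly.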
3911 int base_offset = instr->base_offset();
3912
3913 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
3914 Register address = scratch0();
3915 DwVfpRegister value(ToDoubleRegister(instr->value()));
3916 if (key_is_constant) {
3917 if (constant_key != 0) {
3918 __ add(address, external_pointer,
3919 Operand(constant_key << element_size_shift));
3920 } else {
3921 address = external_pointer;
3922 }
3923 } else {
3924 __ add(address, external_pointer, Operand(key, LSL, shift_size));
3925 }
3926 if (elements_kind == FLOAT32_ELEMENTS) {
3927 __ vcvt_f32_f64(double_scratch0().low(), value);
3928 __ vstr(double_scratch0().low(), address, base_offset);
3929 } else { // Storing doubles, not floats.
3930 __ vstr(value, address, base_offset);
3931 }
3932 } else {
3933 Register value(ToRegister(instr->value()));
3934 MemOperand mem_operand = PrepareKeyedOperand(
3935 key, external_pointer, key_is_constant, constant_key,
3936 element_size_shift, shift_size,
3937 base_offset);
3938 switch (elements_kind) {
3939 case UINT8_ELEMENTS:
3940 case UINT8_CLAMPED_ELEMENTS:
3941 case INT8_ELEMENTS:
3942 __ strb(value, mem_operand);
3943 break;
3944 case INT16_ELEMENTS:
3945 case UINT16_ELEMENTS:
3946 __ strh(value, mem_operand);
3947 break;
3948 case INT32_ELEMENTS:
3949 case UINT32_ELEMENTS:
3950 __ str(value, mem_operand);
3951 break;
3952 case FLOAT32_ELEMENTS:
3953 case FLOAT64_ELEMENTS:
3954 case FAST_DOUBLE_ELEMENTS:
3955 case FAST_ELEMENTS:
3956 case FAST_SMI_ELEMENTS:
3957 case FAST_HOLEY_DOUBLE_ELEMENTS:
3958 case FAST_HOLEY_ELEMENTS:
3959 case FAST_HOLEY_SMI_ELEMENTS:
3960 case DICTIONARY_ELEMENTS:
3961 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
3962 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
3963      case FAST_STRING_WRAPPER_ELEMENTS:
3964 case SLOW_STRING_WRAPPER_ELEMENTS:
3965 case NO_ELEMENTS:
3966        UNREACHABLE();
3967 break;
3968 }
3969 }
3970}
3971
3972
3973void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
3974 DwVfpRegister value = ToDoubleRegister(instr->value());
3975 Register elements = ToRegister(instr->elements());
3976 Register scratch = scratch0();
3977 DwVfpRegister double_scratch = double_scratch0();
3978 bool key_is_constant = instr->key()->IsConstantOperand();
3979 int base_offset = instr->base_offset();
3980
3981 // Calculate the effective address of the slot in the array to store the
3982 // double value.
3983 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3984 if (key_is_constant) {
3985 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3986 if (constant_key & 0xF0000000) {
3987 Abort(kArrayIndexConstantValueTooBig);
3988 }
3989 __ add(scratch, elements,
3990 Operand((constant_key << element_size_shift) + base_offset));
3991 } else {
3992 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3993 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3994 __ add(scratch, elements, Operand(base_offset));
3995 __ add(scratch, scratch,
3996 Operand(ToRegister(instr->key()), LSL, shift_size));
3997 }
3998
3999 if (instr->NeedsCanonicalization()) {
4000 // Force a canonical NaN.
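        // The hole in a double array is encoded as a dedicated NaN bit
        // pattern, so NaNs being stored must be canonicalized to avoid being
        // mistaken for the hole.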
4001 if (masm()->emit_debug_code()) {
4002 __ vmrs(ip);
4003 __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
4004 __ Assert(ne, kDefaultNaNModeNotSet);
4005 }
4006 __ VFPCanonicalizeNaN(double_scratch, value);
4007 __ vstr(double_scratch, scratch, 0);
4008 } else {
4009 __ vstr(value, scratch, 0);
4010 }
4011}
4012
4013
4014void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4015 Register value = ToRegister(instr->value());
4016 Register elements = ToRegister(instr->elements());
4017 Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
4018 : no_reg;
4019 Register scratch = scratch0();
4020 Register store_base = scratch;
4021 int offset = instr->base_offset();
4022
4023 // Do the store.
4024 if (instr->key()->IsConstantOperand()) {
4025 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4026 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4027 offset += ToInteger32(const_operand) * kPointerSize;
4028 store_base = elements;
4029 } else {
4030    // Even though the HStoreKeyed instruction forces the input
4031 // representation for the key to be an integer, the input gets replaced
4032 // during bound check elimination with the index argument to the bounds
4033 // check, which can be tagged, so that case must be handled here, too.
4034 if (instr->hydrogen()->key()->representation().IsSmi()) {
4035 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
4036 } else {
4037 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
4038 }
4039 }
4040 __ str(value, MemOperand(store_base, offset));
4041
4042 if (instr->hydrogen()->NeedsWriteBarrier()) {
4043 SmiCheck check_needed =
4044 instr->hydrogen()->value()->type().IsHeapObject()
4045 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4046 // Compute address of modified element and store it into key register.
4047 __ add(key, store_base, Operand(offset));
4048 __ RecordWrite(elements,
4049 key,
4050 value,
4051 GetLinkRegisterState(),
4052 kSaveFPRegs,
4053 EMIT_REMEMBERED_SET,
4054 check_needed,
4055 instr->hydrogen()->PointersToHereCheckForValue());
4056 }
4057}
4058
4059
4060void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4061  // By cases: external (typed) array, fast double array, or fast array.
4062 if (instr->is_fixed_typed_array()) {
4063 DoStoreKeyedExternalArray(instr);
4064 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4065 DoStoreKeyedFixedDoubleArray(instr);
4066 } else {
4067 DoStoreKeyedFixedArray(instr);
4068 }
4069}
4070
4071
4072void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4073 DCHECK(ToRegister(instr->context()).is(cp));
4074 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4075 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4076 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4077
4078 if (instr->hydrogen()->HasVectorAndSlot()) {
4079 EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
4080 }
4081
4082 Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
4083 isolate(), instr->language_mode(),
4084 instr->hydrogen()->initialization_state()).code();
4085 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
4086}
4087
4088
4089void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
4090 class DeferredMaybeGrowElements final : public LDeferredCode {
4091 public:
4092 DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
4093 : LDeferredCode(codegen), instr_(instr) {}
4094 void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
4095 LInstruction* instr() override { return instr_; }
4096
4097 private:
4098 LMaybeGrowElements* instr_;
4099 };
4100
4101 Register result = r0;
4102 DeferredMaybeGrowElements* deferred =
4103 new (zone()) DeferredMaybeGrowElements(this, instr);
4104 LOperand* key = instr->key();
4105 LOperand* current_capacity = instr->current_capacity();
4106
4107 DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
4108 DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
4109 DCHECK(key->IsConstantOperand() || key->IsRegister());
4110 DCHECK(current_capacity->IsConstantOperand() ||
4111 current_capacity->IsRegister());
4112
4113 if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
4114 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4115 int32_t constant_capacity =
4116 ToInteger32(LConstantOperand::cast(current_capacity));
4117 if (constant_key >= constant_capacity) {
4118 // Deferred case.
4119 __ jmp(deferred->entry());
4120 }
4121 } else if (key->IsConstantOperand()) {
4122 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4123 __ cmp(ToRegister(current_capacity), Operand(constant_key));
4124 __ b(le, deferred->entry());
4125 } else if (current_capacity->IsConstantOperand()) {
4126 int32_t constant_capacity =
4127 ToInteger32(LConstantOperand::cast(current_capacity));
4128 __ cmp(ToRegister(key), Operand(constant_capacity));
4129 __ b(ge, deferred->entry());
4130 } else {
4131 __ cmp(ToRegister(key), ToRegister(current_capacity));
4132 __ b(ge, deferred->entry());
4133 }
4134
4135 if (instr->elements()->IsRegister()) {
4136 __ Move(result, ToRegister(instr->elements()));
4137 } else {
4138 __ ldr(result, ToMemOperand(instr->elements()));
4139 }
4140
4141 __ bind(deferred->exit());
4142}
4143
4144
4145void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
4146 // TODO(3095996): Get rid of this. For now, we need to make the
4147 // result register contain a valid pointer because it is already
4148 // contained in the register pointer map.
4149 Register result = r0;
4150 __ mov(result, Operand::Zero());
4151
4152 // We have to call a stub.
4153 {
4154 PushSafepointRegistersScope scope(this);
4155 if (instr->object()->IsRegister()) {
4156 __ Move(result, ToRegister(instr->object()));
4157 } else {
4158 __ ldr(result, ToMemOperand(instr->object()));
4159 }
4160
4161 LOperand* key = instr->key();
4162 if (key->IsConstantOperand()) {
4163 __ Move(r3, Operand(ToSmi(LConstantOperand::cast(key))));
4164 } else {
4165 __ Move(r3, ToRegister(key));
4166 __ SmiTag(r3);
4167 }
4168
4169 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
4170 instr->hydrogen()->kind());
4171 __ CallStub(&stub);
4172 RecordSafepointWithLazyDeopt(
4173 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4174 __ StoreToSafepointRegisterSlot(result, result);
4175 }
4176
4177 // Deopt on smi, which means the elements array changed to dictionary mode.
4178 __ SmiTst(result);
4179 DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
4180}
4181
4182
4183void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4184 Register object_reg = ToRegister(instr->object());
4185 Register scratch = scratch0();
4186
4187 Handle<Map> from_map = instr->original_map();
4188 Handle<Map> to_map = instr->transitioned_map();
4189 ElementsKind from_kind = instr->from_kind();
4190 ElementsKind to_kind = instr->to_kind();
4191
4192 Label not_applicable;
4193 __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4194 __ cmp(scratch, Operand(from_map));
4195 __ b(ne, &not_applicable);
4196
4197 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4198 Register new_map_reg = ToRegister(instr->new_map_temp());
4199 __ mov(new_map_reg, Operand(to_map));
4200 __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4201 // Write barrier.
4202 __ RecordWriteForMap(object_reg,
4203 new_map_reg,
4204 scratch,
4205 GetLinkRegisterState(),
4206 kDontSaveFPRegs);
4207 } else {
4208 DCHECK(ToRegister(instr->context()).is(cp));
4209 DCHECK(object_reg.is(r0));
4210 PushSafepointRegistersScope scope(this);
4211 __ Move(r1, to_map);
4212 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4213 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4214 __ CallStub(&stub);
4215 RecordSafepointWithRegisters(
4216 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
4217 }
4218 __ bind(&not_applicable);
4219}
4220
4221
4222void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4223 Register object = ToRegister(instr->object());
4224 Register temp = ToRegister(instr->temp());
4225 Label no_memento_found;
4226 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4227 DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
4228 __ bind(&no_memento_found);
4229}
4230
4231
4232void LCodeGen::DoStringAdd(LStringAdd* instr) {
4233 DCHECK(ToRegister(instr->context()).is(cp));
4234 DCHECK(ToRegister(instr->left()).is(r1));
4235 DCHECK(ToRegister(instr->right()).is(r0));
4236 StringAddStub stub(isolate(),
4237 instr->hydrogen()->flags(),
4238 instr->hydrogen()->pretenure_flag());
4239 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4240}
4241
4242
4243void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4244 class DeferredStringCharCodeAt final : public LDeferredCode {
4245 public:
4246 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4247 : LDeferredCode(codegen), instr_(instr) { }
4248 void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
4249 LInstruction* instr() override { return instr_; }
4250
4251 private:
4252 LStringCharCodeAt* instr_;
4253 };
4254
4255 DeferredStringCharCodeAt* deferred =
4256 new(zone()) DeferredStringCharCodeAt(this, instr);
4257
4258 StringCharLoadGenerator::Generate(masm(),
4259 ToRegister(instr->string()),
4260 ToRegister(instr->index()),
4261 ToRegister(instr->result()),
4262 deferred->entry());
4263 __ bind(deferred->exit());
4264}
4265
4266
4267void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4268 Register string = ToRegister(instr->string());
4269 Register result = ToRegister(instr->result());
4270 Register scratch = scratch0();
4271
4272 // TODO(3095996): Get rid of this. For now, we need to make the
4273 // result register contain a valid pointer because it is already
4274 // contained in the register pointer map.
4275 __ mov(result, Operand::Zero());
4276
4277 PushSafepointRegistersScope scope(this);
4278 __ push(string);
4279 // Push the index as a smi. This is safe because of the checks in
4280 // DoStringCharCodeAt above.
4281 if (instr->index()->IsConstantOperand()) {
4282 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4283 __ mov(scratch, Operand(Smi::FromInt(const_index)));
4284 __ push(scratch);
4285 } else {
4286 Register index = ToRegister(instr->index());
4287 __ SmiTag(index);
4288 __ push(index);
4289 }
4290 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
4291 instr->context());
4292 __ AssertSmi(r0);
4293 __ SmiUntag(r0);
4294 __ StoreToSafepointRegisterSlot(r0, result);
4295}
4296
4297
4298void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4299 class DeferredStringCharFromCode final : public LDeferredCode {
4300 public:
4301 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4302 : LDeferredCode(codegen), instr_(instr) { }
4303 void Generate() override {
4304 codegen()->DoDeferredStringCharFromCode(instr_);
4305 }
4306 LInstruction* instr() override { return instr_; }
4307
4308 private:
4309 LStringCharFromCode* instr_;
4310 };
4311
4312 DeferredStringCharFromCode* deferred =
4313 new(zone()) DeferredStringCharFromCode(this, instr);
4314
4315 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4316 Register char_code = ToRegister(instr->char_code());
4317 Register result = ToRegister(instr->result());
4318 DCHECK(!char_code.is(result));
4319
4320 __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
4321 __ b(hi, deferred->entry());
4322 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4323 __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
4324 __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4325 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4326 __ cmp(result, ip);
4327 __ b(eq, deferred->entry());
4328 __ bind(deferred->exit());
4329}
4330
4331
4332void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4333 Register char_code = ToRegister(instr->char_code());
4334 Register result = ToRegister(instr->result());
4335
4336 // TODO(3095996): Get rid of this. For now, we need to make the
4337 // result register contain a valid pointer because it is already
4338 // contained in the register pointer map.
4339 __ mov(result, Operand::Zero());
4340
4341 PushSafepointRegistersScope scope(this);
4342 __ SmiTag(char_code);
4343 __ push(char_code);
4344 CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
4345 instr->context());
4346 __ StoreToSafepointRegisterSlot(r0, result);
4347}
4348
4349
4350void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4351 LOperand* input = instr->value();
4352 DCHECK(input->IsRegister() || input->IsStackSlot());
4353 LOperand* output = instr->result();
4354 DCHECK(output->IsDoubleRegister());
4355 SwVfpRegister single_scratch = double_scratch0().low();
4356 if (input->IsStackSlot()) {
4357 Register scratch = scratch0();
4358 __ ldr(scratch, ToMemOperand(input));
4359 __ vmov(single_scratch, scratch);
4360 } else {
4361 __ vmov(single_scratch, ToRegister(input));
4362 }
4363 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
4364}
4365
4366
4367void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4368 LOperand* input = instr->value();
4369 LOperand* output = instr->result();
4370
4371 SwVfpRegister flt_scratch = double_scratch0().low();
4372 __ vmov(flt_scratch, ToRegister(input));
4373 __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
4374}
4375
4376
4377void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4378 class DeferredNumberTagI final : public LDeferredCode {
4379 public:
4380 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4381 : LDeferredCode(codegen), instr_(instr) { }
4382 void Generate() override {
4383 codegen()->DoDeferredNumberTagIU(instr_,
4384 instr_->value(),
4385 instr_->temp1(),
4386 instr_->temp2(),
4387 SIGNED_INT32);
4388 }
4389 LInstruction* instr() override { return instr_; }
4390
4391 private:
4392 LNumberTagI* instr_;
4393 };
4394
4395 Register src = ToRegister(instr->value());
4396 Register dst = ToRegister(instr->result());
4397
4398 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4399 __ SmiTag(dst, src, SetCC);
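      // SmiTag sets the overflow flag when the value does not fit in a smi;
      // in that case the deferred code boxes it in a heap number instead.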
4400 __ b(vs, deferred->entry());
4401 __ bind(deferred->exit());
4402}
4403
4404
4405void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4406 class DeferredNumberTagU final : public LDeferredCode {
4407 public:
4408 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4409 : LDeferredCode(codegen), instr_(instr) { }
4410 void Generate() override {
4411 codegen()->DoDeferredNumberTagIU(instr_,
4412 instr_->value(),
4413 instr_->temp1(),
4414 instr_->temp2(),
4415 UNSIGNED_INT32);
4416 }
4417 LInstruction* instr() override { return instr_; }
4418
4419 private:
4420 LNumberTagU* instr_;
4421 };
4422
4423 Register input = ToRegister(instr->value());
4424 Register result = ToRegister(instr->result());
4425
4426 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4427 __ cmp(input, Operand(Smi::kMaxValue));
4428 __ b(hi, deferred->entry());
4429 __ SmiTag(result, input);
4430 __ bind(deferred->exit());
4431}
4432
4433
4434void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4435 LOperand* value,
4436 LOperand* temp1,
4437 LOperand* temp2,
4438 IntegerSignedness signedness) {
4439 Label done, slow;
4440 Register src = ToRegister(value);
4441 Register dst = ToRegister(instr->result());
4442 Register tmp1 = scratch0();
4443 Register tmp2 = ToRegister(temp1);
4444 Register tmp3 = ToRegister(temp2);
4445 LowDwVfpRegister dbl_scratch = double_scratch0();
4446
4447 if (signedness == SIGNED_INT32) {
4448 // There was overflow, so bits 30 and 31 of the original integer
4449 // disagree. Try to allocate a heap number in new space and store
4450 // the value in there. If that fails, call the runtime system.
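        // If dst aliases src, the overflowing SmiTag has doubled the value in
        // place; shift it back and flip the sign bit (which the overflow
        // inverted) to recover the original integer.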
4451 if (dst.is(src)) {
4452 __ SmiUntag(src, dst);
4453 __ eor(src, src, Operand(0x80000000));
4454 }
4455 __ vmov(dbl_scratch.low(), src);
4456 __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
4457 } else {
4458 __ vmov(dbl_scratch.low(), src);
4459 __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
4460 }
4461
4462 if (FLAG_inline_new) {
4463 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4464 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
4465 __ b(&done);
4466 }
4467
4468 // Slow case: Call the runtime system to do the number allocation.
4469 __ bind(&slow);
4470 {
4471 // TODO(3095996): Put a valid pointer value in the stack slot where the
4472 // result register is stored, as this register is in the pointer map, but
4473 // contains an integer value.
4474 __ mov(dst, Operand::Zero());
4475
4476 // Preserve the value of all registers.
4477 PushSafepointRegistersScope scope(this);
4478
4479 // NumberTagI and NumberTagD use the context from the frame, rather than
4480 // the environment's HContext or HInlinedContext value.
4481 // They only call Runtime::kAllocateHeapNumber.
4482 // The corresponding HChange instructions are added in a phase that does
4483 // not have easy access to the local context.
4484 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4485 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4486 RecordSafepointWithRegisters(
4487 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4488 __ sub(r0, r0, Operand(kHeapObjectTag));
4489 __ StoreToSafepointRegisterSlot(r0, dst);
4490 }
4491
4492 // Done. Put the value in dbl_scratch into the value of the allocated heap
4493 // number.
4494 __ bind(&done);
4495 __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
4496 __ add(dst, dst, Operand(kHeapObjectTag));
4497}
4498
4499
4500void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4501 class DeferredNumberTagD final : public LDeferredCode {
4502 public:
4503 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4504 : LDeferredCode(codegen), instr_(instr) { }
4505 void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
4506 LInstruction* instr() override { return instr_; }
4507
4508 private:
4509 LNumberTagD* instr_;
4510 };
4511
4512 DwVfpRegister input_reg = ToDoubleRegister(instr->value());
4513 Register scratch = scratch0();
4514 Register reg = ToRegister(instr->result());
4515 Register temp1 = ToRegister(instr->temp());
4516 Register temp2 = ToRegister(instr->temp2());
4517
4518 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4519 if (FLAG_inline_new) {
4520 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4521 // We want the untagged address first for performance
4522 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4523 DONT_TAG_RESULT);
4524 } else {
4525 __ jmp(deferred->entry());
4526 }
4527 __ bind(deferred->exit());
4528 __ vstr(input_reg, reg, HeapNumber::kValueOffset);
4529  // Now that we have finished with the object's real address, tag it.
4530 __ add(reg, reg, Operand(kHeapObjectTag));
4531}
4532
4533
4534void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4535 // TODO(3095996): Get rid of this. For now, we need to make the
4536 // result register contain a valid pointer because it is already
4537 // contained in the register pointer map.
4538 Register reg = ToRegister(instr->result());
4539 __ mov(reg, Operand::Zero());
4540
4541 PushSafepointRegistersScope scope(this);
4542 // NumberTagI and NumberTagD use the context from the frame, rather than
4543 // the environment's HContext or HInlinedContext value.
4544 // They only call Runtime::kAllocateHeapNumber.
4545 // The corresponding HChange instructions are added in a phase that does
4546 // not have easy access to the local context.
4547 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4548 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4549 RecordSafepointWithRegisters(
4550 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4551 __ sub(r0, r0, Operand(kHeapObjectTag));
4552 __ StoreToSafepointRegisterSlot(r0, reg);
4553}
4554
4555
4556void LCodeGen::DoSmiTag(LSmiTag* instr) {
4557 HChange* hchange = instr->hydrogen();
4558 Register input = ToRegister(instr->value());
4559 Register output = ToRegister(instr->result());
4560 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4561 hchange->value()->CheckFlag(HValue::kUint32)) {
4562 __ tst(input, Operand(0xc0000000));
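        // A uint32 value fits in a smi only if bits 30 and 31 are clear,
        // i.e. it is less than 2^30.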
4563 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
4564 }
4565 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4566 !hchange->value()->CheckFlag(HValue::kUint32)) {
4567 __ SmiTag(output, input, SetCC);
4568 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
4569 } else {
4570 __ SmiTag(output, input);
4571 }
4572}
4573
4574
4575void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4576 Register input = ToRegister(instr->value());
4577 Register result = ToRegister(instr->result());
4578 if (instr->needs_check()) {
4579 STATIC_ASSERT(kHeapObjectTag == 1);
4580 // If the input is a HeapObject, SmiUntag will set the carry flag.
4581 __ SmiUntag(result, input, SetCC);
4582 DeoptimizeIf(cs, instr, Deoptimizer::kNotASmi);
4583 } else {
4584 __ SmiUntag(result, input);
4585 }
4586}
4587
4588
4589void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4590 DwVfpRegister result_reg,
4591 NumberUntagDMode mode) {
4592 bool can_convert_undefined_to_nan =
4593 instr->hydrogen()->can_convert_undefined_to_nan();
4594 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4595
4596 Register scratch = scratch0();
4597 SwVfpRegister flt_scratch = double_scratch0().low();
4598 DCHECK(!result_reg.is(double_scratch0()));
4599 Label convert, load_smi, done;
4600 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4601 // Smi check.
4602 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4603 // Heap number map check.
4604 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4605 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4606 __ cmp(scratch, Operand(ip));
4607 if (can_convert_undefined_to_nan) {
4608 __ b(ne, &convert);
4609 } else {
4610 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
4611 }
4612 // load heap number
4613 __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
4614 if (deoptimize_on_minus_zero) {
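          // The value is -0 exactly when the low word is zero and the high
          // word equals the sign mask.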
4615 __ VmovLow(scratch, result_reg);
4616 __ cmp(scratch, Operand::Zero());
4617 __ b(ne, &done);
4618 __ VmovHigh(scratch, result_reg);
4619 __ cmp(scratch, Operand(HeapNumber::kSignMask));
4620 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
4621 }
4622 __ jmp(&done);
4623 if (can_convert_undefined_to_nan) {
4624 __ bind(&convert);
4625 // Convert undefined (and hole) to NaN.
4626 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4627 __ cmp(input_reg, Operand(ip));
4628 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
4629 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4630 __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
4631 __ jmp(&done);
4632 }
4633 } else {
4634 __ SmiUntag(scratch, input_reg);
4635 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4636 }
4637 // Smi to double register conversion
4638 __ bind(&load_smi);
4639 // scratch: untagged value of input_reg
4640 __ vmov(flt_scratch, scratch);
4641 __ vcvt_f64_s32(result_reg, flt_scratch);
4642 __ bind(&done);
4643}
4644
4645
4646void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4647 Register input_reg = ToRegister(instr->value());
4648 Register scratch1 = scratch0();
4649 Register scratch2 = ToRegister(instr->temp());
4650 LowDwVfpRegister double_scratch = double_scratch0();
4651 DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());
4652
4653 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4654 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4655
4656 Label done;
4657
4658 // The input was optimistically untagged; revert it.
4659 // The carry flag is set when we reach this deferred code as we just executed
4660 // SmiUntag(heap_object, SetCC)
4661 STATIC_ASSERT(kHeapObjectTag == 1);
4662 __ adc(scratch2, input_reg, Operand(input_reg));
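      // adc computes input_reg * 2 + carry, reconstructing the original
      // tagged heap object pointer from the optimistically untagged value.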
4663
4664 // Heap number map check.
4665 __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset));
4666 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4667 __ cmp(scratch1, Operand(ip));
4668
4669 if (instr->truncating()) {
4670 // Performs a truncating conversion of a floating point number as used by
4671 // the JS bitwise operations.
4672 Label no_heap_number, check_bools, check_false;
4673 __ b(ne, &no_heap_number);
4674 __ TruncateHeapNumberToI(input_reg, scratch2);
4675 __ b(&done);
4676
4677 // Check for Oddballs. Undefined/False is converted to zero and True to one
4678 // for truncating conversions.
4679 __ bind(&no_heap_number);
4680 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4681 __ cmp(scratch2, Operand(ip));
4682 __ b(ne, &check_bools);
4683 __ mov(input_reg, Operand::Zero());
4684 __ b(&done);
4685
4686 __ bind(&check_bools);
4687 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
4688 __ cmp(scratch2, Operand(ip));
4689 __ b(ne, &check_false);
4690 __ mov(input_reg, Operand(1));
4691 __ b(&done);
4692
4693 __ bind(&check_false);
4694 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
4695 __ cmp(scratch2, Operand(ip));
4696 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
4697 __ mov(input_reg, Operand::Zero());
4698 } else {
4699 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
4700
4701 __ sub(ip, scratch2, Operand(kHeapObjectTag));
4702 __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
4703 __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
4704 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
4705
4706 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4707 __ cmp(input_reg, Operand::Zero());
4708 __ b(ne, &done);
4709 __ VmovHigh(scratch1, double_scratch2);
4710 __ tst(scratch1, Operand(HeapNumber::kSignMask));
4711 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
4712 }
4713 }
4714 __ bind(&done);
4715}
4716
4717
4718void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4719 class DeferredTaggedToI final : public LDeferredCode {
4720 public:
4721 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4722 : LDeferredCode(codegen), instr_(instr) { }
4723 void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
4724 LInstruction* instr() override { return instr_; }
4725
4726 private:
4727 LTaggedToI* instr_;
4728 };
4729
4730 LOperand* input = instr->value();
4731 DCHECK(input->IsRegister());
4732 DCHECK(input->Equals(instr->result()));
4733
4734 Register input_reg = ToRegister(input);
4735
4736 if (instr->hydrogen()->value()->representation().IsSmi()) {
4737 __ SmiUntag(input_reg);
4738 } else {
4739 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4740
4741 // Optimistically untag the input.
4742 // If the input is a HeapObject, SmiUntag will set the carry flag.
4743 __ SmiUntag(input_reg, SetCC);
4744 // Branch to deferred code if the input was tagged.
4745 // The deferred code will take care of restoring the tag.
4746 __ b(cs, deferred->entry());
4747 __ bind(deferred->exit());
4748 }
4749}
4750
4751
4752void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4753 LOperand* input = instr->value();
4754 DCHECK(input->IsRegister());
4755 LOperand* result = instr->result();
4756 DCHECK(result->IsDoubleRegister());
4757
4758 Register input_reg = ToRegister(input);
4759 DwVfpRegister result_reg = ToDoubleRegister(result);
4760
4761 HValue* value = instr->hydrogen()->value();
4762 NumberUntagDMode mode = value->representation().IsSmi()
4763 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4764
4765 EmitNumberUntagD(instr, input_reg, result_reg, mode);
4766}
4767
4768
4769void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4770 Register result_reg = ToRegister(instr->result());
4771 Register scratch1 = scratch0();
4772 DwVfpRegister double_input = ToDoubleRegister(instr->value());
4773 LowDwVfpRegister double_scratch = double_scratch0();
4774
4775 if (instr->truncating()) {
4776 __ TruncateDoubleToI(result_reg, double_input);
4777 } else {
4778 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
4779 // Deoptimize if the input wasn't a int32 (inside a double).
4780 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
4781 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4782 Label done;
4783 __ cmp(result_reg, Operand::Zero());
4784 __ b(ne, &done);
4785 __ VmovHigh(scratch1, double_input);
4786 __ tst(scratch1, Operand(HeapNumber::kSignMask));
4787 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
4788 __ bind(&done);
4789 }
4790 }
4791}
4792
4793
4794void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
4795 Register result_reg = ToRegister(instr->result());
4796 Register scratch1 = scratch0();
4797 DwVfpRegister double_input = ToDoubleRegister(instr->value());
4798 LowDwVfpRegister double_scratch = double_scratch0();
4799
4800 if (instr->truncating()) {
4801 __ TruncateDoubleToI(result_reg, double_input);
4802 } else {
4803 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
4804 // Deoptimize if the input wasn't a int32 (inside a double).
4805    // Deoptimize if the input wasn't an int32 (inside a double).
4806 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4807 Label done;
4808 __ cmp(result_reg, Operand::Zero());
4809 __ b(ne, &done);
4810 __ VmovHigh(scratch1, double_input);
4811 __ tst(scratch1, Operand(HeapNumber::kSignMask));
4812 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
4813 __ bind(&done);
4814 }
4815 }
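  // Smi-tag the result; the tagging add sets the overflow flag when the value does not fit in a Smi.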
4816 __ SmiTag(result_reg, SetCC);
4817 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
4818}
4819
4820
4821void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4822 LOperand* input = instr->value();
4823 __ SmiTst(ToRegister(input));
4824 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
4825}
4826
4827
4828void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4829 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4830 LOperand* input = instr->value();
4831 __ SmiTst(ToRegister(input));
4832 DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
4833 }
4834}
4835
4836
4837void LCodeGen::DoCheckArrayBufferNotNeutered(
4838 LCheckArrayBufferNotNeutered* instr) {
4839 Register view = ToRegister(instr->view());
4840 Register scratch = scratch0();
4841
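  // Deoptimize if the view's backing JSArrayBuffer has been neutered.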
4842 __ ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
4843 __ ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
4844 __ tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
4845 DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds);
4846}
4847
4848
4849void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4850 Register input = ToRegister(instr->value());
4851 Register scratch = scratch0();
4852
4853 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
4854 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4855
4856 if (instr->hydrogen()->is_interval_check()) {
4857 InstanceType first;
4858 InstanceType last;
4859 instr->hydrogen()->GetCheckInterval(&first, &last);
4860
4861 __ cmp(scratch, Operand(first));
4862
4863    // If there is only one type in the interval, check for equality.
4864 if (first == last) {
4865 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
4866 } else {
4867 DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
4868 // Omit check for the last type.
4869 if (last != LAST_TYPE) {
4870 __ cmp(scratch, Operand(last));
4871 DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
4872 }
4873 }
4874 } else {
4875 uint8_t mask;
4876 uint8_t tag;
4877 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4878
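    // A single-bit mask can be tested directly; the expected tag determines which outcome deoptimizes.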
4879 if (base::bits::IsPowerOfTwo32(mask)) {
4880 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
4881 __ tst(scratch, Operand(mask));
4882 DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType);
4883 } else {
4884 __ and_(scratch, scratch, Operand(mask));
4885 __ cmp(scratch, Operand(tag));
4886 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
4887 }
4888 }
4889}
4890
4891
4892void LCodeGen::DoCheckValue(LCheckValue* instr) {
4893 Register reg = ToRegister(instr->value());
4894 Handle<HeapObject> object = instr->hydrogen()->object().handle();
4895 AllowDeferredHandleDereference smi_check;
4896 if (isolate()->heap()->InNewSpace(*object)) {
4898 Handle<Cell> cell = isolate()->factory()->NewCell(object);
4899 __ mov(ip, Operand(cell));
4900 __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
4901 __ cmp(reg, ip);
4902 } else {
4903 __ cmp(reg, Operand(object));
4904 }
4905 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
4906}
4907
4908
4909void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
4910 {
4911 PushSafepointRegistersScope scope(this);
4912 __ push(object);
4913 __ mov(cp, Operand::Zero());
4914 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
4915 RecordSafepointWithRegisters(
4916 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
4917 __ StoreToSafepointRegisterSlot(r0, scratch0());
4918 }
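  // The migration runtime call returns a Smi on failure; deoptimize in that case.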
4919 __ tst(scratch0(), Operand(kSmiTagMask));
4920 DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed);
4921}
4922
4923
4924void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4925 class DeferredCheckMaps final : public LDeferredCode {
4926 public:
4927 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
4928 : LDeferredCode(codegen), instr_(instr), object_(object) {
4929 SetExit(check_maps());
4930 }
4931 void Generate() override {
4932 codegen()->DoDeferredInstanceMigration(instr_, object_);
4933 }
4934 Label* check_maps() { return &check_maps_; }
4935 LInstruction* instr() override { return instr_; }
4936
4937 private:
4938 LCheckMaps* instr_;
4939 Label check_maps_;
4940 Register object_;
4941 };
4942
4943 if (instr->hydrogen()->IsStabilityCheck()) {
4944 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
4945 for (int i = 0; i < maps->size(); ++i) {
4946 AddStabilityDependency(maps->at(i).handle());
4947 }
4948 return;
4949 }
4950
4951 Register map_reg = scratch0();
4952
4953 LOperand* input = instr->value();
4954 DCHECK(input->IsRegister());
4955 Register reg = ToRegister(input);
4956
4957 __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
4958
4959 DeferredCheckMaps* deferred = NULL;
4960 if (instr->hydrogen()->HasMigrationTarget()) {
4961 deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
4962 __ bind(deferred->check_maps());
4963 }
4964
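  // Compare the object's map against each candidate; a final mismatch either deoptimizes or falls back to the deferred migration code.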
4965 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
4966 Label success;
4967 for (int i = 0; i < maps->size() - 1; i++) {
4968 Handle<Map> map = maps->at(i).handle();
4969 __ CompareMap(map_reg, map, &success);
4970 __ b(eq, &success);
4971 }
4972
4973 Handle<Map> map = maps->at(maps->size() - 1).handle();
4974 __ CompareMap(map_reg, map, &success);
4975 if (instr->hydrogen()->HasMigrationTarget()) {
4976 __ b(ne, deferred->entry());
4977 } else {
4978 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
4979 }
4980
4981 __ bind(&success);
4982}
4983
4984
4985void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4986 DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
4987 Register result_reg = ToRegister(instr->result());
4988 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
4989}
4990
4991
4992void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4993 Register unclamped_reg = ToRegister(instr->unclamped());
4994 Register result_reg = ToRegister(instr->result());
4995 __ ClampUint8(result_reg, unclamped_reg);
4996}
4997
4998
4999void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5000 Register scratch = scratch0();
5001 Register input_reg = ToRegister(instr->unclamped());
5002 Register result_reg = ToRegister(instr->result());
5003 DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
5004 Label is_smi, done, heap_number;
5005
5006 // Both smi and heap number cases are handled.
5007 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
5008
5009 // Check for heap number
5010 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5011 __ cmp(scratch, Operand(factory()->heap_number_map()));
5012 __ b(eq, &heap_number);
5013
5014 // Check for undefined. Undefined is converted to zero for clamping
5015 // conversions.
5016 __ cmp(input_reg, Operand(factory()->undefined_value()));
5017 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
5018 __ mov(result_reg, Operand::Zero());
5019 __ jmp(&done);
5020
5021 // Heap number
5022 __ bind(&heap_number);
5023 __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
5024 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
5025 __ jmp(&done);
5026
5027 // smi
5028 __ bind(&is_smi);
5029 __ ClampUint8(result_reg, result_reg);
5030
5031 __ bind(&done);
5032}
5033
5034
5035void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5036 DwVfpRegister value_reg = ToDoubleRegister(instr->value());
5037 Register result_reg = ToRegister(instr->result());
5038 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5039 __ VmovHigh(result_reg, value_reg);
5040 } else {
5041 __ VmovLow(result_reg, value_reg);
5042 }
5043}
5044
5045
5046void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5047 Register hi_reg = ToRegister(instr->hi());
5048 Register lo_reg = ToRegister(instr->lo());
5049 DwVfpRegister result_reg = ToDoubleRegister(instr->result());
5050 __ VmovHigh(result_reg, hi_reg);
5051 __ VmovLow(result_reg, lo_reg);
5052}
5053
5054
5055void LCodeGen::DoAllocate(LAllocate* instr) {
5056 class DeferredAllocate final : public LDeferredCode {
5057 public:
5058 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5059 : LDeferredCode(codegen), instr_(instr) { }
5060 void Generate() override { codegen()->DoDeferredAllocate(instr_); }
5061 LInstruction* instr() override { return instr_; }
5062
5063 private:
5064 LAllocate* instr_;
5065 };
5066
5067 DeferredAllocate* deferred =
5068 new(zone()) DeferredAllocate(this, instr);
5069
5070 Register result = ToRegister(instr->result());
5071 Register scratch = ToRegister(instr->temp1());
5072 Register scratch2 = ToRegister(instr->temp2());
5073
5074 // Allocate memory for the object.
5075 AllocationFlags flags = TAG_OBJECT;
5076 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5077 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5078 }
5079 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5080 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5081 flags = static_cast<AllocationFlags>(flags | PRETENURE);
5082 }
5083
5084 if (instr->size()->IsConstantOperand()) {
5085 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5086 CHECK(size <= Page::kMaxRegularHeapObjectSize);
5087 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5088 } else {
5089 Register size = ToRegister(instr->size());
5090 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5091 }
5092
5093 __ bind(deferred->exit());
5094
5095 if (instr->hydrogen()->MustPrefillWithFiller()) {
5096 STATIC_ASSERT(kHeapObjectTag == 1);
5097 if (instr->size()->IsConstantOperand()) {
5098 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5099 __ mov(scratch, Operand(size - kHeapObjectTag));
5100 } else {
5101 __ sub(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
5102 }
5103 __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
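    // Fill the allocated object with one-pointer filler maps, walking backwards from the end.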
5104 Label loop;
5105 __ bind(&loop);
5106 __ sub(scratch, scratch, Operand(kPointerSize), SetCC);
5107 __ str(scratch2, MemOperand(result, scratch));
5108 __ b(ge, &loop);
5109 }
5110}
5111
5112
5113void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5114 Register result = ToRegister(instr->result());
5115
5116 // TODO(3095996): Get rid of this. For now, we need to make the
5117 // result register contain a valid pointer because it is already
5118 // contained in the register pointer map.
5119 __ mov(result, Operand(Smi::FromInt(0)));
5120
5121 PushSafepointRegistersScope scope(this);
5122 if (instr->size()->IsRegister()) {
5123 Register size = ToRegister(instr->size());
5124 DCHECK(!size.is(result));
5125 __ SmiTag(size);
5126 __ push(size);
5127 } else {
5128 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5129 if (size >= 0 && size <= Smi::kMaxValue) {
5130 __ Push(Smi::FromInt(size));
5131 } else {
5132 // We should never get here at runtime => abort
5133 __ stop("invalid allocation size");
5134 return;
5135 }
5136 }
5137
5138 int flags = AllocateDoubleAlignFlag::encode(
5139 instr->hydrogen()->MustAllocateDoubleAligned());
5140 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5141 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5142 flags = AllocateTargetSpace::update(flags, OLD_SPACE);
5143 } else {
5144 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5145 }
5146 __ Push(Smi::FromInt(flags));
5147
5148 CallRuntimeFromDeferred(
5149 Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5150 __ StoreToSafepointRegisterSlot(r0, result);
5151}
5152
5153
5154void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5155 DCHECK(ToRegister(instr->value()).is(r0));
5156 __ push(r0);
5157 CallRuntime(Runtime::kToFastProperties, 1, instr);
5158}
5159
5160
5161void LCodeGen::DoTypeof(LTypeof* instr) {
5162 DCHECK(ToRegister(instr->value()).is(r3));
5163 DCHECK(ToRegister(instr->result()).is(r0));
5164 Label end, do_call;
5165 Register value_register = ToRegister(instr->value());
5166 __ JumpIfNotSmi(value_register, &do_call);
5167 __ mov(r0, Operand(isolate()->factory()->number_string()));
5168 __ jmp(&end);
5169 __ bind(&do_call);
5170 TypeofStub stub(isolate());
5171 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5172 __ bind(&end);
5173}
5174
5175
5176void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5177 Register input = ToRegister(instr->value());
5178
5179 Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
5180 instr->FalseLabel(chunk_),
5181 input,
5182 instr->type_literal());
5183 if (final_branch_condition != kNoCondition) {
5184 EmitBranch(instr, final_branch_condition);
5185 }
5186}
5187
5188
5189Condition LCodeGen::EmitTypeofIs(Label* true_label,
5190 Label* false_label,
5191 Register input,
5192 Handle<String> type_name) {
5193 Condition final_branch_condition = kNoCondition;
5194 Register scratch = scratch0();
5195 Factory* factory = isolate()->factory();
5196 if (String::Equals(type_name, factory->number_string())) {
5197 __ JumpIfSmi(input, true_label);
5198 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5199 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
5200 final_branch_condition = eq;
5201
5202 } else if (String::Equals(type_name, factory->string_string())) {
5203 __ JumpIfSmi(input, false_label);
5204 __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
5205 final_branch_condition = lt;
5206
5207 } else if (String::Equals(type_name, factory->symbol_string())) {
5208 __ JumpIfSmi(input, false_label);
5209 __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
5210 final_branch_condition = eq;
5211
5212 } else if (String::Equals(type_name, factory->boolean_string())) {
5213 __ CompareRoot(input, Heap::kTrueValueRootIndex);
5214 __ b(eq, true_label);
5215 __ CompareRoot(input, Heap::kFalseValueRootIndex);
5216 final_branch_condition = eq;
5217
5218 } else if (String::Equals(type_name, factory->undefined_string())) {
5219    __ CompareRoot(input, Heap::kNullValueRootIndex);
5220 __ b(eq, false_label);
5221    __ JumpIfSmi(input, false_label);
5222 // Check for undetectable objects => true.
5223 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5224 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5225 __ tst(scratch, Operand(1 << Map::kIsUndetectable));
5226 final_branch_condition = ne;
5227
5228 } else if (String::Equals(type_name, factory->function_string())) {
5229 __ JumpIfSmi(input, false_label);
5230 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5231 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
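    // typeof is "function" only for maps that are callable and not undetectable.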
5232 __ and_(scratch, scratch,
5233 Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5234 __ cmp(scratch, Operand(1 << Map::kIsCallable));
5235 final_branch_condition = eq;
5236
5237 } else if (String::Equals(type_name, factory->object_string())) {
5238 __ JumpIfSmi(input, false_label);
5239 __ CompareRoot(input, Heap::kNullValueRootIndex);
5240 __ b(eq, true_label);
5241 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
5242 __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
5243 __ b(lt, false_label);
5244 // Check for callable or undetectable objects => false.
5245 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5246 __ tst(scratch,
5247 Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5248 final_branch_condition = eq;
5249
5250// clang-format off
5251#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
5252 } else if (String::Equals(type_name, factory->type##_string())) { \
5253 __ JumpIfSmi(input, false_label); \
5254 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
5255 __ CompareRoot(scratch, Heap::k##Type##MapRootIndex); \
5256 final_branch_condition = eq;
5257 SIMD128_TYPES(SIMD128_TYPE)
5258#undef SIMD128_TYPE
5259 // clang-format on
5260
5261 } else {
5262 __ b(false_label);
5263 }
5264
5265 return final_branch_condition;
5266}
5267
5268
5269void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5270 if (info()->ShouldEnsureSpaceForLazyDeopt()) {
5271 // Ensure that we have enough space after the previous lazy-bailout
5272 // instruction for patching the code here.
5273 int current_pc = masm()->pc_offset();
5274 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5275 // Block literal pool emission for duration of padding.
5276 Assembler::BlockConstPoolScope block_const_pool(masm());
5277 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5278 DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
5279 while (padding_size > 0) {
5280 __ nop();
5281 padding_size -= Assembler::kInstrSize;
5282 }
5283 }
5284 }
5285 last_lazy_deopt_pc_ = masm()->pc_offset();
5286}
5287
5288
5289void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5290 last_lazy_deopt_pc_ = masm()->pc_offset();
5291 DCHECK(instr->HasEnvironment());
5292 LEnvironment* env = instr->environment();
5293 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5294 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5295}
5296
5297
5298void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5299 Deoptimizer::BailoutType type = instr->hydrogen()->type();
5300 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5301 // needed return address), even though the implementation of LAZY and EAGER is
5302 // now identical. When LAZY is eventually completely folded into EAGER, remove
5303 // the special case below.
5304 if (info()->IsStub() && type == Deoptimizer::EAGER) {
5305 type = Deoptimizer::LAZY;
5306 }
5307
5308 DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
5309}
5310
5311
5312void LCodeGen::DoDummy(LDummy* instr) {
5313 // Nothing to see here, move on!
5314}
5315
5316
5317void LCodeGen::DoDummyUse(LDummyUse* instr) {
5318 // Nothing to see here, move on!
5319}
5320
5321
5322void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5323 PushSafepointRegistersScope scope(this);
5324 LoadContextFromDeferred(instr->context());
5325 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5326 RecordSafepointWithLazyDeopt(
5327 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5328 DCHECK(instr->HasEnvironment());
5329 LEnvironment* env = instr->environment();
5330 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5331}
5332
5333
5334void LCodeGen::DoStackCheck(LStackCheck* instr) {
5335 class DeferredStackCheck final : public LDeferredCode {
5336 public:
5337 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5338 : LDeferredCode(codegen), instr_(instr) { }
5339 void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
5340 LInstruction* instr() override { return instr_; }
5341
5342 private:
5343 LStackCheck* instr_;
5344 };
5345
5346 DCHECK(instr->HasEnvironment());
5347 LEnvironment* env = instr->environment();
5348 // There is no LLazyBailout instruction for stack-checks. We have to
5349 // prepare for lazy deoptimization explicitly here.
5350 if (instr->hydrogen()->is_function_entry()) {
5351 // Perform stack overflow check.
5352 Label done;
5353 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5354 __ cmp(sp, Operand(ip));
5355 __ b(hs, &done);
5356 Handle<Code> stack_check = isolate()->builtins()->StackCheck();
5357 PredictableCodeSizeScope predictable(masm());
5358 predictable.ExpectSize(CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
5359 DCHECK(instr->context()->IsRegister());
5360 DCHECK(ToRegister(instr->context()).is(cp));
5361 CallCode(stack_check, RelocInfo::CODE_TARGET, instr);
5362 __ bind(&done);
5363 } else {
5364 DCHECK(instr->hydrogen()->is_backwards_branch());
5365 // Perform stack overflow check if this goto needs it before jumping.
5366 DeferredStackCheck* deferred_stack_check =
5367 new(zone()) DeferredStackCheck(this, instr);
5368 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5369 __ cmp(sp, Operand(ip));
5370 __ b(lo, deferred_stack_check->entry());
5371 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5372 __ bind(instr->done_label());
5373 deferred_stack_check->SetExit(instr->done_label());
5374 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5375 // Don't record a deoptimization index for the safepoint here.
5376 // This will be done explicitly when emitting call and the safepoint in
5377 // the deferred code.
5378 }
5379}
5380
5381
5382void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5383 // This is a pseudo-instruction that ensures that the environment here is
5384 // properly registered for deoptimization and records the assembler's PC
5385 // offset.
5386 LEnvironment* environment = instr->environment();
5387
5388 // If the environment were already registered, we would have no way of
5389 // backpatching it with the spill slot operands.
5390 DCHECK(!environment->HasBeenRegistered());
5391 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5392
5393 GenerateOsrPrologue();
5394}
5395
5396
5397void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5398  Label use_cache, call_runtime;
5399  __ CheckEnumCache(&call_runtime);
5400
5401 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
5402 __ b(&use_cache);
5403
5404 // Get the set of properties to enumerate.
5405 __ bind(&call_runtime);
5406 __ push(r0);
5407  CallRuntime(Runtime::kForInEnumerate, instr);
5408  __ bind(&use_cache);
5409}
5410
5411
5412void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5413 Register map = ToRegister(instr->map());
5414 Register result = ToRegister(instr->result());
5415 Label load_cache, done;
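  // An EnumLength of zero means the map has no enum cache; use the empty fixed array.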
5416 __ EnumLength(result, map);
5417 __ cmp(result, Operand(Smi::FromInt(0)));
5418 __ b(ne, &load_cache);
5419 __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
5420 __ jmp(&done);
5421
5422 __ bind(&load_cache);
5423 __ LoadInstanceDescriptors(map, result);
5424 __ ldr(result,
5425 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5426 __ ldr(result,
5427 FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5428 __ cmp(result, Operand::Zero());
5429 DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
5430
5431 __ bind(&done);
5432}
5433
5434
5435void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5436 Register object = ToRegister(instr->value());
5437 Register map = ToRegister(instr->map());
5438 __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5439 __ cmp(map, scratch0());
5440 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
5441}
5442
5443
5444void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5445 Register result,
5446 Register object,
5447 Register index) {
5448 PushSafepointRegistersScope scope(this);
5449 __ Push(object);
5450 __ Push(index);
5451 __ mov(cp, Operand::Zero());
5452 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5453 RecordSafepointWithRegisters(
5454 instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5455 __ StoreToSafepointRegisterSlot(r0, result);
5456}
5457
5458
5459void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5460 class DeferredLoadMutableDouble final : public LDeferredCode {
5461 public:
5462 DeferredLoadMutableDouble(LCodeGen* codegen,
5463 LLoadFieldByIndex* instr,
5464 Register result,
5465 Register object,
5466 Register index)
5467 : LDeferredCode(codegen),
5468 instr_(instr),
5469 result_(result),
5470 object_(object),
5471 index_(index) {
5472 }
5473 void Generate() override {
5474 codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
5475 }
5476 LInstruction* instr() override { return instr_; }
5477
5478 private:
5479 LLoadFieldByIndex* instr_;
5480 Register result_;
5481 Register object_;
5482 Register index_;
5483 };
5484
5485 Register object = ToRegister(instr->object());
5486 Register index = ToRegister(instr->index());
5487 Register result = ToRegister(instr->result());
5488 Register scratch = scratch0();
5489
5490 DeferredLoadMutableDouble* deferred;
5491 deferred = new(zone()) DeferredLoadMutableDouble(
5492 this, instr, result, object, index);
5493
5494 Label out_of_object, done;
5495
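  // A set low bit in the Smi-encoded index marks a mutable double field, which is loaded through the deferred runtime call.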
5496 __ tst(index, Operand(Smi::FromInt(1)));
5497 __ b(ne, deferred->entry());
5498 __ mov(index, Operand(index, ASR, 1));
5499
5500 __ cmp(index, Operand::Zero());
5501 __ b(lt, &out_of_object);
5502
5503 __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
5504 __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5505
5506 __ b(&done);
5507
5508 __ bind(&out_of_object);
5509 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5510 // Index is equal to negated out of object property index plus 1.
5511  // The index is equal to the negated out-of-object property index plus 1.
5512 __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
5513 __ ldr(result, FieldMemOperand(scratch,
5514 FixedArray::kHeaderSize - kPointerSize));
5515 __ bind(deferred->exit());
5516 __ bind(&done);
5517}
5518
5519
5520void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
5521 Register context = ToRegister(instr->context());
5522 __ str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
5523}
5524
5525
5526#undef __
5527
5528} // namespace internal
5529} // namespace v8