1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/crankshaft/arm/lithium-codegen-arm.h"
6
7#include "src/base/bits.h"
8#include "src/code-factory.h"
9#include "src/code-stubs.h"
10#include "src/crankshaft/arm/lithium-gap-resolver-arm.h"
11#include "src/crankshaft/hydrogen-osr.h"
12#include "src/ic/ic.h"
13#include "src/ic/stub-cache.h"
14#include "src/profiler/cpu-profiler.h"
15
16namespace v8 {
17namespace internal {
18
19
20class SafepointGenerator final : public CallWrapper {
21 public:
22 SafepointGenerator(LCodeGen* codegen,
23 LPointerMap* pointers,
24 Safepoint::DeoptMode mode)
25 : codegen_(codegen),
26 pointers_(pointers),
27 deopt_mode_(mode) { }
28 virtual ~SafepointGenerator() {}
29
30 void BeforeCall(int call_size) const override {}
31
32 void AfterCall() const override {
33 codegen_->RecordSafepoint(pointers_, deopt_mode_);
34 }
35
36 private:
37 LCodeGen* codegen_;
38 LPointerMap* pointers_;
39 Safepoint::DeoptMode deopt_mode_;
40};
41
42
43#define __ masm()->
44
45bool LCodeGen::GenerateCode() {
46 LPhase phase("Z_Code generation", chunk());
47 DCHECK(is_unused());
48 status_ = GENERATING;
49
50 // Open a frame scope to indicate that there is a frame on the stack. The
51 // NONE indicates that the scope shouldn't actually generate code to set up
52 // the frame (that is done in GeneratePrologue).
53 FrameScope frame_scope(masm_, StackFrame::NONE);
54
55 return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
56 GenerateJumpTable() && GenerateSafepointTable();
57}
58
59
60void LCodeGen::FinishCode(Handle<Code> code) {
61 DCHECK(is_done());
62 code->set_stack_slots(GetTotalFrameSlotCount());
63 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
64 PopulateDeoptimizationData(code);
65}
66
67
68void LCodeGen::SaveCallerDoubles() {
69 DCHECK(info()->saves_caller_doubles());
70 DCHECK(NeedsEagerFrame());
71 Comment(";;; Save clobbered callee double registers");
72 int count = 0;
73 BitVector* doubles = chunk()->allocated_double_registers();
74 BitVector::Iterator save_iterator(doubles);
75 while (!save_iterator.Done()) {
76 __ vstr(DoubleRegister::from_code(save_iterator.Current()),
77 MemOperand(sp, count * kDoubleSize));
78 save_iterator.Advance();
79 count++;
80 }
81}
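// Illustration with a hypothetical allocation (not from the original source):
// if the register allocator used only d0 and d2, the loop above stores d0 at
// [sp + 0] and d2 at [sp + kDoubleSize]; the slot index follows the save
// order, not the register code.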
82
83
84void LCodeGen::RestoreCallerDoubles() {
85 DCHECK(info()->saves_caller_doubles());
86 DCHECK(NeedsEagerFrame());
87 Comment(";;; Restore clobbered callee double registers");
88 BitVector* doubles = chunk()->allocated_double_registers();
89 BitVector::Iterator save_iterator(doubles);
90 int count = 0;
91 while (!save_iterator.Done()) {
92 __ vldr(DoubleRegister::from_code(save_iterator.Current()),
93 MemOperand(sp, count * kDoubleSize));
94 save_iterator.Advance();
95 count++;
96 }
97}
98
99
100bool LCodeGen::GeneratePrologue() {
101 DCHECK(is_generating());
102
103 if (info()->IsOptimizing()) {
104 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
105
106 // r1: Callee's JS function.
107 // cp: Callee's context.
108 // pp: Callee's constant pool pointer (if enabled)
109 // fp: Caller's frame pointer.
110 // lr: Caller's pc.
111 }
112
113 info()->set_prologue_offset(masm_->pc_offset());
114 if (NeedsEagerFrame()) {
115 if (info()->IsStub()) {
116 __ StubPrologue(StackFrame::STUB);
117 } else {
118 __ Prologue(info()->GeneratePreagedPrologue());
119 }
120 frame_is_built_ = true;
121 }
122
123 // Reserve space for the stack slots needed by the code.
124 int slots = GetStackSlotCount();
125 if (slots > 0) {
126 if (FLAG_debug_code) {
127 __ sub(sp, sp, Operand(slots * kPointerSize));
128 __ push(r0);
129 __ push(r1);
130 __ add(r0, sp, Operand(slots * kPointerSize));
131 __ mov(r1, Operand(kSlotsZapValue));
132 Label loop;
133 __ bind(&loop);
134 __ sub(r0, r0, Operand(kPointerSize));
135 __ str(r1, MemOperand(r0, 2 * kPointerSize));
136 __ cmp(r0, sp);
137 __ b(ne, &loop);
138 __ pop(r1);
139 __ pop(r0);
140 } else {
141 __ sub(sp, sp, Operand(slots * kPointerSize));
142 }
143 }
144
145 if (info()->saves_caller_doubles()) {
146 SaveCallerDoubles();
147 }
148 return !is_aborted();
149}
150
151
152void LCodeGen::DoPrologue(LPrologue* instr) {
153 Comment(";;; Prologue begin");
154
155 // Possibly allocate a local context.
156 if (info()->scope()->num_heap_slots() > 0) {
157 Comment(";;; Allocate local context");
158 bool need_write_barrier = true;
159 // Argument to NewContext is the function, which is in r1.
160 int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
161 Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
162 if (info()->scope()->is_script_scope()) {
163 __ push(r1);
164 __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
165 __ CallRuntime(Runtime::kNewScriptContext);
166 deopt_mode = Safepoint::kLazyDeopt;
167 } else if (slots <= FastNewContextStub::kMaximumSlots) {
168 FastNewContextStub stub(isolate(), slots);
169 __ CallStub(&stub);
170 // Result of FastNewContextStub is always in new space.
171 need_write_barrier = false;
172 } else {
173 __ push(r1);
174 __ CallRuntime(Runtime::kNewFunctionContext);
175 }
176 RecordSafepoint(deopt_mode);
177
178 // Context is returned in both r0 and cp. It replaces the context
179 // passed to us. It's saved on the stack and kept live in cp.
180 __ mov(cp, r0);
181 __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
182 // Copy any necessary parameters into the context.
183 int num_parameters = scope()->num_parameters();
184 int first_parameter = scope()->has_this_declaration() ? -1 : 0;
185 for (int i = first_parameter; i < num_parameters; i++) {
186 Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
187 if (var->IsContextSlot()) {
188 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
189 (num_parameters - 1 - i) * kPointerSize;
190 // Load parameter from stack.
191 __ ldr(r0, MemOperand(fp, parameter_offset));
192 // Store it in the context.
193 MemOperand target = ContextMemOperand(cp, var->index());
194 __ str(r0, target);
195 // Update the write barrier. This clobbers r3 and r0.
196 if (need_write_barrier) {
197 __ RecordWriteContextSlot(
198 cp,
199 target.offset(),
200 r0,
201 r3,
202 GetLinkRegisterState(),
203 kSaveFPRegs);
204 } else if (FLAG_debug_code) {
205 Label done;
206 __ JumpIfInNewSpace(cp, r0, &done);
207 __ Abort(kExpectedNewSpaceObject);
208 __ bind(&done);
209 }
210 }
211 }
212 Comment(";;; End allocate local context");
213 }
214
215 Comment(";;; Prologue end");
216}
217
218
219void LCodeGen::GenerateOsrPrologue() {
220 // Generate the OSR entry prologue at the first unknown OSR value, or if there
221 // are none, at the OSR entrypoint instruction.
222 if (osr_pc_offset_ >= 0) return;
223
224 osr_pc_offset_ = masm()->pc_offset();
225
226 // Adjust the frame size, subsuming the unoptimized frame into the
227 // optimized frame.
228 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
229 DCHECK(slots >= 0);
230 __ sub(sp, sp, Operand(slots * kPointerSize));
231}
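// Worked example with hypothetical numbers: if the optimized code needs 10
// spill slots and the unoptimized frame already contributes 4, only
// (10 - 4) * kPointerSize bytes are subtracted from sp here.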
232
233
234void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
235 if (instr->IsCall()) {
236 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
237 }
238 if (!instr->IsLazyBailout() && !instr->IsGap()) {
239 safepoints_.BumpLastLazySafepointIndex();
240 }
241}
242
243
244bool LCodeGen::GenerateDeferredCode() {
245 DCHECK(is_generating());
246 if (deferred_.length() > 0) {
247 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
248 LDeferredCode* code = deferred_[i];
249
250 HValue* value =
251 instructions_->at(code->instruction_index())->hydrogen_value();
252 RecordAndWritePosition(
253 chunk()->graph()->SourcePositionToScriptPosition(value->position()));
254
255 Comment(";;; <@%d,#%d> "
256 "-------------------- Deferred %s --------------------",
257 code->instruction_index(),
258 code->instr()->hydrogen_value()->id(),
259 code->instr()->Mnemonic());
260 __ bind(code->entry());
261 if (NeedsDeferredFrame()) {
262 Comment(";;; Build frame");
263 DCHECK(!frame_is_built_);
264 DCHECK(info()->IsStub());
265 frame_is_built_ = true;
266 __ Move(scratch0(), Smi::FromInt(StackFrame::STUB));
267 __ PushCommonFrame(scratch0());
268 Comment(";;; Deferred code");
269 }
270 code->Generate();
271 if (NeedsDeferredFrame()) {
272 Comment(";;; Destroy frame");
273 DCHECK(frame_is_built_);
274 __ PopCommonFrame(scratch0());
275 frame_is_built_ = false;
276 }
277 __ jmp(code->exit());
278 }
279 }
280
281 // Force constant pool emission at the end of the deferred code to make
282 // sure that no constant pools are emitted after.
283 masm()->CheckConstPool(true, false);
284
285 return !is_aborted();
286}
287
288
289bool LCodeGen::GenerateJumpTable() {
290 // Check that the jump table is accessible from everywhere in the function
291 // code, i.e. that offsets to the table can be encoded in the 24bit signed
292 // immediate of a branch instruction.
293 // To simplify we consider the code size from the first instruction to the
294 // end of the jump table. We also don't consider the pc load delta.
295 // Each entry in the jump table generates one instruction and inlines one
296 // 32bit data after it.
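// Rough bound, assuming Assembler::kInstrSize == 4: is_int24 allows about
// 2^23 (~8.4 million) instruction words, i.e. roughly 32 MB of code plus
// jump table, before the Abort below triggers.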
297 if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
298 jump_table_.length() * 7)) {
299 Abort(kGeneratedCodeIsTooLarge);
300 }
301
302 if (jump_table_.length() > 0) {
303 Label needs_frame, call_deopt_entry;
304
305 Comment(";;; -------------------- Jump table --------------------");
306 Address base = jump_table_[0].address;
307
308 Register entry_offset = scratch0();
309
310 int length = jump_table_.length();
311 for (int i = 0; i < length; i++) {
312 Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
313 __ bind(&table_entry->label);
314
315 DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
316 Address entry = table_entry->address;
317 DeoptComment(table_entry->deopt_info);
318
319 // Second-level deopt table entries are contiguous and small, so instead
320 // of loading the full, absolute address of each one, load an immediate
321 // offset which will be added to the base address later.
322 __ mov(entry_offset, Operand(entry - base));
323
324 if (table_entry->needs_frame) {
325 DCHECK(!info()->saves_caller_doubles());
326 Comment(";;; call deopt with frame");
327 __ PushCommonFrame();
328 __ bl(&needs_frame);
329 } else {
330 __ bl(&call_deopt_entry);
331 }
332 masm()->CheckConstPool(false, false);
333 }
334
335 if (needs_frame.is_linked()) {
336 __ bind(&needs_frame);
337 // This variant of deopt can only be used with stubs. Since we don't
338 // have a function pointer to install in the stack frame that we're
339 // building, install a special marker there instead.
340 __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
341 __ push(ip);
342 DCHECK(info()->IsStub());
343 }
344
345 Comment(";;; call deopt");
346 __ bind(&call_deopt_entry);
347
348 if (info()->saves_caller_doubles()) {
349 DCHECK(info()->IsStub());
350 RestoreCallerDoubles();
351 }
352
353 // Add the base address to the offset previously loaded in entry_offset.
354 __ add(entry_offset, entry_offset,
355 Operand(ExternalReference::ForDeoptEntry(base)));
356 __ bx(entry_offset);
357 }
358
359 // Force constant pool emission at the end of the deopt jump table to make
360 // sure that no constant pools are emitted after.
361 masm()->CheckConstPool(true, false);
362
363 // The deoptimization jump table is the last part of the instruction
364 // sequence. Mark the generated code as done unless we bailed out.
365 if (!is_aborted()) status_ = DONE;
366 return !is_aborted();
367}
368
369
370bool LCodeGen::GenerateSafepointTable() {
371 DCHECK(is_done());
372 safepoints_.Emit(masm(), GetTotalFrameSlotCount());
373 return !is_aborted();
374}
375
376
377Register LCodeGen::ToRegister(int code) const {
378 return Register::from_code(code);
379}
380
381
382DwVfpRegister LCodeGen::ToDoubleRegister(int code) const {
383 return DwVfpRegister::from_code(code);
384}
385
386
387Register LCodeGen::ToRegister(LOperand* op) const {
388 DCHECK(op->IsRegister());
389 return ToRegister(op->index());
390}
391
392
393Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
394 if (op->IsRegister()) {
395 return ToRegister(op->index());
396 } else if (op->IsConstantOperand()) {
397 LConstantOperand* const_op = LConstantOperand::cast(op);
398 HConstant* constant = chunk_->LookupConstant(const_op);
399 Handle<Object> literal = constant->handle(isolate());
400 Representation r = chunk_->LookupLiteralRepresentation(const_op);
401 if (r.IsInteger32()) {
402 AllowDeferredHandleDereference get_number;
403 DCHECK(literal->IsNumber());
404 __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
405 } else if (r.IsDouble()) {
406 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
407 } else {
408 DCHECK(r.IsSmiOrTagged());
409 __ Move(scratch, literal);
410 }
411 return scratch;
412 } else if (op->IsStackSlot()) {
413 __ ldr(scratch, ToMemOperand(op));
414 return scratch;
415 }
416 UNREACHABLE();
417 return scratch;
418}
419
420
421DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
422 DCHECK(op->IsDoubleRegister());
423 return ToDoubleRegister(op->index());
424}
425
426
427DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
428 SwVfpRegister flt_scratch,
429 DwVfpRegister dbl_scratch) {
430 if (op->IsDoubleRegister()) {
431 return ToDoubleRegister(op->index());
432 } else if (op->IsConstantOperand()) {
433 LConstantOperand* const_op = LConstantOperand::cast(op);
434 HConstant* constant = chunk_->LookupConstant(const_op);
435 Handle<Object> literal = constant->handle(isolate());
436 Representation r = chunk_->LookupLiteralRepresentation(const_op);
437 if (r.IsInteger32()) {
438 DCHECK(literal->IsNumber());
439 __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
440 __ vmov(flt_scratch, ip);
441 __ vcvt_f64_s32(dbl_scratch, flt_scratch);
442 return dbl_scratch;
443 } else if (r.IsDouble()) {
444 Abort(kUnsupportedDoubleImmediate);
445 } else if (r.IsTagged()) {
446 Abort(kUnsupportedTaggedImmediate);
447 }
448 } else if (op->IsStackSlot()) {
449 // TODO(regis): Why is vldr not taking a MemOperand?
450 // __ vldr(dbl_scratch, ToMemOperand(op));
451 MemOperand mem_op = ToMemOperand(op);
452 __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
453 return dbl_scratch;
454 }
455 UNREACHABLE();
456 return dbl_scratch;
457}
458
459
460Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
461 HConstant* constant = chunk_->LookupConstant(op);
462 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
463 return constant->handle(isolate());
464}
465
466
467bool LCodeGen::IsInteger32(LConstantOperand* op) const {
468 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
469}
470
471
472bool LCodeGen::IsSmi(LConstantOperand* op) const {
473 return chunk_->LookupLiteralRepresentation(op).IsSmi();
474}
475
476
477int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
478 return ToRepresentation(op, Representation::Integer32());
479}
480
481
482int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
483 const Representation& r) const {
484 HConstant* constant = chunk_->LookupConstant(op);
485 int32_t value = constant->Integer32Value();
486 if (r.IsInteger32()) return value;
487 DCHECK(r.IsSmiOrTagged());
488 return reinterpret_cast<int32_t>(Smi::FromInt(value));
489}
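// Sketch of the Smi case, assuming the 32-bit Smi encoding (value << 1 with a
// zero tag bit): for value == 5, Smi::FromInt(5) reinterprets to the int32_t
// 10, which is what ends up in the generated code.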
490
491
492Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
493 HConstant* constant = chunk_->LookupConstant(op);
494 return Smi::FromInt(constant->Integer32Value());
495}
496
497
498double LCodeGen::ToDouble(LConstantOperand* op) const {
499 HConstant* constant = chunk_->LookupConstant(op);
500 DCHECK(constant->HasDoubleValue());
501 return constant->DoubleValue();
502}
503
504
505Operand LCodeGen::ToOperand(LOperand* op) {
506 if (op->IsConstantOperand()) {
507 LConstantOperand* const_op = LConstantOperand::cast(op);
508 HConstant* constant = chunk()->LookupConstant(const_op);
509 Representation r = chunk_->LookupLiteralRepresentation(const_op);
510 if (r.IsSmi()) {
511 DCHECK(constant->HasSmiValue());
512 return Operand(Smi::FromInt(constant->Integer32Value()));
513 } else if (r.IsInteger32()) {
514 DCHECK(constant->HasInteger32Value());
515 return Operand(constant->Integer32Value());
516 } else if (r.IsDouble()) {
517 Abort(kToOperandUnsupportedDoubleImmediate);
518 }
519 DCHECK(r.IsTagged());
520 return Operand(constant->handle(isolate()));
521 } else if (op->IsRegister()) {
522 return Operand(ToRegister(op));
523 } else if (op->IsDoubleRegister()) {
524 Abort(kToOperandIsDoubleRegisterUnimplemented);
525 return Operand::Zero();
526 }
527 // Stack slots not implemented, use ToMemOperand instead.
528 UNREACHABLE();
529 return Operand::Zero();
530}
531
532
533static int ArgumentsOffsetWithoutFrame(int index) {
534 DCHECK(index < 0);
535 return -(index + 1) * kPointerSize;
536}
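// Worked example: for index == -1 this returns 0 (the argument at sp), for
// index == -2 it returns kPointerSize, and so on up the stack.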
537
538
539MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
540 DCHECK(!op->IsRegister());
541 DCHECK(!op->IsDoubleRegister());
542 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
543 if (NeedsEagerFrame()) {
544 return MemOperand(fp, FrameSlotToFPOffset(op->index()));
545 } else {
546 // Retrieve parameter without eager stack-frame relative to the
547 // stack-pointer.
548 return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
549 }
550}
551
552
553MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
554 DCHECK(op->IsDoubleStackSlot());
555 if (NeedsEagerFrame()) {
556 return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
557 } else {
558 // Retrieve parameter without eager stack-frame relative to the
559 // stack-pointer.
560 return MemOperand(
561 sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
562 }
563}
564
565
566void LCodeGen::WriteTranslation(LEnvironment* environment,
567 Translation* translation) {
568 if (environment == NULL) return;
569
570 // The translation includes one command per value in the environment.
571 int translation_size = environment->translation_size();
572
573 WriteTranslation(environment->outer(), translation);
574 WriteTranslationFrame(environment, translation);
575
576 int object_index = 0;
577 int dematerialized_index = 0;
578 for (int i = 0; i < translation_size; ++i) {
579 LOperand* value = environment->values()->at(i);
580 AddToTranslation(
581 environment, translation, value, environment->HasTaggedValueAt(i),
582 environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
583 }
584}
585
586
587void LCodeGen::AddToTranslation(LEnvironment* environment,
588 Translation* translation,
589 LOperand* op,
590 bool is_tagged,
591 bool is_uint32,
592 int* object_index_pointer,
593 int* dematerialized_index_pointer) {
594 if (op == LEnvironment::materialization_marker()) {
595 int object_index = (*object_index_pointer)++;
596 if (environment->ObjectIsDuplicateAt(object_index)) {
597 int dupe_of = environment->ObjectDuplicateOfAt(object_index);
598 translation->DuplicateObject(dupe_of);
599 return;
600 }
601 int object_length = environment->ObjectLengthAt(object_index);
602 if (environment->ObjectIsArgumentsAt(object_index)) {
603 translation->BeginArgumentsObject(object_length);
604 } else {
605 translation->BeginCapturedObject(object_length);
606 }
607 int dematerialized_index = *dematerialized_index_pointer;
608 int env_offset = environment->translation_size() + dematerialized_index;
609 *dematerialized_index_pointer += object_length;
610 for (int i = 0; i < object_length; ++i) {
611 LOperand* value = environment->values()->at(env_offset + i);
612 AddToTranslation(environment,
613 translation,
614 value,
615 environment->HasTaggedValueAt(env_offset + i),
616 environment->HasUint32ValueAt(env_offset + i),
617 object_index_pointer,
618 dematerialized_index_pointer);
619 }
620 return;
621 }
622
623 if (op->IsStackSlot()) {
624 int index = op->index();
625 if (is_tagged) {
626 translation->StoreStackSlot(index);
627 } else if (is_uint32) {
628 translation->StoreUint32StackSlot(index);
629 } else {
630 translation->StoreInt32StackSlot(index);
631 }
632 } else if (op->IsDoubleStackSlot()) {
633 int index = op->index();
634 translation->StoreDoubleStackSlot(index);
635 } else if (op->IsRegister()) {
636 Register reg = ToRegister(op);
637 if (is_tagged) {
638 translation->StoreRegister(reg);
639 } else if (is_uint32) {
640 translation->StoreUint32Register(reg);
641 } else {
642 translation->StoreInt32Register(reg);
643 }
644 } else if (op->IsDoubleRegister()) {
645 DoubleRegister reg = ToDoubleRegister(op);
646 translation->StoreDoubleRegister(reg);
647 } else if (op->IsConstantOperand()) {
648 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
649 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
650 translation->StoreLiteral(src_index);
651 } else {
652 UNREACHABLE();
653 }
654}
655
656
657int LCodeGen::CallCodeSize(Handle<Code> code, RelocInfo::Mode mode) {
658 int size = masm()->CallSize(code, mode);
659 if (code->kind() == Code::BINARY_OP_IC ||
660 code->kind() == Code::COMPARE_IC) {
661 size += Assembler::kInstrSize; // extra nop() added in CallCodeGeneric.
662 }
663 return size;
664}
665
666
667void LCodeGen::CallCode(Handle<Code> code,
668 RelocInfo::Mode mode,
669 LInstruction* instr,
670 TargetAddressStorageMode storage_mode) {
671 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
672}
673
674
675void LCodeGen::CallCodeGeneric(Handle<Code> code,
676 RelocInfo::Mode mode,
677 LInstruction* instr,
678 SafepointMode safepoint_mode,
679 TargetAddressStorageMode storage_mode) {
680 DCHECK(instr != NULL);
681 // Block literal pool emission to ensure nop indicating no inlined smi code
682 // is in the correct position.
683 Assembler::BlockConstPoolScope block_const_pool(masm());
684 __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
685 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
686
687 // Signal that we don't inline smi code before these stubs in the
688 // optimizing code generator.
689 if (code->kind() == Code::BINARY_OP_IC ||
690 code->kind() == Code::COMPARE_IC) {
691 __ nop();
692 }
693}
694
695
696void LCodeGen::CallRuntime(const Runtime::Function* function,
697 int num_arguments,
698 LInstruction* instr,
699 SaveFPRegsMode save_doubles) {
700 DCHECK(instr != NULL);
701
702 __ CallRuntime(function, num_arguments, save_doubles);
703
704 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
705}
706
707
708void LCodeGen::LoadContextFromDeferred(LOperand* context) {
709 if (context->IsRegister()) {
710 __ Move(cp, ToRegister(context));
711 } else if (context->IsStackSlot()) {
712 __ ldr(cp, ToMemOperand(context));
713 } else if (context->IsConstantOperand()) {
714 HConstant* constant =
715 chunk_->LookupConstant(LConstantOperand::cast(context));
716 __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
717 } else {
718 UNREACHABLE();
719 }
720}
721
722
723void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
724 int argc,
725 LInstruction* instr,
726 LOperand* context) {
727 LoadContextFromDeferred(context);
728 __ CallRuntimeSaveDoubles(id);
729 RecordSafepointWithRegisters(
730 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
731}
732
733
734void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
735 Safepoint::DeoptMode mode) {
736 environment->set_has_been_used();
737 if (!environment->HasBeenRegistered()) {
738 // Physical stack frame layout:
739 // -x ............. -4 0 ..................................... y
740 // [incoming arguments] [spill slots] [pushed outgoing arguments]
741
742 // Layout of the environment:
743 // 0 ..................................................... size-1
744 // [parameters] [locals] [expression stack including arguments]
745
746 // Layout of the translation:
747 // 0 ........................................................ size - 1 + 4
748 // [expression stack including arguments] [locals] [4 words] [parameters]
749 // |>------------ translation_size ------------<|
750
751 int frame_count = 0;
752 int jsframe_count = 0;
753 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
754 ++frame_count;
755 if (e->frame_type() == JS_FUNCTION) {
756 ++jsframe_count;
757 }
758 }
759 Translation translation(&translations_, frame_count, jsframe_count, zone());
760 WriteTranslation(environment, &translation);
761 int deoptimization_index = deoptimizations_.length();
762 int pc_offset = masm()->pc_offset();
763 environment->Register(deoptimization_index,
764 translation.index(),
765 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
766 deoptimizations_.Add(environment, zone());
767 }
768}
769
770
771void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
772 Deoptimizer::DeoptReason deopt_reason,
773 Deoptimizer::BailoutType bailout_type) {
774 LEnvironment* environment = instr->environment();
775 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
776 DCHECK(environment->HasBeenRegistered());
777 int id = environment->deoptimization_index();
778 Address entry =
779 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
780 if (entry == NULL) {
781 Abort(kBailoutWasNotPrepared);
782 return;
783 }
784
785 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
786 Register scratch = scratch0();
787 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
788
789 // Store the condition on the stack if necessary
790 if (condition != al) {
791 __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition));
792 __ mov(scratch, Operand(1), LeaveCC, condition);
793 __ push(scratch);
794 }
795
796 __ push(r1);
797 __ mov(scratch, Operand(count));
798 __ ldr(r1, MemOperand(scratch));
799 __ sub(r1, r1, Operand(1), SetCC);
800 __ mov(r1, Operand(FLAG_deopt_every_n_times), LeaveCC, eq);
801 __ str(r1, MemOperand(scratch));
802 __ pop(r1);
803
804 if (condition != al) {
805 // Clean up the stack before the deoptimizer call
806 __ pop(scratch);
807 }
808
809 __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq);
810
811 // 'Restore' the condition in a slightly hacky way. (It would be better
812 // to use 'msr' and 'mrs' instructions here, but they are not supported by
813 // our ARM simulator).
814 if (condition != al) {
815 condition = ne;
816 __ cmp(scratch, Operand::Zero());
817 }
818 }
819
820 if (info()->ShouldTrapOnDeopt()) {
821 __ stop("trap_on_deopt", condition);
822 }
823
824 Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
825
826 DCHECK(info()->IsStub() || frame_is_built_);
827 // Go through jump table if we need to handle condition, build frame, or
828 // restore caller doubles.
829 if (condition == al && frame_is_built_ &&
830 !info()->saves_caller_doubles()) {
831 DeoptComment(deopt_info);
832 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
833 } else {
834 Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
835 !frame_is_built_);
836 // We often have several deopts to the same entry, reuse the last
837 // jump entry if this is the case.
838 if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
839 jump_table_.is_empty() ||
840 !table_entry.IsEquivalentTo(jump_table_.last())) {
841 jump_table_.Add(table_entry, zone());
842 }
843 __ b(condition, &jump_table_.last().label);
844 }
845}
846
847
848void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
849 Deoptimizer::DeoptReason deopt_reason) {
850 Deoptimizer::BailoutType bailout_type = info()->IsStub()
851 ? Deoptimizer::LAZY
852 : Deoptimizer::EAGER;
853 DeoptimizeIf(condition, instr, deopt_reason, bailout_type);
854}
855
856
857void LCodeGen::RecordSafepointWithLazyDeopt(
858 LInstruction* instr, SafepointMode safepoint_mode) {
859 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
860 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
861 } else {
862 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
863 RecordSafepointWithRegisters(
864 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
865 }
866}
867
868
869void LCodeGen::RecordSafepoint(
870 LPointerMap* pointers,
871 Safepoint::Kind kind,
872 int arguments,
873 Safepoint::DeoptMode deopt_mode) {
874 DCHECK(expected_safepoint_kind_ == kind);
875
876 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
877 Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
878 kind, arguments, deopt_mode);
879 for (int i = 0; i < operands->length(); i++) {
880 LOperand* pointer = operands->at(i);
881 if (pointer->IsStackSlot()) {
882 safepoint.DefinePointerSlot(pointer->index(), zone());
883 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
884 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
885 }
886 }
887}
888
889
890void LCodeGen::RecordSafepoint(LPointerMap* pointers,
891 Safepoint::DeoptMode deopt_mode) {
892 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
893}
894
895
896void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
897 LPointerMap empty_pointers(zone());
898 RecordSafepoint(&empty_pointers, deopt_mode);
899}
900
901
902void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
903 int arguments,
904 Safepoint::DeoptMode deopt_mode) {
905 RecordSafepoint(
906 pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
907}
908
909
910void LCodeGen::RecordAndWritePosition(int position) {
911 if (position == RelocInfo::kNoPosition) return;
912 masm()->positions_recorder()->RecordPosition(position);
913 masm()->positions_recorder()->WriteRecordedPositions();
914}
915
916
917static const char* LabelType(LLabel* label) {
918 if (label->is_loop_header()) return " (loop header)";
919 if (label->is_osr_entry()) return " (OSR entry)";
920 return "";
921}
922
923
924void LCodeGen::DoLabel(LLabel* label) {
925 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
926 current_instruction_,
927 label->hydrogen_value()->id(),
928 label->block_id(),
929 LabelType(label));
930 __ bind(label->label());
931 current_block_ = label->block_id();
932 DoGap(label);
933}
934
935
936void LCodeGen::DoParallelMove(LParallelMove* move) {
937 resolver_.Resolve(move);
938}
939
940
941void LCodeGen::DoGap(LGap* gap) {
942 for (int i = LGap::FIRST_INNER_POSITION;
943 i <= LGap::LAST_INNER_POSITION;
944 i++) {
945 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
946 LParallelMove* move = gap->GetParallelMove(inner_pos);
947 if (move != NULL) DoParallelMove(move);
948 }
949}
950
951
952void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
953 DoGap(instr);
954}
955
956
957void LCodeGen::DoParameter(LParameter* instr) {
958 // Nothing to do.
959}
960
961
962void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
963 GenerateOsrPrologue();
964}
965
966
967void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
968 Register dividend = ToRegister(instr->dividend());
969 int32_t divisor = instr->divisor();
970 DCHECK(dividend.is(ToRegister(instr->result())));
971
972 // Theoretically, a variation of the branch-free code for integer division by
973 // a power of 2 (calculating the remainder via an additional multiplication
974 // (which gets simplified to an 'and') and subtraction) should be faster, and
975 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
976 // indicate that positive dividends are heavily favored, so the branching
977 // version performs better.
978 HMod* hmod = instr->hydrogen();
979 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
980 Label dividend_is_not_negative, done;
981 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
982 __ cmp(dividend, Operand::Zero());
983 __ b(pl, &dividend_is_not_negative);
984 // Note that this is correct even for kMinInt operands.
985 __ rsb(dividend, dividend, Operand::Zero());
986 __ and_(dividend, dividend, Operand(mask));
987 __ rsb(dividend, dividend, Operand::Zero(), SetCC);
988 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
989 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
990 }
991 __ b(&done);
992 }
993
994 __ bind(&dividend_is_not_negative);
995 __ and_(dividend, dividend, Operand(mask));
996 __ bind(&done);
997}
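// Worked example with a hypothetical divisor of 8 (mask == 7): a dividend of
// 13 takes the fall-through path and yields 13 & 7 == 5; a dividend of -13 is
// negated to 13, masked to 5, and negated back to -5, matching the truncating
// semantics of % (-13 % 8 == -5).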
998
999
1000void LCodeGen::DoModByConstI(LModByConstI* instr) {
1001 Register dividend = ToRegister(instr->dividend());
1002 int32_t divisor = instr->divisor();
1003 Register result = ToRegister(instr->result());
1004 DCHECK(!dividend.is(result));
1005
1006 if (divisor == 0) {
1007 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
1008 return;
1009 }
1010
1011 __ TruncatingDiv(result, dividend, Abs(divisor));
1012 __ mov(ip, Operand(Abs(divisor)));
1013 __ smull(result, ip, result, ip);
1014 __ sub(result, dividend, result, SetCC);
1015
1016 // Check for negative zero.
1017 HMod* hmod = instr->hydrogen();
1018 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1019 Label remainder_not_zero;
1020 __ b(ne, &remainder_not_zero);
1021 __ cmp(dividend, Operand::Zero());
1022 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
1023 __ bind(&remainder_not_zero);
1024 }
1025}
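// Worked example with hypothetical operands dividend == 13, divisor == -5:
// TruncatingDiv(13, 5) == 2, so result = 13 - 2 * 5 == 3, which matches
// truncating division semantics (13 % -5 == 3).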
1026
1027
1028void LCodeGen::DoModI(LModI* instr) {
1029 HMod* hmod = instr->hydrogen();
1030 if (CpuFeatures::IsSupported(SUDIV)) {
1031 CpuFeatureScope scope(masm(), SUDIV);
1032
1033 Register left_reg = ToRegister(instr->left());
1034 Register right_reg = ToRegister(instr->right());
1035 Register result_reg = ToRegister(instr->result());
1036
1037 Label done;
1038 // Check for x % 0, sdiv might signal an exception. We have to deopt in this
1039 // case because we can't return a NaN.
1040 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1041 __ cmp(right_reg, Operand::Zero());
1042 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
1043 }
1044
1045 // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
1046 // want. We have to deopt if we care about -0, because we can't return that.
1047 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1048 Label no_overflow_possible;
1049 __ cmp(left_reg, Operand(kMinInt));
1050 __ b(ne, &no_overflow_possible);
1051 __ cmp(right_reg, Operand(-1));
1052 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1053 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1054 } else {
1055 __ b(ne, &no_overflow_possible);
1056 __ mov(result_reg, Operand::Zero());
1057 __ jmp(&done);
1058 }
1059 __ bind(&no_overflow_possible);
1060 }
1061
1062 // For 'r3 = r1 % r2' we can have the following ARM code:
1063 // sdiv r3, r1, r2
1064 // mls r3, r3, r2, r1
1065
1066 __ sdiv(result_reg, left_reg, right_reg);
1067 __ Mls(result_reg, result_reg, right_reg, left_reg);
1068
1069 // If we care about -0, test if the dividend is <0 and the result is 0.
1070 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1071 __ cmp(result_reg, Operand::Zero());
1072 __ b(ne, &done);
1073 __ cmp(left_reg, Operand::Zero());
1074 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
1075 }
1076 __ bind(&done);
1077
1078 } else {
1079 // General case, without any SDIV support.
1080 Register left_reg = ToRegister(instr->left());
1081 Register right_reg = ToRegister(instr->right());
1082 Register result_reg = ToRegister(instr->result());
1083 Register scratch = scratch0();
1084 DCHECK(!scratch.is(left_reg));
1085 DCHECK(!scratch.is(right_reg));
1086 DCHECK(!scratch.is(result_reg));
1087 DwVfpRegister dividend = ToDoubleRegister(instr->temp());
1088 DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
1089 DCHECK(!divisor.is(dividend));
1090 LowDwVfpRegister quotient = double_scratch0();
1091 DCHECK(!quotient.is(dividend));
1092 DCHECK(!quotient.is(divisor));
1093
1094 Label done;
1095 // Check for x % 0, we have to deopt in this case because we can't return a
1096 // NaN.
1097 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1098 __ cmp(right_reg, Operand::Zero());
1099 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
1100 }
1101
1102 __ Move(result_reg, left_reg);
1103 // Load the arguments in VFP registers. The divisor value is preloaded
1104 // before. Be careful that 'right_reg' is only live on entry.
1105 // TODO(svenpanne) The last comment seems to be wrong nowadays.
1106 __ vmov(double_scratch0().low(), left_reg);
1107 __ vcvt_f64_s32(dividend, double_scratch0().low());
1108 __ vmov(double_scratch0().low(), right_reg);
1109 __ vcvt_f64_s32(divisor, double_scratch0().low());
1110
1111 // We do not care about the sign of the divisor. Note that we still handle
1112 // the kMinInt % -1 case correctly, though.
1113 __ vabs(divisor, divisor);
1114 // Compute the quotient and round it to a 32bit integer.
1115 __ vdiv(quotient, dividend, divisor);
1116 __ vcvt_s32_f64(quotient.low(), quotient);
1117 __ vcvt_f64_s32(quotient, quotient.low());
1118
1119 // Compute the remainder in result.
1120 __ vmul(double_scratch0(), divisor, quotient);
1121 __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
1122 __ vmov(scratch, double_scratch0().low());
1123 __ sub(result_reg, left_reg, scratch, SetCC);
1124
1125 // If we care about -0, test if the dividend is <0 and the result is 0.
1126 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1127 __ b(ne, &done);
1128 __ cmp(left_reg, Operand::Zero());
1129 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
1130 }
1131 __ bind(&done);
1132 }
1133}
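// Worked example for the sdiv/mls path with left == -7, right == 3:
// sdiv truncates to -2 and mls computes -7 - (-2 * 3) == -1, the expected
// truncated remainder (-7 % 3 == -1).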
1134
1135
1136void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1137 Register dividend = ToRegister(instr->dividend());
1138 int32_t divisor = instr->divisor();
1139 Register result = ToRegister(instr->result());
1140 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1141 DCHECK(!result.is(dividend));
1142
1143 // Check for (0 / -x) that will produce negative zero.
1144 HDiv* hdiv = instr->hydrogen();
1145 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1146 __ cmp(dividend, Operand::Zero());
1147 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1148 }
1149 // Check for (kMinInt / -1).
1150 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1151 __ cmp(dividend, Operand(kMinInt));
1152 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
1153 }
1154 // Deoptimize if remainder will not be 0.
1155 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1156 divisor != 1 && divisor != -1) {
1157 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1158 __ tst(dividend, Operand(mask));
1159 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
1160 }
1161
1162 if (divisor == -1) { // Nice shortcut, not needed for correctness.
1163 __ rsb(result, dividend, Operand(0));
1164 return;
1165 }
1166 int32_t shift = WhichPowerOf2Abs(divisor);
1167 if (shift == 0) {
1168 __ mov(result, dividend);
1169 } else if (shift == 1) {
1170 __ add(result, dividend, Operand(dividend, LSR, 31));
1171 } else {
1172 __ mov(result, Operand(dividend, ASR, 31));
1173 __ add(result, dividend, Operand(result, LSR, 32 - shift));
1174 }
1175 if (shift > 0) __ mov(result, Operand(result, ASR, shift));
1176 if (divisor < 0) __ rsb(result, result, Operand(0));
1177}
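// Worked example with a hypothetical divisor of 4 (shift == 2) and a dividend
// of -7: the sign mask is -1, shifting it logically right by 30 gives 3, which
// is added to produce -4, and the arithmetic shift right by 2 yields -1, the
// truncated quotient of -7 / 4. For a positive dividend the bias is 0 and the
// shift alone suffices.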
1178
1179
1180void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1181 Register dividend = ToRegister(instr->dividend());
1182 int32_t divisor = instr->divisor();
1183 Register result = ToRegister(instr->result());
1184 DCHECK(!dividend.is(result));
1185
1186 if (divisor == 0) {
1187 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
1188 return;
1189 }
1190
1191 // Check for (0 / -x) that will produce negative zero.
1192 HDiv* hdiv = instr->hydrogen();
1193 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1194 __ cmp(dividend, Operand::Zero());
1195 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1196 }
1197
1198 __ TruncatingDiv(result, dividend, Abs(divisor));
1199 if (divisor < 0) __ rsb(result, result, Operand::Zero());
1200
1201 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1202 __ mov(ip, Operand(divisor));
1203 __ smull(scratch0(), ip, result, ip);
1204 __ sub(scratch0(), scratch0(), dividend, SetCC);
1205 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
1206 }
1207}
1208
1209
1210// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1211void LCodeGen::DoDivI(LDivI* instr) {
1212 HBinaryOperation* hdiv = instr->hydrogen();
1213 Register dividend = ToRegister(instr->dividend());
1214 Register divisor = ToRegister(instr->divisor());
1215 Register result = ToRegister(instr->result());
1216
1217 // Check for x / 0.
1218 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1219 __ cmp(divisor, Operand::Zero());
1220 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
1221 }
1222
1223 // Check for (0 / -x) that will produce negative zero.
1224 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1225 Label positive;
1226 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
1227 // Do the test only if it hadn't been done above.
1228 __ cmp(divisor, Operand::Zero());
1229 }
1230 __ b(pl, &positive);
1231 __ cmp(dividend, Operand::Zero());
1232 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1233 __ bind(&positive);
1234 }
1235
1236 // Check for (kMinInt / -1).
1237 if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1238 (!CpuFeatures::IsSupported(SUDIV) ||
1239 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
1240 // We don't need to check for overflow when truncating with sdiv
1241 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
1242 __ cmp(dividend, Operand(kMinInt));
1243 __ cmp(divisor, Operand(-1), eq);
1244 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
1245 }
1246
1247 if (CpuFeatures::IsSupported(SUDIV)) {
1248 CpuFeatureScope scope(masm(), SUDIV);
1249 __ sdiv(result, dividend, divisor);
1250 } else {
1251 DoubleRegister vleft = ToDoubleRegister(instr->temp());
1252 DoubleRegister vright = double_scratch0();
1253 __ vmov(double_scratch0().low(), dividend);
1254 __ vcvt_f64_s32(vleft, double_scratch0().low());
1255 __ vmov(double_scratch0().low(), divisor);
1256 __ vcvt_f64_s32(vright, double_scratch0().low());
1257 __ vdiv(vleft, vleft, vright); // vleft now contains the result.
1258 __ vcvt_s32_f64(double_scratch0().low(), vleft);
1259 __ vmov(result, double_scratch0().low());
1260 }
1261
1262 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1263 // Compute remainder and deopt if it's not zero.
1264 Register remainder = scratch0();
1265 __ Mls(remainder, result, divisor, dividend);
1266 __ cmp(remainder, Operand::Zero());
1267 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
1268 }
1269}
1270
1271
1272void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1273 DwVfpRegister addend = ToDoubleRegister(instr->addend());
1274 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
1275 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1276
1277 // This is computed in-place.
1278 DCHECK(addend.is(ToDoubleRegister(instr->result())));
1279
1280 __ vmla(addend, multiplier, multiplicand);
1281}
1282
1283
1284void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
1285 DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
1286 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
1287 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1288
1289 // This is computed in-place.
1290 DCHECK(minuend.is(ToDoubleRegister(instr->result())));
1291
1292 __ vmls(minuend, multiplier, multiplicand);
1293}
1294
1295
1296void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1297 Register dividend = ToRegister(instr->dividend());
1298 Register result = ToRegister(instr->result());
1299 int32_t divisor = instr->divisor();
1300
1301 // If the divisor is 1, return the dividend.
1302 if (divisor == 1) {
1303 __ Move(result, dividend);
1304 return;
1305 }
1306
1307 // If the divisor is positive, things are easy: There can be no deopts and we
1308 // can simply do an arithmetic right shift.
1309 int32_t shift = WhichPowerOf2Abs(divisor);
1310 if (divisor > 1) {
1311 __ mov(result, Operand(dividend, ASR, shift));
1312 return;
1313 }
1314
1315 // If the divisor is negative, we have to negate and handle edge cases.
1316 __ rsb(result, dividend, Operand::Zero(), SetCC);
1317 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1318 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1319 }
1320
1321 // Dividing by -1 is basically negation, unless we overflow.
1322 if (divisor == -1) {
1323 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1324 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
1325 }
1326 return;
1327 }
1328
1329 // If the negation could not overflow, simply shifting is OK.
1330 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1331 __ mov(result, Operand(result, ASR, shift));
1332 return;
1333 }
1334
1335 __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
1336 __ mov(result, Operand(result, ASR, shift), LeaveCC, vc);
1337}
1338
1339
1340void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1341 Register dividend = ToRegister(instr->dividend());
1342 int32_t divisor = instr->divisor();
1343 Register result = ToRegister(instr->result());
1344 DCHECK(!dividend.is(result));
1345
1346 if (divisor == 0) {
1347 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
1348 return;
1349 }
1350
1351 // Check for (0 / -x) that will produce negative zero.
1352 HMathFloorOfDiv* hdiv = instr->hydrogen();
1353 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1354 __ cmp(dividend, Operand::Zero());
1355 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1356 }
1357
1358 // Easy case: We need no dynamic check for the dividend and the flooring
1359 // division is the same as the truncating division.
1360 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1361 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1362 __ TruncatingDiv(result, dividend, Abs(divisor));
1363 if (divisor < 0) __ rsb(result, result, Operand::Zero());
1364 return;
1365 }
1366
1367 // In the general case we may need to adjust before and after the truncating
1368 // division to get a flooring division.
1369 Register temp = ToRegister(instr->temp());
1370 DCHECK(!temp.is(dividend) && !temp.is(result));
1371 Label needs_adjustment, done;
1372 __ cmp(dividend, Operand::Zero());
1373 __ b(divisor > 0 ? lt : gt, &needs_adjustment);
1374 __ TruncatingDiv(result, dividend, Abs(divisor));
1375 if (divisor < 0) __ rsb(result, result, Operand::Zero());
1376 __ jmp(&done);
1377 __ bind(&needs_adjustment);
1378 __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
1379 __ TruncatingDiv(result, temp, Abs(divisor));
1380 if (divisor < 0) __ rsb(result, result, Operand::Zero());
1381 __ sub(result, result, Operand(1));
1382 __ bind(&done);
1383}
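// Worked example with hypothetical operands dividend == -7, divisor == 3:
// the signs differ, so temp = -7 + 1 == -6, TruncatingDiv(-6, 3) == -2, and
// subtracting 1 gives -3 == floor(-7 / 3); a truncating division alone would
// have produced -2.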
1384
1385
1386// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1387void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1388 HBinaryOperation* hdiv = instr->hydrogen();
1389 Register left = ToRegister(instr->dividend());
1390 Register right = ToRegister(instr->divisor());
1391 Register result = ToRegister(instr->result());
1392
1393 // Check for x / 0.
1394 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1395 __ cmp(right, Operand::Zero());
1396 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
1397 }
1398
1399 // Check for (0 / -x) that will produce negative zero.
1400 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1401 Label positive;
1402 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
1403 // Do the test only if it hadn't been done above.
1404 __ cmp(right, Operand::Zero());
1405 }
1406 __ b(pl, &positive);
1407 __ cmp(left, Operand::Zero());
1408 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1409 __ bind(&positive);
1410 }
1411
1412 // Check for (kMinInt / -1).
1413 if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1414 (!CpuFeatures::IsSupported(SUDIV) ||
1415 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
1416 // We don't need to check for overflow when truncating with sdiv
1417 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
1418 __ cmp(left, Operand(kMinInt));
1419 __ cmp(right, Operand(-1), eq);
1420 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
1421 }
1422
1423 if (CpuFeatures::IsSupported(SUDIV)) {
1424 CpuFeatureScope scope(masm(), SUDIV);
1425 __ sdiv(result, left, right);
1426 } else {
1427 DoubleRegister vleft = ToDoubleRegister(instr->temp());
1428 DoubleRegister vright = double_scratch0();
1429 __ vmov(double_scratch0().low(), left);
1430 __ vcvt_f64_s32(vleft, double_scratch0().low());
1431 __ vmov(double_scratch0().low(), right);
1432 __ vcvt_f64_s32(vright, double_scratch0().low());
1433 __ vdiv(vleft, vleft, vright); // vleft now contains the result.
1434 __ vcvt_s32_f64(double_scratch0().low(), vleft);
1435 __ vmov(result, double_scratch0().low());
1436 }
1437
1438 Label done;
1439 Register remainder = scratch0();
1440 __ Mls(remainder, result, right, left);
1441 __ cmp(remainder, Operand::Zero());
1442 __ b(eq, &done);
1443 __ eor(remainder, remainder, Operand(right));
1444 __ add(result, result, Operand(remainder, ASR, 31));
1445 __ bind(&done);
1446}
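// Worked example with left == -7, right == 3: sdiv gives -2 and the remainder
// is -1; remainder ^ right is negative, so (remainder ^ right) >> 31 (arithmetic)
// contributes -1 and the quotient is corrected to -3 == floor(-7 / 3). When the
// remainder and the divisor have the same sign the correction term is 0.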
1447
1448
1449void LCodeGen::DoMulI(LMulI* instr) {
1450 Register result = ToRegister(instr->result());
1451 // Note that result may alias left.
1452 Register left = ToRegister(instr->left());
1453 LOperand* right_op = instr->right();
1454
1455 bool bailout_on_minus_zero =
1456 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1457 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1458
1459 if (right_op->IsConstantOperand()) {
1460 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1461
1462 if (bailout_on_minus_zero && (constant < 0)) {
1463 // The case of a zero constant will be handled separately.
1464 // If the constant is negative and left is zero, the result should be -0.
1465 __ cmp(left, Operand::Zero());
1466 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1467 }
1468
1469 switch (constant) {
1470 case -1:
1471 if (overflow) {
1472 __ rsb(result, left, Operand::Zero(), SetCC);
1473 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
1474 } else {
1475 __ rsb(result, left, Operand::Zero());
1476 }
1477 break;
1478 case 0:
1479 if (bailout_on_minus_zero) {
1480 // If left is strictly negative and the constant is zero, the
1481 // result is -0. Deoptimize if required, otherwise return 0.
1482 __ cmp(left, Operand::Zero());
1483 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
1484 }
1485 __ mov(result, Operand::Zero());
1486 break;
1487 case 1:
1488 __ Move(result, left);
1489 break;
1490 default:
1491 // Multiplying by powers of two and powers of two plus or minus
1492 // one can be done faster with shifted operands.
1493 // For other constants we emit standard code.
1494 int32_t mask = constant >> 31;
1495 uint32_t constant_abs = (constant + mask) ^ mask;
1496
1497 if (base::bits::IsPowerOfTwo32(constant_abs)) {
1498 int32_t shift = WhichPowerOf2(constant_abs);
1499 __ mov(result, Operand(left, LSL, shift));
1500 // Correct the sign of the result if the constant is negative.
1501 if (constant < 0) __ rsb(result, result, Operand::Zero());
1502 } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
1503 int32_t shift = WhichPowerOf2(constant_abs - 1);
1504 __ add(result, left, Operand(left, LSL, shift));
1505 // Correct the sign of the result if the constant is negative.
1506 if (constant < 0) __ rsb(result, result, Operand::Zero());
1507 } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
1508 int32_t shift = WhichPowerOf2(constant_abs + 1);
1509 __ rsb(result, left, Operand(left, LSL, shift));
1510 // Correct the sign of the result if the constant is negative.
1511 if (constant < 0) __ rsb(result, result, Operand::Zero());
1512 } else {
1513 // Generate standard code.
1514 __ mov(ip, Operand(constant));
1515 __ mul(result, left, ip);
1516 }
1517 }
1518
1519 } else {
1520 DCHECK(right_op->IsRegister());
1521 Register right = ToRegister(right_op);
1522
1523 if (overflow) {
1524 Register scratch = scratch0();
1525 // scratch:result = left * right.
1526 if (instr->hydrogen()->representation().IsSmi()) {
1527 __ SmiUntag(result, left);
1528 __ smull(result, scratch, result, right);
1529 } else {
1530 __ smull(result, scratch, left, right);
1531 }
1532 __ cmp(scratch, Operand(result, ASR, 31));
1533 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
1534 } else {
1535 if (instr->hydrogen()->representation().IsSmi()) {
1536 __ SmiUntag(result, left);
1537 __ mul(result, result, right);
1538 } else {
1539 __ mul(result, left, right);
1540 }
1541 }
1542
1543 if (bailout_on_minus_zero) {
1544 Label done;
1545 __ teq(left, Operand(right));
1546 __ b(pl, &done);
1547 // Bail out if the result is minus zero.
1548 __ cmp(result, Operand::Zero());
1549 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1550 __ bind(&done);
1551 }
1552 }
1553}
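// Worked example for the shifted-operand path with a hypothetical constant of
// 7: constant_abs + 1 == 8 is a power of two, so the code emits
// rsb result, left, left LSL 3, i.e. result = 8 * left - left == 7 * left,
// avoiding a mul instruction.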
1554
1555
1556void LCodeGen::DoBitI(LBitI* instr) {
1557 LOperand* left_op = instr->left();
1558 LOperand* right_op = instr->right();
1559 DCHECK(left_op->IsRegister());
1560 Register left = ToRegister(left_op);
1561 Register result = ToRegister(instr->result());
1562 Operand right(no_reg);
1563
1564 if (right_op->IsStackSlot()) {
1565 right = Operand(EmitLoadRegister(right_op, ip));
1566 } else {
1567 DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
1568 right = ToOperand(right_op);
1569 }
1570
1571 switch (instr->op()) {
1572 case Token::BIT_AND:
1573 __ and_(result, left, right);
1574 break;
1575 case Token::BIT_OR:
1576 __ orr(result, left, right);
1577 break;
1578 case Token::BIT_XOR:
1579 if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
1580 __ mvn(result, Operand(left));
1581 } else {
1582 __ eor(result, left, right);
1583 }
1584 break;
1585 default:
1586 UNREACHABLE();
1587 break;
1588 }
1589}
1590
1591
1592void LCodeGen::DoShiftI(LShiftI* instr) {
1593 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1594 // result may alias either of them.
1595 LOperand* right_op = instr->right();
1596 Register left = ToRegister(instr->left());
1597 Register result = ToRegister(instr->result());
1598 Register scratch = scratch0();
1599 if (right_op->IsRegister()) {
1600 // Mask the right_op operand.
1601 __ and_(scratch, ToRegister(right_op), Operand(0x1F));
1602 switch (instr->op()) {
1603 case Token::ROR:
1604 __ mov(result, Operand(left, ROR, scratch));
1605 break;
1606 case Token::SAR:
1607 __ mov(result, Operand(left, ASR, scratch));
1608 break;
1609 case Token::SHR:
1610 if (instr->can_deopt()) {
1611 __ mov(result, Operand(left, LSR, scratch), SetCC);
1612 DeoptimizeIf(mi, instr, Deoptimizer::kNegativeValue);
1613 } else {
1614 __ mov(result, Operand(left, LSR, scratch));
1615 }
1616 break;
1617 case Token::SHL:
1618 __ mov(result, Operand(left, LSL, scratch));
1619 break;
1620 default:
1621 UNREACHABLE();
1622 break;
1623 }
1624 } else {
1625 // Mask the right_op operand.
1626 int value = ToInteger32(LConstantOperand::cast(right_op));
1627 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1628 switch (instr->op()) {
1629 case Token::ROR:
1630 if (shift_count != 0) {
1631 __ mov(result, Operand(left, ROR, shift_count));
1632 } else {
1633 __ Move(result, left);
1634 }
1635 break;
1636 case Token::SAR:
1637 if (shift_count != 0) {
1638 __ mov(result, Operand(left, ASR, shift_count));
1639 } else {
1640 __ Move(result, left);
1641 }
1642 break;
1643 case Token::SHR:
1644 if (shift_count != 0) {
1645 __ mov(result, Operand(left, LSR, shift_count));
1646 } else {
1647 if (instr->can_deopt()) {
1648 __ tst(left, Operand(0x80000000));
1649 DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
1650 }
1651 __ Move(result, left);
1652 }
1653 break;
1654 case Token::SHL:
1655 if (shift_count != 0) {
1656 if (instr->hydrogen_value()->representation().IsSmi() &&
1657 instr->can_deopt()) {
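// Do the final doubling with SmiTag and SetCC so the overflow (V) flag is
// set when the shifted value does not fit in a smi.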
1658 if (shift_count != 1) {
1659 __ mov(result, Operand(left, LSL, shift_count - 1));
1660 __ SmiTag(result, result, SetCC);
1661 } else {
1662 __ SmiTag(result, left, SetCC);
1663 }
1664 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
1665 } else {
1666 __ mov(result, Operand(left, LSL, shift_count));
1667 }
1668 } else {
1669 __ Move(result, left);
1670 }
1671 break;
1672 default:
1673 UNREACHABLE();
1674 break;
1675 }
1676 }
1677}
1678
1679
1680void LCodeGen::DoSubI(LSubI* instr) {
1681 LOperand* left = instr->left();
1682 LOperand* right = instr->right();
1683 LOperand* result = instr->result();
1684 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1685 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1686
1687 if (right->IsStackSlot()) {
1688 Register right_reg = EmitLoadRegister(right, ip);
1689 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1690 } else {
1691 DCHECK(right->IsRegister() || right->IsConstantOperand());
1692 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1693 }
1694
1695 if (can_overflow) {
1696 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
1697 }
1698}
1699
1700
1701void LCodeGen::DoRSubI(LRSubI* instr) {
1702 LOperand* left = instr->left();
1703 LOperand* right = instr->right();
1704 LOperand* result = instr->result();
1705 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1706 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1707
1708 if (right->IsStackSlot()) {
1709 Register right_reg = EmitLoadRegister(right, ip);
1710 __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1711 } else {
1712 DCHECK(right->IsRegister() || right->IsConstantOperand());
1713 __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1714 }
1715
1716 if (can_overflow) {
1717 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
1718 }
1719}
1720
1721
1722void LCodeGen::DoConstantI(LConstantI* instr) {
1723 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1724}
1725
1726
1727void LCodeGen::DoConstantS(LConstantS* instr) {
1728 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1729}
1730
1731
1732void LCodeGen::DoConstantD(LConstantD* instr) {
1733 DCHECK(instr->result()->IsDoubleRegister());
1734 DwVfpRegister result = ToDoubleRegister(instr->result());
1735#if V8_HOST_ARCH_IA32
1736 // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
1737 // builds.
1738 uint64_t bits = instr->bits();
1739 if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
1740 V8_UINT64_C(0x7FF0000000000000)) {
1741 uint32_t lo = static_cast<uint32_t>(bits);
1742 uint32_t hi = static_cast<uint32_t>(bits >> 32);
1743 __ mov(ip, Operand(lo));
1744 __ mov(scratch0(), Operand(hi));
1745 __ vmov(result, ip, scratch0());
1746 return;
1747 }
1748#endif
1749 double v = instr->value();
1750 __ Vmov(result, v, scratch0());
1751}
1752
1753
1754void LCodeGen::DoConstantE(LConstantE* instr) {
1755 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1756}
1757
1758
1759void LCodeGen::DoConstantT(LConstantT* instr) {
1760 Handle<Object> object = instr->value(isolate());
1761 AllowDeferredHandleDereference smi_check;
1762 __ Move(ToRegister(instr->result()), object);
1763}
1764
1765
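// Builds a MemOperand addressing the character at |index| within a
// sequential string, for both one-byte and two-byte encodings.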
1766 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1767 LOperand* index,
1768 String::Encoding encoding) {
1769 if (index->IsConstantOperand()) {
1770 int offset = ToInteger32(LConstantOperand::cast(index));
1771 if (encoding == String::TWO_BYTE_ENCODING) {
1772 offset *= kUC16Size;
1773 }
1774 STATIC_ASSERT(kCharSize == 1);
1775 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1776 }
1777 Register scratch = scratch0();
1778 DCHECK(!scratch.is(string));
1779 DCHECK(!scratch.is(ToRegister(index)));
1780 if (encoding == String::ONE_BYTE_ENCODING) {
1781 __ add(scratch, string, Operand(ToRegister(index)));
1782 } else {
1783 STATIC_ASSERT(kUC16Size == 2);
1784 __ add(scratch, string, Operand(ToRegister(index), LSL, 1));
1785 }
1786 return FieldMemOperand(scratch, SeqString::kHeaderSize);
1787}
1788
1789
1790void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1791 String::Encoding encoding = instr->hydrogen()->encoding();
1792 Register string = ToRegister(instr->string());
1793 Register result = ToRegister(instr->result());
1794
1795 if (FLAG_debug_code) {
1796 Register scratch = scratch0();
1797 __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1798 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1799
1800 __ and_(scratch, scratch,
1801 Operand(kStringRepresentationMask | kStringEncodingMask));
1802 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1803 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1804 __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
1805 ? one_byte_seq_type : two_byte_seq_type));
1806 __ Check(eq, kUnexpectedStringType);
1807 }
1808
1809 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1810 if (encoding == String::ONE_BYTE_ENCODING) {
1811 __ ldrb(result, operand);
1812 } else {
1813 __ ldrh(result, operand);
1814 }
1815}
1816
1817
1818void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1819 String::Encoding encoding = instr->hydrogen()->encoding();
1820 Register string = ToRegister(instr->string());
1821 Register value = ToRegister(instr->value());
1822
1823 if (FLAG_debug_code) {
1824 Register index = ToRegister(instr->index());
1825 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1826 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1827 int encoding_mask =
1828 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1829 ? one_byte_seq_type : two_byte_seq_type;
1830 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1831 }
1832
1833 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1834 if (encoding == String::ONE_BYTE_ENCODING) {
1835 __ strb(value, operand);
1836 } else {
1837 __ strh(value, operand);
1838 }
1839}
1840
1841
1842void LCodeGen::DoAddI(LAddI* instr) {
1843 LOperand* left = instr->left();
1844 LOperand* right = instr->right();
1845 LOperand* result = instr->result();
1846 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1847 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1848
1849 if (right->IsStackSlot()) {
1850 Register right_reg = EmitLoadRegister(right, ip);
1851 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1852 } else {
1853 DCHECK(right->IsRegister() || right->IsConstantOperand());
1854 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1855 }
1856
1857 if (can_overflow) {
1858 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
1859 }
1860}
1861
1862
1863void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1864 LOperand* left = instr->left();
1865 LOperand* right = instr->right();
1866 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1867 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1868 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
1869 Register left_reg = ToRegister(left);
1870 Operand right_op = (right->IsRegister() || right->IsConstantOperand())
1871 ? ToOperand(right)
1872 : Operand(EmitLoadRegister(right, ip));
1873 Register result_reg = ToRegister(instr->result());
1874 __ cmp(left_reg, right_op);
1875 __ Move(result_reg, left_reg, condition);
1876 __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
1877 } else {
1878 DCHECK(instr->hydrogen()->representation().IsDouble());
1879 DwVfpRegister left_reg = ToDoubleRegister(left);
1880 DwVfpRegister right_reg = ToDoubleRegister(right);
1881 DwVfpRegister result_reg = ToDoubleRegister(instr->result());
1882 Label result_is_nan, return_left, return_right, check_zero, done;
1883 __ VFPCompareAndSetFlags(left_reg, right_reg);
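// After the VFP compare: 'mi' means left < right, 'gt' means left > right,
// and 'vs' means the comparison was unordered (at least one NaN).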
1884 if (operation == HMathMinMax::kMathMin) {
1885 __ b(mi, &return_left);
1886 __ b(gt, &return_right);
1887 } else {
1888 __ b(mi, &return_right);
1889 __ b(gt, &return_left);
1890 }
1891 __ b(vs, &result_is_nan);
1892 // Left equals right => check for -0.
1893 __ VFPCompareAndSetFlags(left_reg, 0.0);
1894 if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
1895 __ b(ne, &done); // left == right != 0.
1896 } else {
1897 __ b(ne, &return_left); // left == right != 0.
1898 }
1899 // At this point, both left and right are either 0 or -0.
1900 if (operation == HMathMinMax::kMathMin) {
1901 // We could use a single 'vorr' instruction here if we had NEON support.
1902 // The algorithm is: -((-L) + (-R)), which in case of L and R being
1903 // different registers is most efficiently expressed as -((-L) - R).
1904 __ vneg(left_reg, left_reg);
1905 if (left_reg.is(right_reg)) {
1906 __ vadd(result_reg, left_reg, right_reg);
1907 } else {
1908 __ vsub(result_reg, left_reg, right_reg);
1909 }
1910 __ vneg(result_reg, result_reg);
1911 } else {
1912 // Since we operate on +0 and/or -0, vadd and vand have the same effect;
1913 // the decision for vadd is easy because vand is a NEON instruction.
1914 __ vadd(result_reg, left_reg, right_reg);
1915 }
1916 __ b(&done);
1917
1918 __ bind(&result_is_nan);
1919 __ vadd(result_reg, left_reg, right_reg);
1920 __ b(&done);
1921
1922 __ bind(&return_right);
1923 __ Move(result_reg, right_reg);
1924 if (!left_reg.is(result_reg)) {
1925 __ b(&done);
1926 }
1927
1928 __ bind(&return_left);
1929 __ Move(result_reg, left_reg);
1930
1931 __ bind(&done);
1932 }
1933}
1934
1935
1936void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1937 DwVfpRegister left = ToDoubleRegister(instr->left());
1938 DwVfpRegister right = ToDoubleRegister(instr->right());
1939 DwVfpRegister result = ToDoubleRegister(instr->result());
1940 switch (instr->op()) {
1941 case Token::ADD:
1942 __ vadd(result, left, right);
1943 break;
1944 case Token::SUB:
1945 __ vsub(result, left, right);
1946 break;
1947 case Token::MUL:
1948 __ vmul(result, left, right);
1949 break;
1950 case Token::DIV:
1951 __ vdiv(result, left, right);
1952 break;
1953 case Token::MOD: {
1954 __ PrepareCallCFunction(0, 2, scratch0());
1955 __ MovToFloatParameters(left, right);
1956 __ CallCFunction(
1957 ExternalReference::mod_two_doubles_operation(isolate()),
1958 0, 2);
1959 // Move the result into the double result register.
1960 __ MovFromFloatResult(result);
1961 break;
1962 }
1963 default:
1964 UNREACHABLE();
1965 break;
1966 }
1967}
1968
1969
1970void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1971 DCHECK(ToRegister(instr->context()).is(cp));
1972 DCHECK(ToRegister(instr->left()).is(r1));
1973 DCHECK(ToRegister(instr->right()).is(r0));
1974 DCHECK(ToRegister(instr->result()).is(r0));
1975
1976 Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
1977 // Block literal pool emission to ensure nop indicating no inlined smi code
1978 // is in the correct position.
1979 Assembler::BlockConstPoolScope block_const_pool(masm());
1980 CallCode(code, RelocInfo::CODE_TARGET, instr);
1981}
1982
1983
1984template<class InstrType>
1985void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
1986 int left_block = instr->TrueDestination(chunk_);
1987 int right_block = instr->FalseDestination(chunk_);
1988
1989 int next_block = GetNextEmittedBlock();
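// Emit as few branches as possible by falling through whenever a
// destination is the next emitted block.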
1990
1991 if (right_block == left_block || condition == al) {
1992 EmitGoto(left_block);
1993 } else if (left_block == next_block) {
1994 __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block));
1995 } else if (right_block == next_block) {
1996 __ b(condition, chunk_->GetAssemblyLabel(left_block));
1997 } else {
1998 __ b(condition, chunk_->GetAssemblyLabel(left_block));
1999 __ b(chunk_->GetAssemblyLabel(right_block));
2000 }
2001}
2002
2003
2004template <class InstrType>
2005void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition) {
2006 int true_block = instr->TrueDestination(chunk_);
2007 __ b(condition, chunk_->GetAssemblyLabel(true_block));
2008}
2009
2010
2011template <class InstrType>
2012void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
2013 int false_block = instr->FalseDestination(chunk_);
2014 __ b(condition, chunk_->GetAssemblyLabel(false_block));
2015}
2016
2017
2018void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2019 __ stop("LBreak");
2020}
2021
2022
2023void LCodeGen::DoBranch(LBranch* instr) {
2024 Representation r = instr->hydrogen()->value()->representation();
2025 if (r.IsInteger32() || r.IsSmi()) {
2026 DCHECK(!info()->IsStub());
2027 Register reg = ToRegister(instr->value());
2028 __ cmp(reg, Operand::Zero());
2029 EmitBranch(instr, ne);
2030 } else if (r.IsDouble()) {
2031 DCHECK(!info()->IsStub());
2032 DwVfpRegister reg = ToDoubleRegister(instr->value());
2033 // Test the double value. Zero and NaN are false.
2034 __ VFPCompareAndSetFlags(reg, 0.0);
2035 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
2036 EmitBranch(instr, ne);
2037 } else {
2038 DCHECK(r.IsTagged());
2039 Register reg = ToRegister(instr->value());
2040 HType type = instr->hydrogen()->value()->type();
2041 if (type.IsBoolean()) {
2042 DCHECK(!info()->IsStub());
2043 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2044 EmitBranch(instr, eq);
2045 } else if (type.IsSmi()) {
2046 DCHECK(!info()->IsStub());
2047 __ cmp(reg, Operand::Zero());
2048 EmitBranch(instr, ne);
2049 } else if (type.IsJSArray()) {
2050 DCHECK(!info()->IsStub());
2051 EmitBranch(instr, al);
2052 } else if (type.IsHeapNumber()) {
2053 DCHECK(!info()->IsStub());
2054 DwVfpRegister dbl_scratch = double_scratch0();
2055 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2056 // Test the double value. Zero and NaN are false.
2057 __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2058 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
2059 EmitBranch(instr, ne);
2060 } else if (type.IsString()) {
2061 DCHECK(!info()->IsStub());
2062 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
2063 __ cmp(ip, Operand::Zero());
2064 EmitBranch(instr, ne);
2065 } else {
2066 ToBooleanICStub::Types expected =
2067 instr->hydrogen()->expected_input_types();
2068 // Avoid deopts in the case where we've never executed this path before.
2069 if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
2070
2071 if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
2072 // undefined -> false.
2073 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2074 __ b(eq, instr->FalseLabel(chunk_));
2075 }
2076 if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
2077 // Boolean -> its value.
2078 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2079 __ b(eq, instr->TrueLabel(chunk_));
2080 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2081 __ b(eq, instr->FalseLabel(chunk_));
2082 }
2083 if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
2084 // 'null' -> false.
2085 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2086 __ b(eq, instr->FalseLabel(chunk_));
2087 }
2088
2089 if (expected.Contains(ToBooleanICStub::SMI)) {
2090 // Smis: 0 -> false, all other -> true.
2091 __ cmp(reg, Operand::Zero());
2092 __ b(eq, instr->FalseLabel(chunk_));
2093 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2094 } else if (expected.NeedsMap()) {
2095 // If we need a map later and have a Smi -> deopt.
2096 __ SmiTst(reg);
2097 DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
2098 }
2099
2100 const Register map = scratch0();
2101 if (expected.NeedsMap()) {
2102 __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2103
2104 if (expected.CanBeUndetectable()) {
2105 // Undetectable -> false.
2106 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
2107 __ tst(ip, Operand(1 << Map::kIsUndetectable));
2108 __ b(ne, instr->FalseLabel(chunk_));
2109 }
2110 }
2111
2112 if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
2113 // spec object -> true.
2114 __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
2115 __ b(ge, instr->TrueLabel(chunk_));
2116 }
2117
2118 if (expected.Contains(ToBooleanICStub::STRING)) {
2119 // String value -> false iff empty.
2120 Label not_string;
2121 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2122 __ b(ge, &not_string);
2123 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
2124 __ cmp(ip, Operand::Zero());
2125 __ b(ne, instr->TrueLabel(chunk_));
2126 __ b(instr->FalseLabel(chunk_));
2127 __ bind(&not_string);
2128 }
2129
2130 if (expected.Contains(ToBooleanICStub::SYMBOL)) {
2131 // Symbol value -> true.
2132 __ CompareInstanceType(map, ip, SYMBOL_TYPE);
2133 __ b(eq, instr->TrueLabel(chunk_));
2134 }
2135
2136 if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
2137 // SIMD value -> true.
2138 __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
2139 __ b(eq, instr->TrueLabel(chunk_));
2140 }
2141
2142 if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
2143 // heap number -> false iff +0, -0, or NaN.
2144 DwVfpRegister dbl_scratch = double_scratch0();
2145 Label not_heap_number;
2146 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2147 __ b(ne, &not_heap_number);
2148 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2149 __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2150 __ cmp(r0, r0, vs); // NaN -> false.
2151 __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false.
2152 __ b(instr->TrueLabel(chunk_));
2153 __ bind(&not_heap_number);
2154 }
2155
2156 if (!expected.IsGeneric()) {
2157 // We've seen something for the first time -> deopt.
2158 // This can only happen if we are not generic already.
2159 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
2160 }
2161 }
2162 }
2163}
2164
2165
2166void LCodeGen::EmitGoto(int block) {
2167 if (!IsNextEmittedBlock(block)) {
2168 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2169 }
2170}
2171
2172
2173void LCodeGen::DoGoto(LGoto* instr) {
2174 EmitGoto(instr->block_id());
2175}
2176
2177
2178Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2179 Condition cond = kNoCondition;
2180 switch (op) {
2181 case Token::EQ:
2182 case Token::EQ_STRICT:
2183 cond = eq;
2184 break;
2185 case Token::NE:
2186 case Token::NE_STRICT:
2187 cond = ne;
2188 break;
2189 case Token::LT:
2190 cond = is_unsigned ? lo : lt;
2191 break;
2192 case Token::GT:
2193 cond = is_unsigned ? hi : gt;
2194 break;
2195 case Token::LTE:
2196 cond = is_unsigned ? ls : le;
2197 break;
2198 case Token::GTE:
2199 cond = is_unsigned ? hs : ge;
2200 break;
2201 case Token::IN:
2202 case Token::INSTANCEOF:
2203 default:
2204 UNREACHABLE();
2205 }
2206 return cond;
2207}
2208
2209
2210void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2211 LOperand* left = instr->left();
2212 LOperand* right = instr->right();
2213 bool is_unsigned =
2214 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2215 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2216 Condition cond = TokenToCondition(instr->op(), is_unsigned);
2217
2218 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2219 // We can statically evaluate the comparison.
2220 double left_val = ToDouble(LConstantOperand::cast(left));
2221 double right_val = ToDouble(LConstantOperand::cast(right));
2222 int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
2223 ? instr->TrueDestination(chunk_)
2224 : instr->FalseDestination(chunk_);
2225 EmitGoto(next_block);
2226 } else {
2227 if (instr->is_double()) {
2228 // Compare left and right operands as doubles and load the
2229 // resulting flags into the normal status register.
2230 __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
2231 // If a NaN is involved, i.e. the result is unordered (V set),
2232 // jump to false block label.
2233 __ b(vs, instr->FalseLabel(chunk_));
2234 } else {
2235 if (right->IsConstantOperand()) {
2236 int32_t value = ToInteger32(LConstantOperand::cast(right));
2237 if (instr->hydrogen_value()->representation().IsSmi()) {
2238 __ cmp(ToRegister(left), Operand(Smi::FromInt(value)));
2239 } else {
2240 __ cmp(ToRegister(left), Operand(value));
2241 }
2242 } else if (left->IsConstantOperand()) {
2243 int32_t value = ToInteger32(LConstantOperand::cast(left));
2244 if (instr->hydrogen_value()->representation().IsSmi()) {
2245 __ cmp(ToRegister(right), Operand(Smi::FromInt(value)));
2246 } else {
2247 __ cmp(ToRegister(right), Operand(value));
2248 }
2249 // We commuted the operands, so commute the condition.
2250 cond = CommuteCondition(cond);
2251 } else {
2252 __ cmp(ToRegister(left), ToRegister(right));
2253 }
2254 }
2255 EmitBranch(instr, cond);
2256 }
2257}
2258
2259
2260void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2261 Register left = ToRegister(instr->left());
2262 Register right = ToRegister(instr->right());
2263
2264 __ cmp(left, Operand(right));
2265 EmitBranch(instr, eq);
2266}
2267
2268
2269void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2270 if (instr->hydrogen()->representation().IsTagged()) {
2271 Register input_reg = ToRegister(instr->object());
2272 __ mov(ip, Operand(factory()->the_hole_value()));
2273 __ cmp(input_reg, ip);
2274 EmitBranch(instr, eq);
2275 return;
2276 }
2277
2278 DwVfpRegister input_reg = ToDoubleRegister(instr->object());
2279 __ VFPCompareAndSetFlags(input_reg, input_reg);
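// Only NaN compares unordered with itself, so a value that compares
// ordered ('vc') cannot be the hole NaN.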
2280 EmitFalseBranch(instr, vc);
2281
2282 Register scratch = scratch0();
2283 __ VmovHigh(scratch, input_reg);
2284 __ cmp(scratch, Operand(kHoleNanUpper32));
2285 EmitBranch(instr, eq);
2286}
2287
2288
2289 Condition LCodeGen::EmitIsString(Register input,
2290 Register temp1,
2291 Label* is_not_string,
2292 SmiCheck check_needed = INLINE_SMI_CHECK) {
2293 if (check_needed == INLINE_SMI_CHECK) {
2294 __ JumpIfSmi(input, is_not_string);
2295 }
2296 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
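// String instance types are all below FIRST_NONSTRING_TYPE, so 'lt' is the
// condition that holds when the input is a string.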
2297
2298 return lt;
2299}
2300
2301
2302void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2303 Register reg = ToRegister(instr->value());
2304 Register temp1 = ToRegister(instr->temp());
2305
2306 SmiCheck check_needed =
2307 instr->hydrogen()->value()->type().IsHeapObject()
2308 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2309 Condition true_cond =
2310 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2311
2312 EmitBranch(instr, true_cond);
2313}
2314
2315
2316void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2317 Register input_reg = EmitLoadRegister(instr->value(), ip);
2318 __ SmiTst(input_reg);
2319 EmitBranch(instr, eq);
2320}
2321
2322
2323void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2324 Register input = ToRegister(instr->value());
2325 Register temp = ToRegister(instr->temp());
2326
2327 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2328 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2329 }
2330 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2331 __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2332 __ tst(temp, Operand(1 << Map::kIsUndetectable));
2333 EmitBranch(instr, ne);
2334}
2335
2336
2337static Condition ComputeCompareCondition(Token::Value op) {
2338 switch (op) {
2339 case Token::EQ_STRICT:
2340 case Token::EQ:
2341 return eq;
2342 case Token::LT:
2343 return lt;
2344 case Token::GT:
2345 return gt;
2346 case Token::LTE:
2347 return le;
2348 case Token::GTE:
2349 return ge;
2350 default:
2351 UNREACHABLE();
2352 return kNoCondition;
2353 }
2354}
2355
2356
2357void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2358 DCHECK(ToRegister(instr->context()).is(cp));
2359 DCHECK(ToRegister(instr->left()).is(r1));
2360 DCHECK(ToRegister(instr->right()).is(r0));
2361
2362 Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
2363 CallCode(code, RelocInfo::CODE_TARGET, instr);
2364 __ CompareRoot(r0, Heap::kTrueValueRootIndex);
2365 EmitBranch(instr, eq);
2366 }
2367
2368
2369static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2370 InstanceType from = instr->from();
2371 InstanceType to = instr->to();
2372 if (from == FIRST_TYPE) return to;
2373 DCHECK(from == to || to == LAST_TYPE);
2374 return from;
2375}
2376
2377
2378static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2379 InstanceType from = instr->from();
2380 InstanceType to = instr->to();
2381 if (from == to) return eq;
2382 if (to == LAST_TYPE) return hs;
2383 if (from == FIRST_TYPE) return ls;
2384 UNREACHABLE();
2385 return eq;
2386}
2387
2388
2389void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2390 Register scratch = scratch0();
2391 Register input = ToRegister(instr->value());
2392
2393 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2394 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2395 }
2396
2397 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2398 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2399}
2400
2401
2402void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2403 Register input = ToRegister(instr->value());
2404 Register result = ToRegister(instr->result());
2405
2406 __ AssertString(input);
2407
2408 __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
2409 __ IndexFromHash(result, result);
2410}
2411
2412
2413void LCodeGen::DoHasCachedArrayIndexAndBranch(
2414 LHasCachedArrayIndexAndBranch* instr) {
2415 Register input = ToRegister(instr->value());
2416 Register scratch = scratch0();
2417
2418 __ ldr(scratch,
2419 FieldMemOperand(input, String::kHashFieldOffset));
2420 __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
2421 EmitBranch(instr, eq);
2422}
2423
2424
2425// Branches to a label or falls through with the answer in flags. Trashes
2426// the temp registers, but not the input.
2427void LCodeGen::EmitClassOfTest(Label* is_true,
2428 Label* is_false,
2429 Handle<String> class_name,
2430 Register input,
2431 Register temp,
2432 Register temp2) {
2433 DCHECK(!input.is(temp));
2434 DCHECK(!input.is(temp2));
2435 DCHECK(!temp.is(temp2));
2436
2437 __ JumpIfSmi(input, is_false);
2438
2439 __ CompareObjectType(input, temp, temp2, FIRST_FUNCTION_TYPE);
2440 STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
2441 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2442 __ b(hs, is_true);
2443 } else {
2444 __ b(hs, is_false);
2445 }
2446
2447 // Check if the constructor in the map is a function.
2448 Register instance_type = ip;
2449 __ GetMapConstructor(temp, temp, temp2, instance_type);
2450
2451 // Objects with a non-function constructor have class 'Object'.
2452 __ cmp(instance_type, Operand(JS_FUNCTION_TYPE));
2453 if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
2454 __ b(ne, is_true);
2455 } else {
2456 __ b(ne, is_false);
2457 }
2458
2459 // temp now contains the constructor function. Grab the
2460 // instance class name from there.
2461 __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2462 __ ldr(temp, FieldMemOperand(temp,
2463 SharedFunctionInfo::kInstanceClassNameOffset));
2464 // The class name we are testing against is internalized since it's a literal.
2465 // The name in the constructor is internalized because of the way the context
2466 // is booted. This routine isn't expected to work for random API-created
2467 // classes and it doesn't have to because you can't access it with natives
2468 // syntax. Since both sides are internalized it is sufficient to use an
2469 // identity comparison.
2470 __ cmp(temp, Operand(class_name));
2471 // End with the answer in flags.
2472}
2473
2474
2475void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2476 Register input = ToRegister(instr->value());
2477 Register temp = scratch0();
2478 Register temp2 = ToRegister(instr->temp());
2479 Handle<String> class_name = instr->hydrogen()->class_name();
2480
2481 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2482 class_name, input, temp, temp2);
2483
2484 EmitBranch(instr, eq);
2485}
2486
2487
2488void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2489 Register reg = ToRegister(instr->value());
2490 Register temp = ToRegister(instr->temp());
2491
2492 __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2493 __ cmp(temp, Operand(instr->map()));
2494 EmitBranch(instr, eq);
2495}
2496
2497
2498 void LCodeGen::DoHasInPrototypeChainAndBranch(
2499 LHasInPrototypeChainAndBranch* instr) {
2500 Register const object = ToRegister(instr->object());
2501 Register const object_map = scratch0();
2502 Register const object_instance_type = ip;
2503 Register const object_prototype = object_map;
2504 Register const prototype = ToRegister(instr->prototype());
2505
2506 // The {object} must be a spec object. It's sufficient to know that {object}
2507 // is not a smi, since all other non-spec objects have {null} prototypes and
2508 // will be ruled out below.
2509 if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
2510 __ SmiTst(object);
2511 EmitFalseBranch(instr, eq);
2512 }
2513
2514 // Loop through the {object}s prototype chain looking for the {prototype}.
2515 __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
2516 Label loop;
2517 __ bind(&loop);
2518
2519 // Deoptimize if the object needs to be access checked.
2520 __ ldrb(object_instance_type,
2521 FieldMemOperand(object_map, Map::kBitFieldOffset));
2522 __ tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded));
2523 DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck);
2524 // Deoptimize for proxies.
2525 __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
2526 DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
2527
2528 __ ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
2529 __ cmp(object_prototype, prototype);
2530 EmitTrueBranch(instr, eq);
2531 __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
2532 EmitFalseBranch(instr, eq);
2533 __ ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
2534 __ b(&loop);
2535}
2536
2537
2538void LCodeGen::DoCmpT(LCmpT* instr) {
2539 DCHECK(ToRegister(instr->context()).is(cp));
2540 Token::Value op = instr->op();
2541
2542 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2543 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2544 // This instruction also signals no smi code inlined.
2545 __ cmp(r0, Operand::Zero());
2546
2547 Condition condition = ComputeCompareCondition(op);
2548 __ LoadRoot(ToRegister(instr->result()),
2549 Heap::kTrueValueRootIndex,
2550 condition);
2551 __ LoadRoot(ToRegister(instr->result()),
2552 Heap::kFalseValueRootIndex,
2553 NegateCondition(condition));
2554}
2555
2556
2557void LCodeGen::DoReturn(LReturn* instr) {
2558 if (FLAG_trace && info()->IsOptimizing()) {
2559 // Push the return value on the stack as the parameter.
2560 // Runtime::TraceExit returns its parameter in r0. We're leaving the code
2561 // managed by the register allocator and tearing down the frame, so it's
2562 // safe to write to the context register.
2563 __ push(r0);
2564 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2565 __ CallRuntime(Runtime::kTraceExit);
2566 }
2567 if (info()->saves_caller_doubles()) {
2568 RestoreCallerDoubles();
2569 }
2570 if (NeedsEagerFrame()) {
2571 masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
2572 }
2573 { ConstantPoolUnavailableScope constant_pool_unavailable(masm());
2574 if (instr->has_constant_parameter_count()) {
2575 int parameter_count = ToInteger32(instr->constant_parameter_count());
2576 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2577 if (sp_delta != 0) {
2578 __ add(sp, sp, Operand(sp_delta));
2579 }
2580 } else {
2581 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2582 Register reg = ToRegister(instr->parameter_count());
2583 // The argument count parameter is a smi
2584 __ SmiUntag(reg);
2585 __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
2586 }
2587
2588 __ Jump(lr);
2589 }
2590}
2591
2592
2593template <class T>
2594void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2595 Register vector_register = ToRegister(instr->temp_vector());
2596 Register slot_register = LoadDescriptor::SlotRegister();
2597 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
2598 DCHECK(slot_register.is(r0));
2599
2600 AllowDeferredHandleDereference vector_structure_check;
2601 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2602 __ Move(vector_register, vector);
2603 // No need to allocate this register.
2604 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2605 int index = vector->GetIndex(slot);
2606 __ mov(slot_register, Operand(Smi::FromInt(index)));
2607}
2608
2609
2610template <class T>
2611void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
2612 Register vector_register = ToRegister(instr->temp_vector());
2613 Register slot_register = ToRegister(instr->temp_slot());
2614
2615 AllowDeferredHandleDereference vector_structure_check;
2616 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2617 __ Move(vector_register, vector);
2618 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2619 int index = vector->GetIndex(slot);
2620 __ mov(slot_register, Operand(Smi::FromInt(index)));
2621}
2622
2623
2624void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2625 DCHECK(ToRegister(instr->context()).is(cp));
2626 DCHECK(ToRegister(instr->global_object())
2627 .is(LoadDescriptor::ReceiverRegister()));
2628 DCHECK(ToRegister(instr->result()).is(r0));
2629
2630 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
2631 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2632 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
2633 isolate(), instr->typeof_mode(), PREMONOMORPHIC)
2634 .code();
2635 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2636}
2637
2638
2639void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2640 Register context = ToRegister(instr->context());
2641 Register result = ToRegister(instr->result());
2642 __ ldr(result, ContextMemOperand(context, instr->slot_index()));
2643 if (instr->hydrogen()->RequiresHoleCheck()) {
2644 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2645 __ cmp(result, ip);
2646 if (instr->hydrogen()->DeoptimizesOnHole()) {
2647 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
2648 } else {
2649 __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
2650 }
2651 }
2652}
2653
2654
2655void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2656 Register context = ToRegister(instr->context());
2657 Register value = ToRegister(instr->value());
2658 Register scratch = scratch0();
2659 MemOperand target = ContextMemOperand(context, instr->slot_index());
2660
2661 Label skip_assignment;
2662
2663 if (instr->hydrogen()->RequiresHoleCheck()) {
2664 __ ldr(scratch, target);
2665 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2666 __ cmp(scratch, ip);
2667 if (instr->hydrogen()->DeoptimizesOnHole()) {
2668 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
2669 } else {
2670 __ b(ne, &skip_assignment);
2671 }
2672 }
2673
2674 __ str(value, target);
2675 if (instr->hydrogen()->NeedsWriteBarrier()) {
2676 SmiCheck check_needed =
2677 instr->hydrogen()->value()->type().IsHeapObject()
2678 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2679 __ RecordWriteContextSlot(context,
2680 target.offset(),
2681 value,
2682 scratch,
2683 GetLinkRegisterState(),
2684 kSaveFPRegs,
2685 EMIT_REMEMBERED_SET,
2686 check_needed);
2687 }
2688
2689 __ bind(&skip_assignment);
2690}
2691
2692
2693void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2694 HObjectAccess access = instr->hydrogen()->access();
2695 int offset = access.offset();
2696 Register object = ToRegister(instr->object());
2697
2698 if (access.IsExternalMemory()) {
2699 Register result = ToRegister(instr->result());
2700 MemOperand operand = MemOperand(object, offset);
2701 __ Load(result, operand, access.representation());
2702 return;
2703 }
2704
2705 if (instr->hydrogen()->representation().IsDouble()) {
2706 DwVfpRegister result = ToDoubleRegister(instr->result());
2707 __ vldr(result, FieldMemOperand(object, offset));
2708 return;
2709 }
2710
2711 Register result = ToRegister(instr->result());
2712 if (!access.IsInobject()) {
2713 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2714 object = result;
2715 }
2716 MemOperand operand = FieldMemOperand(object, offset);
2717 __ Load(result, operand, access.representation());
2718}
2719
2720
2721void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2722 DCHECK(ToRegister(instr->context()).is(cp));
2723 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
2724 DCHECK(ToRegister(instr->result()).is(r0));
2725
2726 // Name is always in r2.
2727 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
2728 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
2729 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
2730 isolate(), NOT_INSIDE_TYPEOF,
2731 instr->hydrogen()->initialization_state())
2732 .code();
2733 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
2734}
2735
2736
2737void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2738 Register scratch = scratch0();
2739 Register function = ToRegister(instr->function());
2740 Register result = ToRegister(instr->result());
2741
2742 // Get the prototype or initial map from the function.
2743 __ ldr(result,
2744 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2745
2746 // Check that the function has a prototype or an initial map.
2747 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2748 __ cmp(result, ip);
2749 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
2750
2751 // If the function does not have an initial map, we're done.
2752 Label done;
2753 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
2754 __ b(ne, &done);
2755
2756 // Get the prototype from the initial map.
2757 __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
2758
2759 // All done.
2760 __ bind(&done);
2761}
2762
2763
2764void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2765 Register result = ToRegister(instr->result());
2766 __ LoadRoot(result, instr->index());
2767}
2768
2769
2770void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2771 Register arguments = ToRegister(instr->arguments());
2772 Register result = ToRegister(instr->result());
2773 // There are two words between the frame pointer and the last argument.
2774 // Subtracting from length accounts for one of them; add one more.
2775 if (instr->length()->IsConstantOperand()) {
2776 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
2777 if (instr->index()->IsConstantOperand()) {
2778 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2779 int index = (const_length - const_index) + 1;
2780 __ ldr(result, MemOperand(arguments, index * kPointerSize));
2781 } else {
2782 Register index = ToRegister(instr->index());
2783 __ rsb(result, index, Operand(const_length + 1));
2784 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
2785 }
2786 } else if (instr->index()->IsConstantOperand()) {
2787 Register length = ToRegister(instr->length());
2788 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2789 int loc = const_index - 1;
2790 if (loc != 0) {
2791 __ sub(result, length, Operand(loc));
2792 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
2793 } else {
2794 __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
2795 }
2796 } else {
2797 Register length = ToRegister(instr->length());
2798 Register index = ToRegister(instr->index());
2799 __ sub(result, length, index);
2800 __ add(result, result, Operand(1));
2801 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
2802 }
2803}
2804
2805
2806void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
2807 Register external_pointer = ToRegister(instr->elements());
2808 Register key = no_reg;
2809 ElementsKind elements_kind = instr->elements_kind();
2810 bool key_is_constant = instr->key()->IsConstantOperand();
2811 int constant_key = 0;
2812 if (key_is_constant) {
2813 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2814 if (constant_key & 0xF0000000) {
2815 Abort(kArrayIndexConstantValueTooBig);
2816 }
2817 } else {
2818 key = ToRegister(instr->key());
2819 }
2820 int element_size_shift = ElementsKindToShiftSize(elements_kind);
2821 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
2822 ? (element_size_shift - kSmiTagSize) : element_size_shift;
2823 int base_offset = instr->base_offset();
2824
2825 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
2826 DwVfpRegister result = ToDoubleRegister(instr->result());
2827 Operand operand = key_is_constant
2828 ? Operand(constant_key << element_size_shift)
2829 : Operand(key, LSL, shift_size);
2830 __ add(scratch0(), external_pointer, operand);
2831 if (elements_kind == FLOAT32_ELEMENTS) {
2832 __ vldr(double_scratch0().low(), scratch0(), base_offset);
2833 __ vcvt_f64_f32(result, double_scratch0().low());
2834 } else { // i.e. elements_kind == FLOAT64_ELEMENTS
2835 __ vldr(result, scratch0(), base_offset);
2836 }
2837 } else {
2838 Register result = ToRegister(instr->result());
2839 MemOperand mem_operand = PrepareKeyedOperand(
2840 key, external_pointer, key_is_constant, constant_key,
2841 element_size_shift, shift_size, base_offset);
2842 switch (elements_kind) {
2843 case INT8_ELEMENTS:
2844 __ ldrsb(result, mem_operand);
2845 break;
2846 case UINT8_ELEMENTS:
2847 case UINT8_CLAMPED_ELEMENTS:
2848 __ ldrb(result, mem_operand);
2849 break;
2850 case INT16_ELEMENTS:
2851 __ ldrsh(result, mem_operand);
2852 break;
2853 case UINT16_ELEMENTS:
2854 __ ldrh(result, mem_operand);
2855 break;
2856 case INT32_ELEMENTS:
2857 __ ldr(result, mem_operand);
2858 break;
2859 case UINT32_ELEMENTS:
2860 __ ldr(result, mem_operand);
2861 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
2862 __ cmp(result, Operand(0x80000000));
2863 DeoptimizeIf(cs, instr, Deoptimizer::kNegativeValue);
2864 }
2865 break;
2866 case FLOAT32_ELEMENTS:
2867 case FLOAT64_ELEMENTS:
2868 case FAST_HOLEY_DOUBLE_ELEMENTS:
2869 case FAST_HOLEY_ELEMENTS:
2870 case FAST_HOLEY_SMI_ELEMENTS:
2871 case FAST_DOUBLE_ELEMENTS:
2872 case FAST_ELEMENTS:
2873 case FAST_SMI_ELEMENTS:
2874 case DICTIONARY_ELEMENTS:
2875 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
2876 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
2877 case FAST_STRING_WRAPPER_ELEMENTS:
2878 case SLOW_STRING_WRAPPER_ELEMENTS:
2879 case NO_ELEMENTS:
2880 UNREACHABLE();
2881 break;
2882 }
2883 }
2884}
2885
2886
2887void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
2888 Register elements = ToRegister(instr->elements());
2889 bool key_is_constant = instr->key()->IsConstantOperand();
2890 Register key = no_reg;
2891 DwVfpRegister result = ToDoubleRegister(instr->result());
2892 Register scratch = scratch0();
2893
2894 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
2895
2896 int base_offset = instr->base_offset();
2897 if (key_is_constant) {
2898 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2899 if (constant_key & 0xF0000000) {
2900 Abort(kArrayIndexConstantValueTooBig);
2901 }
2902 base_offset += constant_key * kDoubleSize;
2903 }
2904 __ add(scratch, elements, Operand(base_offset));
2905
2906 if (!key_is_constant) {
2907 key = ToRegister(instr->key());
2908 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
2909 ? (element_size_shift - kSmiTagSize) : element_size_shift;
2910 __ add(scratch, scratch, Operand(key, LSL, shift_size));
2911 }
2912
2913 __ vldr(result, scratch, 0);
2914
2915 if (instr->hydrogen()->RequiresHoleCheck()) {
2916 __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
2917 __ cmp(scratch, Operand(kHoleNanUpper32));
2918 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
2919 }
2920}
2921
2922
2923void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
2924 Register elements = ToRegister(instr->elements());
2925 Register result = ToRegister(instr->result());
2926 Register scratch = scratch0();
2927 Register store_base = scratch;
2928 int offset = instr->base_offset();
2929
2930 if (instr->key()->IsConstantOperand()) {
2931 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
2932 offset += ToInteger32(const_operand) * kPointerSize;
2933 store_base = elements;
2934 } else {
2935 Register key = ToRegister(instr->key());
2936 // Even though the HLoadKeyed instruction forces the input
2937 // representation for the key to be an integer, the input gets replaced
2938 // during bound check elimination with the index argument to the bounds
2939 // check, which can be tagged, so that case must be handled here, too.
2940 if (instr->hydrogen()->key()->representation().IsSmi()) {
2941 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
2942 } else {
2943 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
2944 }
2945 }
2946 __ ldr(result, MemOperand(store_base, offset));
2947
2948 // Check for the hole value.
2949 if (instr->hydrogen()->RequiresHoleCheck()) {
2950 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
2951 __ SmiTst(result);
2952 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
2953 } else {
2954 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2955 __ cmp(result, scratch);
2956 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
2957 }
2958 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
2959 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
2960 Label done;
2961 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2962 __ cmp(result, scratch);
2963 __ b(ne, &done);
2964 if (info()->IsStub()) {
2965 // A stub can safely convert the hole to undefined only if the array
2966 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
2967 // it needs to bail out.
2968 __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
2969 __ ldr(result, FieldMemOperand(result, Cell::kValueOffset));
2970 __ cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
2971 DeoptimizeIf(ne, instr, Deoptimizer::kHole);
2972 }
2973 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2974 __ bind(&done);
2975 }
2976}
2977
2978
2979void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
2980 if (instr->is_fixed_typed_array()) {
2981 DoLoadKeyedExternalArray(instr);
2982 } else if (instr->hydrogen()->representation().IsDouble()) {
2983 DoLoadKeyedFixedDoubleArray(instr);
2984 } else {
2985 DoLoadKeyedFixedArray(instr);
2986 }
2987}
2988
2989
2990MemOperand LCodeGen::PrepareKeyedOperand(Register key,
2991 Register base,
2992 bool key_is_constant,
2993 int constant_key,
2994 int element_size,
2995 int shift_size,
2996 int base_offset) {
2997 if (key_is_constant) {
2998 return MemOperand(base, (constant_key << element_size) + base_offset);
2999 }
3000
3001 if (base_offset == 0) {
3002 if (shift_size >= 0) {
3003 return MemOperand(base, key, LSL, shift_size);
3004 } else {
3005 DCHECK_EQ(-1, shift_size);
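// shift_size == -1 means a smi key indexing byte-sized elements; shifting
// the key right by one untags it.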
3006 return MemOperand(base, key, LSR, 1);
3007 }
3008 }
3009
3010 if (shift_size >= 0) {
3011 __ add(scratch0(), base, Operand(key, LSL, shift_size));
3012 return MemOperand(scratch0(), base_offset);
3013 } else {
3014 DCHECK_EQ(-1, shift_size);
3015 __ add(scratch0(), base, Operand(key, ASR, 1));
3016 return MemOperand(scratch0(), base_offset);
3017 }
3018}
3019
3020
3021void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3022 DCHECK(ToRegister(instr->context()).is(cp));
3023 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3024 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3025
3026 if (instr->hydrogen()->HasVectorAndSlot()) {
3027 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3028 }
3029
3030 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
3031 isolate(), instr->hydrogen()->initialization_state())
3032 .code();
3033 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3034}
3035
3036
3037void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3038 Register scratch = scratch0();
3039 Register result = ToRegister(instr->result());
3040
3041 if (instr->hydrogen()->from_inlined()) {
3042 __ sub(result, sp, Operand(2 * kPointerSize));
3043 } else if (instr->hydrogen()->arguments_adaptor()) {
3044 // Check if the calling frame is an arguments adaptor frame.
3045 Label done, adapted;
3046 __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3047 __ ldr(result, MemOperand(scratch,
3048 CommonFrameConstants::kContextOrFrameTypeOffset));
3049 __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3050
3051 // Result is the frame pointer for the frame if not adapted and for the real
3052 // frame below the adaptor frame if adapted.
3053 __ mov(result, fp, LeaveCC, ne);
3054 __ mov(result, scratch, LeaveCC, eq);
3055 } else {
3056 __ mov(result, fp);
3057 }
3058}
3059
3060
3061void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3062 Register elem = ToRegister(instr->elements());
3063 Register result = ToRegister(instr->result());
3064
3065 Label done;
3066
3067 // If no arguments adaptor frame the number of arguments is fixed.
3068 __ cmp(fp, elem);
3069 __ mov(result, Operand(scope()->num_parameters()));
3070 __ b(eq, &done);
3071
3072 // Arguments adaptor frame present. Get argument length from there.
3073 __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3074 __ ldr(result,
3075 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3076 __ SmiUntag(result);
3077
3078 // Argument length is in result register.
3079 __ bind(&done);
3080}
3081
3082
3083void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3084 Register receiver = ToRegister(instr->receiver());
3085 Register function = ToRegister(instr->function());
3086 Register result = ToRegister(instr->result());
3087 Register scratch = scratch0();
3088
3089 // If the receiver is null or undefined, we have to pass the global
3090 // object as a receiver to normal functions. Values have to be
3091 // passed unchanged to builtins and strict-mode functions.
3092 Label global_object, result_in_receiver;
3093
3094 if (!instr->hydrogen()->known_function()) {
3095 // Do not transform the receiver to object for strict mode
3096 // functions.
3097 __ ldr(scratch,
3098 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3099 __ ldr(scratch,
3100 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3101 int mask = 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
3102 __ tst(scratch, Operand(mask));
3103 __ b(ne, &result_in_receiver);
3104
3105 // Do not transform the receiver to object for builtins.
3106 __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
3107 __ b(ne, &result_in_receiver);
3108 }
3109
3110 // Normal function. Replace undefined or null with global receiver.
3111 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3112 __ cmp(receiver, scratch);
3113 __ b(eq, &global_object);
3114 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3115 __ cmp(receiver, scratch);
3116 __ b(eq, &global_object);
3117
3118 // Deoptimize if the receiver is not a JS object.
3119 __ SmiTst(receiver);
3120 DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
3121 __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
3122 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
3123
3124 __ b(&result_in_receiver);
3125 __ bind(&global_object);
3126 __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
3127 __ ldr(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
3128 __ ldr(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
3129
3130 if (result.is(receiver)) {
3131 __ bind(&result_in_receiver);
3132 } else {
3133 Label result_ok;
3134 __ b(&result_ok);
3135 __ bind(&result_in_receiver);
3136 __ mov(result, receiver);
3137 __ bind(&result_ok);
3138 }
3139}
3140
3141
3142void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3143 Register receiver = ToRegister(instr->receiver());
3144 Register function = ToRegister(instr->function());
3145 Register length = ToRegister(instr->length());
3146 Register elements = ToRegister(instr->elements());
3147 Register scratch = scratch0();
3148 DCHECK(receiver.is(r0)); // Used for parameter count.
3149 DCHECK(function.is(r1)); // Required by InvokeFunction.
3150 DCHECK(ToRegister(instr->result()).is(r0));
3151
3152 // Copy the arguments to this function possibly from the
3153 // adaptor frame below it.
3154 const uint32_t kArgumentsLimit = 1 * KB;
3155 __ cmp(length, Operand(kArgumentsLimit));
3156 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
3157
3158 // Push the receiver and use the register to keep the original
3159 // number of arguments.
3160 __ push(receiver);
3161 __ mov(receiver, length);
3162 // The arguments are at a one pointer size offset from elements.
3163 __ add(elements, elements, Operand(1 * kPointerSize));
3164
3165 // Loop through the arguments pushing them onto the execution
3166 // stack.
3167 Label invoke, loop;
3168 // length is a small non-negative integer, due to the test above.
3169 __ cmp(length, Operand::Zero());
3170 __ b(eq, &invoke);
3171 __ bind(&loop);
3172 __ ldr(scratch, MemOperand(elements, length, LSL, 2));
3173 __ push(scratch);
3174 __ sub(length, length, Operand(1), SetCC);
3175 __ b(ne, &loop);
3176
3177 __ bind(&invoke);
3178
3179 InvokeFlag flag = CALL_FUNCTION;
3180 if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
3181 DCHECK(!info()->saves_caller_doubles());
3182 // TODO(ishell): drop current frame before pushing arguments to the stack.
3183 flag = JUMP_FUNCTION;
3184 ParameterCount actual(r0);
3185 // It is safe to use r3, r4 and r5 as scratch registers here given that
3186 // 1) we are not going to return to caller function anyway,
3187 // 2) r3 (new.target) will be initialized below.
3188 PrepareForTailCall(actual, r3, r4, r5);
3189 }
3190
3191 DCHECK(instr->HasPointerMap());
3192 LPointerMap* pointers = instr->pointer_map();
3193 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
3194 // The number of arguments is stored in receiver which is r0, as expected
3195 // by InvokeFunction.
3196 ParameterCount actual(receiver);
3197 __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
3198}
3199
3200
3201void LCodeGen::DoPushArgument(LPushArgument* instr) {
3202 LOperand* argument = instr->value();
3203 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3204 Abort(kDoPushArgumentNotImplementedForDoubleType);
3205 } else {
3206 Register argument_reg = EmitLoadRegister(argument, ip);
3207 __ push(argument_reg);
3208 }
3209}
3210
3211
3212void LCodeGen::DoDrop(LDrop* instr) {
3213 __ Drop(instr->count());
3214}
3215
3216
3217void LCodeGen::DoThisFunction(LThisFunction* instr) {
3218 Register result = ToRegister(instr->result());
3219 __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3220}
3221
3222
3223void LCodeGen::DoContext(LContext* instr) {
3224 // If there is a non-return use, the context must be moved to a register.
3225 Register result = ToRegister(instr->result());
3226 if (info()->IsOptimizing()) {
3227 __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3228 } else {
3229 // If there is no frame, the context must be in cp.
3230 DCHECK(result.is(cp));
3231 }
3232}
3233
3234
3235void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3236 DCHECK(ToRegister(instr->context()).is(cp));
3237 __ Move(scratch0(), instr->hydrogen()->pairs());
3238 __ push(scratch0());
3239 __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3240 __ push(scratch0());
3241 CallRuntime(Runtime::kDeclareGlobals, instr);
3242}
3243
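// Calls a JSFunction that is statically known. When the arity matches the
// formal parameter count, or the callee does not require adapted arguments,
// the code entry is called directly; otherwise the generic InvokeFunction
// path is used.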
3244void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3245 int formal_parameter_count, int arity,
3246 bool is_tail_call, LInstruction* instr) {
3247 bool dont_adapt_arguments =
3248 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3249 bool can_invoke_directly =
3250 dont_adapt_arguments || formal_parameter_count == arity;
3251
3252 Register function_reg = r1;
3253
3254 LPointerMap* pointers = instr->pointer_map();
3255
3256 if (can_invoke_directly) {
3257 // Change context.
3258 __ ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
3259
3260 // Always initialize new target and number of actual arguments.
3261 __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
3262 __ mov(r0, Operand(arity));
3263
3264 bool is_self_call = function.is_identical_to(info()->closure());
3265
3266 // Invoke function.
3267 if (is_self_call) {
3268 Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
3269 if (is_tail_call) {
3270 __ Jump(self, RelocInfo::CODE_TARGET);
3271 } else {
3272 __ Call(self, RelocInfo::CODE_TARGET);
3273 }
3274 } else {
3275 __ ldr(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
3276 if (is_tail_call) {
3277 __ Jump(ip);
3278 } else {
3279 __ Call(ip);
3280 }
3281 }
3282
3283 if (!is_tail_call) {
3284 // Set up deoptimization.
3285 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3286 }
3287 } else {
3288 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3289 ParameterCount actual(arity);
3290 ParameterCount expected(formal_parameter_count);
3291 InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
3292 __ InvokeFunction(function_reg, expected, actual, flag, generator);
3293 }
3294}
3295
3296
3297void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3298 DCHECK(instr->context() != NULL);
3299 DCHECK(ToRegister(instr->context()).is(cp));
3300 Register input = ToRegister(instr->value());
3301 Register result = ToRegister(instr->result());
3302 Register scratch = scratch0();
3303
3304 // Deoptimize if not a heap number.
3305 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3306 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3307 __ cmp(scratch, Operand(ip));
3308 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
3309
3310 Label done;
3311 Register exponent = scratch0();
3312 scratch = no_reg;
3313 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3314 // Check the sign of the argument. If the argument is positive, just
3315 // return it.
3316 __ tst(exponent, Operand(HeapNumber::kSignMask));
3317 // Move the input to the result if necessary.
3318 __ Move(result, input);
3319 __ b(eq, &done);
3320
3321 // Input is negative. Reverse its sign.
3322 // Preserve the value of all registers.
3323 {
3324 PushSafepointRegistersScope scope(this);
3325
3326 // Registers were saved at the safepoint, so we can use
3327 // many scratch registers.
3328 Register tmp1 = input.is(r1) ? r0 : r1;
3329 Register tmp2 = input.is(r2) ? r0 : r2;
3330 Register tmp3 = input.is(r3) ? r0 : r3;
3331 Register tmp4 = input.is(r4) ? r0 : r4;
3332
3333 // exponent: floating point exponent value.
3334
3335 Label allocated, slow;
3336 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3337 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3338 __ b(&allocated);
3339
3340 // Slow case: Call the runtime system to do the number allocation.
3341 __ bind(&slow);
3342
3343 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3344 instr->context());
3345 // Set the pointer to the new heap number in tmp.
3346 if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
3347 // Restore input_reg after call to runtime.
3348 __ LoadFromSafepointRegisterSlot(input, input);
3349 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3350
3351 __ bind(&allocated);
3352 // exponent: floating point exponent value.
3353 // tmp1: allocated heap number.
3354 __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
3355 __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3356 __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3357 __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3358
3359 __ StoreToSafepointRegisterSlot(tmp1, result);
3360 }
3361
3362 __ bind(&done);
3363}
3364
3365
3366void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3367 Register input = ToRegister(instr->value());
3368 Register result = ToRegister(instr->result());
3369 __ cmp(input, Operand::Zero());
3370 __ Move(result, input, pl);
3371 // We can make rsb conditional because the previous cmp instruction
3372 // will clear the V (overflow) flag and rsb won't set this flag
3373 // if input is positive.
3374 __ rsb(result, input, Operand::Zero(), SetCC, mi);
3375 // Deoptimize on overflow.
3376 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
3377}
3378
3379
3380void LCodeGen::DoMathAbs(LMathAbs* instr) {
3381 // Class for deferred case.
3382 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3383 public:
3384 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3385 : LDeferredCode(codegen), instr_(instr) { }
3386 void Generate() override {
3387 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3388 }
3389 LInstruction* instr() override { return instr_; }
3390
3391 private:
3392 LMathAbs* instr_;
3393 };
3394
3395 Representation r = instr->hydrogen()->value()->representation();
3396 if (r.IsDouble()) {
3397 DwVfpRegister input = ToDoubleRegister(instr->value());
3398 DwVfpRegister result = ToDoubleRegister(instr->result());
3399 __ vabs(result, input);
3400 } else if (r.IsSmiOrInteger32()) {
3401 EmitIntegerMathAbs(instr);
3402 } else {
3403 // Representation is tagged.
3404 DeferredMathAbsTaggedHeapNumber* deferred =
3405 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3406 Register input = ToRegister(instr->value());
3407 // Smi check.
3408 __ JumpIfNotSmi(input, deferred->entry());
3409 // If smi, handle it directly.
3410 EmitIntegerMathAbs(instr);
3411 __ bind(deferred->exit());
3412 }
3413}
3414
3415
3416void LCodeGen::DoMathFloor(LMathFloor* instr) {
3417 DwVfpRegister input = ToDoubleRegister(instr->value());
3418 Register result = ToRegister(instr->result());
3419 Register input_high = scratch0();
3420 Label done, exact;
3421
3422 __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
3423 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
3424
3425 __ bind(&exact);
3426 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3427 // Test for -0.
3428 __ cmp(result, Operand::Zero());
3429 __ b(ne, &done);
3430 __ cmp(input_high, Operand::Zero());
3431 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
3432 }
3433 __ bind(&done);
3434}
3435
3436
3437void LCodeGen::DoMathRound(LMathRound* instr) {
3438 DwVfpRegister input = ToDoubleRegister(instr->value());
3439 Register result = ToRegister(instr->result());
3440 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
3441 DwVfpRegister input_plus_dot_five = double_scratch1;
3442 Register input_high = scratch0();
3443 DwVfpRegister dot_five = double_scratch0();
3444 Label convert, done;
3445
3446 __ Vmov(dot_five, 0.5, scratch0());
3447 __ vabs(double_scratch1, input);
3448 __ VFPCompareAndSetFlags(double_scratch1, dot_five);
3449 // If input is in [-0.5, -0], the result is -0.
3450 // If input is in [+0, +0.5[, the result is +0.
3451 // If the input is +0.5, the result is 1.
3452 __ b(hi, &convert); // Out of [-0.5, +0.5].
3453 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3454 __ VmovHigh(input_high, input);
3455 __ cmp(input_high, Operand::Zero());
3456 // [-0.5, -0].
3457 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
3458 }
3459 __ VFPCompareAndSetFlags(input, dot_five);
3460 __ mov(result, Operand(1), LeaveCC, eq); // +0.5.
3461 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
3462 // flag kBailoutOnMinusZero.
3463 __ mov(result, Operand::Zero(), LeaveCC, ne);
3464 __ b(&done);
3465
3466 __ bind(&convert);
3467 __ vadd(input_plus_dot_five, input, dot_five);
3468 // Reuse dot_five (double_scratch0) as we no longer need this value.
3469 __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
3470 &done, &done);
3471 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
3472 __ bind(&done);
3473}
3474
3475
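// Math.fround: narrow the input to single precision and widen it back,
// rounding the double to the nearest representable float32 value.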
3476void LCodeGen::DoMathFround(LMathFround* instr) {
3477 DwVfpRegister input_reg = ToDoubleRegister(instr->value());
3478 DwVfpRegister output_reg = ToDoubleRegister(instr->result());
3479 LowDwVfpRegister scratch = double_scratch0();
3480 __ vcvt_f32_f64(scratch.low(), input_reg);
3481 __ vcvt_f64_f32(output_reg, scratch.low());
3482}
3483
3484
3485void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3486 DwVfpRegister input = ToDoubleRegister(instr->value());
3487 DwVfpRegister result = ToDoubleRegister(instr->result());
3488 __ vsqrt(result, input);
3489}
3490
3491
3492void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3493 DwVfpRegister input = ToDoubleRegister(instr->value());
3494 DwVfpRegister result = ToDoubleRegister(instr->result());
3495 DwVfpRegister temp = double_scratch0();
3496
3497 // Note that according to ECMA-262 15.8.2.13:
3498 // Math.pow(-Infinity, 0.5) == Infinity
3499 // Math.sqrt(-Infinity) == NaN
3500 Label done;
3501 __ vmov(temp, -V8_INFINITY, scratch0());
3502 __ VFPCompareAndSetFlags(input, temp);
3503 __ vneg(result, temp, eq);
3504 __ b(&done, eq);
3505
3506 // Add +0 to convert -0 to +0.
3507 __ vadd(result, input, kDoubleRegZero);
3508 __ vsqrt(result, result);
3509 __ bind(&done);
3510}
3511
3512
3513void LCodeGen::DoPower(LPower* instr) {
3514 Representation exponent_type = instr->hydrogen()->right()->representation();
3515 // Having marked this as a call, we can use any registers.
3516 // Just make sure that the input/output registers are the expected ones.
3517 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3518 DCHECK(!instr->right()->IsDoubleRegister() ||
3519 ToDoubleRegister(instr->right()).is(d1));
3520 DCHECK(!instr->right()->IsRegister() ||
3521 ToRegister(instr->right()).is(tagged_exponent));
3522 DCHECK(ToDoubleRegister(instr->left()).is(d0));
3523 DCHECK(ToDoubleRegister(instr->result()).is(d2));
3524
3525 if (exponent_type.IsSmi()) {
3526 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3527 __ CallStub(&stub);
3528 } else if (exponent_type.IsTagged()) {
3529 Label no_deopt;
3530 __ JumpIfSmi(tagged_exponent, &no_deopt);
3531 DCHECK(!r6.is(tagged_exponent));
3532 __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3533 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3534 __ cmp(r6, Operand(ip));
3535 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
3536 __ bind(&no_deopt);
3537 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3538 __ CallStub(&stub);
3539 } else if (exponent_type.IsInteger32()) {
3540 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3541 __ CallStub(&stub);
3542 } else {
3543 DCHECK(exponent_type.IsDouble());
3544 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3545 __ CallStub(&stub);
3546 }
3547}
3548
3549
3550void LCodeGen::DoMathExp(LMathExp* instr) {
3551 DwVfpRegister input = ToDoubleRegister(instr->value());
3552 DwVfpRegister result = ToDoubleRegister(instr->result());
3553 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3554 DwVfpRegister double_scratch2 = double_scratch0();
3555 Register temp1 = ToRegister(instr->temp1());
3556 Register temp2 = ToRegister(instr->temp2());
3557
3558 MathExpGenerator::EmitMathExp(
3559 masm(), input, result, double_scratch1, double_scratch2,
3560 temp1, temp2, scratch0());
3561}
3562
3563
3564void LCodeGen::DoMathLog(LMathLog* instr) {
3565 __ PrepareCallCFunction(0, 1, scratch0());
3566 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3567 __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
3568 0, 1);
3569 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3570}
3571
3572
3573void LCodeGen::DoMathClz32(LMathClz32* instr) {
3574 Register input = ToRegister(instr->value());
3575 Register result = ToRegister(instr->result());
3576 __ clz(result, input);
3577}
3578
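// Computes the caller's argument count (from the arguments adaptor frame if
// one is present, otherwise from the caller's formal parameter count) and
// lets the macro assembler drop the current frame in preparation for a tail
// call.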
3579void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
3580 Register scratch1, Register scratch2,
3581 Register scratch3) {
3582#if DEBUG
3583 if (actual.is_reg()) {
3584 DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
3585 } else {
3586 DCHECK(!AreAliased(scratch1, scratch2, scratch3));
3587 }
3588#endif
3589 if (FLAG_code_comments) {
3590 if (actual.is_reg()) {
3591 Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
3592 } else {
3593 Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
3594 }
3595 }
3596
3597 // Check if next frame is an arguments adaptor frame.
3598 Register caller_args_count_reg = scratch1;
3599 Label no_arguments_adaptor, formal_parameter_count_loaded;
3600 __ ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3601 __ ldr(scratch3,
3602 MemOperand(scratch2, StandardFrameConstants::kContextOffset));
3603 __ cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3604 __ b(ne, &no_arguments_adaptor);
3605
3606 // Drop current frame and load arguments count from arguments adaptor frame.
3607 __ mov(fp, scratch2);
3608 __ ldr(caller_args_count_reg,
3609 MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
3610 __ SmiUntag(caller_args_count_reg);
3611 __ b(&formal_parameter_count_loaded);
3612
3613 __ bind(&no_arguments_adaptor);
3614 // Load caller's formal parameter count
3615 __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
3616
3617 __ bind(&formal_parameter_count_loaded);
3618 __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
3619
3620 Comment(";;; }");
3621}
3622
3623void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3624 HInvokeFunction* hinstr = instr->hydrogen();
3625 DCHECK(ToRegister(instr->context()).is(cp));
3626 DCHECK(ToRegister(instr->function()).is(r1));
3627 DCHECK(instr->HasPointerMap());
3628
3629 bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
3630
3631 if (is_tail_call) {
3632 DCHECK(!info()->saves_caller_doubles());
3633 ParameterCount actual(instr->arity());
3634 // It is safe to use r3, r4 and r5 as scratch registers here given that
3635 // 1) we are not going to return to caller function anyway,
3636 // 2) r3 (new.target) will be initialized below.
3637 PrepareForTailCall(actual, r3, r4, r5);
3638 }
3639
3640 Handle<JSFunction> known_function = hinstr->known_function();
3641 if (known_function.is_null()) {
3642 LPointerMap* pointers = instr->pointer_map();
3643 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3644 ParameterCount actual(instr->arity());
3645 InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
3646 __ InvokeFunction(r1, no_reg, actual, flag, generator);
3647 } else {
3648 CallKnownFunction(known_function, hinstr->formal_parameter_count(),
3649 instr->arity(), is_tail_call, instr);
3650 }
3651}
3652
3653
3654void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3655 DCHECK(ToRegister(instr->result()).is(r0));
3656
3657 if (instr->hydrogen()->IsTailCall()) {
3658 if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
3659
3660 if (instr->target()->IsConstantOperand()) {
3661 LConstantOperand* target = LConstantOperand::cast(instr->target());
3662 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3663 __ Jump(code, RelocInfo::CODE_TARGET);
3664 } else {
3665 DCHECK(instr->target()->IsRegister());
3666 Register target = ToRegister(instr->target());
3667 // Make sure we don't emit any additional entries in the constant pool
3668 // before the call to ensure that the CallCodeSize() calculated the
3669 // correct
3670 // number of instructions for the constant pool load.
3671 {
3672 ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
3673 __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3674 }
3675 __ Jump(target);
3676 }
3677 } else {
3678 LPointerMap* pointers = instr->pointer_map();
3679 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3680
3681 if (instr->target()->IsConstantOperand()) {
3682 LConstantOperand* target = LConstantOperand::cast(instr->target());
3683 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3684 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3685 PlatformInterfaceDescriptor* call_descriptor =
3686 instr->descriptor().platform_specific_descriptor();
3687 if (call_descriptor != NULL) {
3688 __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
3689 call_descriptor->storage_mode());
3690 } else {
3691 __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al);
3692 }
3693 } else {
3694 DCHECK(instr->target()->IsRegister());
3695 Register target = ToRegister(instr->target());
3696 generator.BeforeCall(__ CallSize(target));
3697 // Make sure we don't emit any additional entries in the constant pool
3698 // before the call to ensure that the CallCodeSize() calculated the
3699 // correct
3700 // number of instructions for the constant pool load.
3701 {
3702 ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
3703 __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3704 }
3705 __ Call(target);
3706 }
3707 generator.AfterCall();
3708 }
3709}
3710
3711
3712void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3713 DCHECK(ToRegister(instr->context()).is(cp));
3714 DCHECK(ToRegister(instr->constructor()).is(r1));
3715 DCHECK(ToRegister(instr->result()).is(r0));
3716
3717 __ mov(r0, Operand(instr->arity()));
3718 if (instr->arity() == 1) {
3719 // We only need the allocation site for the case we have a length argument.
3720 // The case may bail out to the runtime, which will determine the correct
3721 // elements kind with the site.
3722 __ Move(r2, instr->hydrogen()->site());
3723 } else {
3724 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
3725 }
3726 ElementsKind kind = instr->hydrogen()->elements_kind();
3727 AllocationSiteOverrideMode override_mode =
3728 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
3729 ? DISABLE_ALLOCATION_SITES
3730 : DONT_OVERRIDE;
3731
3732 if (instr->arity() == 0) {
3733 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
3734 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3735 } else if (instr->arity() == 1) {
3736 Label done;
3737 if (IsFastPackedElementsKind(kind)) {
3738 Label packed_case;
3739 // We might need to use the holey elements kind;
3740 // look at the first argument (the length) to decide.
3741 __ ldr(r5, MemOperand(sp, 0));
3742 __ cmp(r5, Operand::Zero());
3743 __ b(eq, &packed_case);
3744
3745 ElementsKind holey_kind = GetHoleyElementsKind(kind);
3746 ArraySingleArgumentConstructorStub stub(isolate(),
3747 holey_kind,
3748 override_mode);
3749 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3750 __ jmp(&done);
3751 __ bind(&packed_case);
3752 }
3753
3754 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
3755 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3756 __ bind(&done);
3757 } else {
3758 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
3759 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3760 }
3761}
3762
3763
3764void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3765 CallRuntime(instr->function(), instr->arity(), instr);
3766}
3767
3768
3769void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3770 Register function = ToRegister(instr->function());
3771 Register code_object = ToRegister(instr->code_object());
3772 __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
3773 __ str(code_object,
3774 FieldMemOperand(function, JSFunction::kCodeEntryOffset));
3775}
3776
3777
3778void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3779 Register result = ToRegister(instr->result());
3780 Register base = ToRegister(instr->base_object());
3781 if (instr->offset()->IsConstantOperand()) {
3782 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
3783 __ add(result, base, Operand(ToInteger32(offset)));
3784 } else {
3785 Register offset = ToRegister(instr->offset());
3786 __ add(result, base, offset);
3787 }
3788}
3789
3790
3791void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3792 Representation representation = instr->representation();
3793
3794 Register object = ToRegister(instr->object());
3795 Register scratch = scratch0();
3796 HObjectAccess access = instr->hydrogen()->access();
3797 int offset = access.offset();
3798
3799 if (access.IsExternalMemory()) {
3800 Register value = ToRegister(instr->value());
3801 MemOperand operand = MemOperand(object, offset);
3802 __ Store(value, operand, representation);
3803 return;
3804 }
3805
3806 __ AssertNotSmi(object);
3807
3808 DCHECK(!representation.IsSmi() ||
3809 !instr->value()->IsConstantOperand() ||
3810 IsSmi(LConstantOperand::cast(instr->value())));
3811 if (representation.IsDouble()) {
3812 DCHECK(access.IsInobject());
3813 DCHECK(!instr->hydrogen()->has_transition());
3814 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
3815 DwVfpRegister value = ToDoubleRegister(instr->value());
3816 __ vstr(value, FieldMemOperand(object, offset));
3817 return;
3818 }
3819
3820 if (instr->hydrogen()->has_transition()) {
3821 Handle<Map> transition = instr->hydrogen()->transition_map();
3822 AddDeprecationDependency(transition);
3823 __ mov(scratch, Operand(transition));
3824 __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3825 if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
3826 Register temp = ToRegister(instr->temp());
3827 // Update the write barrier for the map field.
3828 __ RecordWriteForMap(object,
3829 scratch,
3830 temp,
3831 GetLinkRegisterState(),
3832 kSaveFPRegs);
3833 }
3834 }
3835
3836 // Do the store.
3837 Register value = ToRegister(instr->value());
3838 if (access.IsInobject()) {
3839 MemOperand operand = FieldMemOperand(object, offset);
3840 __ Store(value, operand, representation);
3841 if (instr->hydrogen()->NeedsWriteBarrier()) {
3842 // Update the write barrier for the object for in-object properties.
3843 __ RecordWriteField(object,
3844 offset,
3845 value,
3846 scratch,
3847 GetLinkRegisterState(),
3848 kSaveFPRegs,
3849 EMIT_REMEMBERED_SET,
3850 instr->hydrogen()->SmiCheckForWriteBarrier(),
3851 instr->hydrogen()->PointersToHereCheckForValue());
3852 }
3853 } else {
3854 __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
3855 MemOperand operand = FieldMemOperand(scratch, offset);
3856 __ Store(value, operand, representation);
3857 if (instr->hydrogen()->NeedsWriteBarrier()) {
3858 // Update the write barrier for the properties array.
3859 // object is used as a scratch register.
3860 __ RecordWriteField(scratch,
3861 offset,
3862 value,
3863 object,
3864 GetLinkRegisterState(),
3865 kSaveFPRegs,
3866 EMIT_REMEMBERED_SET,
3867 instr->hydrogen()->SmiCheckForWriteBarrier(),
3868 instr->hydrogen()->PointersToHereCheckForValue());
3869 }
3870 }
3871}
3872
3873
3874void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3875 DCHECK(ToRegister(instr->context()).is(cp));
3876 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
3877 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
3878
3879 if (instr->hydrogen()->HasVectorAndSlot()) {
3880 EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
3881 }
3882
3883 __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
3884 Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
3885 isolate(), instr->language_mode(),
3886 instr->hydrogen()->initialization_state()).code();
3887 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3888}
3889
3890
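// Compares the index against the length and deoptimizes when the check
// fails; the condition is commuted when the index is a constant operand.
// With --debug-code, statically eliminated checks are verified with a stop.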
3891void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3892 Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
3893 if (instr->index()->IsConstantOperand()) {
3894 Operand index = ToOperand(instr->index());
3895 Register length = ToRegister(instr->length());
3896 __ cmp(length, index);
3897 cc = CommuteCondition(cc);
3898 } else {
3899 Register index = ToRegister(instr->index());
3900 Operand length = ToOperand(instr->length());
3901 __ cmp(index, length);
3902 }
3903 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
3904 Label done;
3905 __ b(NegateCondition(cc), &done);
3906 __ stop("eliminated bounds check failed");
3907 __ bind(&done);
3908 } else {
3909 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
3910 }
3911}
3912
3913
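// Stores an element into a typed array. Float32/float64 kinds use VFP stores
// (narrowing to single precision for float32); integer kinds use strb, strh
// or str according to the element size.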
3914void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
3915 Register external_pointer = ToRegister(instr->elements());
3916 Register key = no_reg;
3917 ElementsKind elements_kind = instr->elements_kind();
3918 bool key_is_constant = instr->key()->IsConstantOperand();
3919 int constant_key = 0;
3920 if (key_is_constant) {
3921 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3922 if (constant_key & 0xF0000000) {
3923 Abort(kArrayIndexConstantValueTooBig);
3924 }
3925 } else {
3926 key = ToRegister(instr->key());
3927 }
3928 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3929 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3930 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3931 int base_offset = instr->base_offset();
3932
3933 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
3934 Register address = scratch0();
3935 DwVfpRegister value(ToDoubleRegister(instr->value()));
3936 if (key_is_constant) {
3937 if (constant_key != 0) {
3938 __ add(address, external_pointer,
3939 Operand(constant_key << element_size_shift));
3940 } else {
3941 address = external_pointer;
3942 }
3943 } else {
3944 __ add(address, external_pointer, Operand(key, LSL, shift_size));
3945 }
3946 if (elements_kind == FLOAT32_ELEMENTS) {
3947 __ vcvt_f32_f64(double_scratch0().low(), value);
3948 __ vstr(double_scratch0().low(), address, base_offset);
3949 } else { // Storing doubles, not floats.
3950 __ vstr(value, address, base_offset);
3951 }
3952 } else {
3953 Register value(ToRegister(instr->value()));
3954 MemOperand mem_operand = PrepareKeyedOperand(
3955 key, external_pointer, key_is_constant, constant_key,
3956 element_size_shift, shift_size,
3957 base_offset);
3958 switch (elements_kind) {
3959 case UINT8_ELEMENTS:
3960 case UINT8_CLAMPED_ELEMENTS:
3961 case INT8_ELEMENTS:
3962 __ strb(value, mem_operand);
3963 break;
3964 case INT16_ELEMENTS:
3965 case UINT16_ELEMENTS:
3966 __ strh(value, mem_operand);
3967 break;
3968 case INT32_ELEMENTS:
3969 case UINT32_ELEMENTS:
3970 __ str(value, mem_operand);
3971 break;
3972 case FLOAT32_ELEMENTS:
3973 case FLOAT64_ELEMENTS:
3974 case FAST_DOUBLE_ELEMENTS:
3975 case FAST_ELEMENTS:
3976 case FAST_SMI_ELEMENTS:
3977 case FAST_HOLEY_DOUBLE_ELEMENTS:
3978 case FAST_HOLEY_ELEMENTS:
3979 case FAST_HOLEY_SMI_ELEMENTS:
3980 case DICTIONARY_ELEMENTS:
3981 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
3982 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
3983 case FAST_STRING_WRAPPER_ELEMENTS:
3984 case SLOW_STRING_WRAPPER_ELEMENTS:
3985 case NO_ELEMENTS:
3986 UNREACHABLE();
3987 break;
3988 }
3989 }
3990}
3991
3992
3993void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
3994 DwVfpRegister value = ToDoubleRegister(instr->value());
3995 Register elements = ToRegister(instr->elements());
3996 Register scratch = scratch0();
3997 DwVfpRegister double_scratch = double_scratch0();
3998 bool key_is_constant = instr->key()->IsConstantOperand();
3999 int base_offset = instr->base_offset();
4000
4001 // Calculate the effective address of the slot in the array to store the
4002 // double value.
4003 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4004 if (key_is_constant) {
4005 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4006 if (constant_key & 0xF0000000) {
4007 Abort(kArrayIndexConstantValueTooBig);
4008 }
4009 __ add(scratch, elements,
4010 Operand((constant_key << element_size_shift) + base_offset));
4011 } else {
4012 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4013 ? (element_size_shift - kSmiTagSize) : element_size_shift;
4014 __ add(scratch, elements, Operand(base_offset));
4015 __ add(scratch, scratch,
4016 Operand(ToRegister(instr->key()), LSL, shift_size));
4017 }
4018
4019 if (instr->NeedsCanonicalization()) {
4020 // Force a canonical NaN.
4021 __ VFPCanonicalizeNaN(double_scratch, value);
4022 __ vstr(double_scratch, scratch, 0);
4023 } else {
4024 __ vstr(value, scratch, 0);
4025 }
4026}
4027
4028
4029void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4030 Register value = ToRegister(instr->value());
4031 Register elements = ToRegister(instr->elements());
4032 Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
4033 : no_reg;
4034 Register scratch = scratch0();
4035 Register store_base = scratch;
4036 int offset = instr->base_offset();
4037
4038 // Do the store.
4039 if (instr->key()->IsConstantOperand()) {
4040 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4041 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4042 offset += ToInteger32(const_operand) * kPointerSize;
4043 store_base = elements;
4044 } else {
4045 // Even though the HLoadKeyed instruction forces the input
4046 // representation for the key to be an integer, the input gets replaced
4047 // during bound check elimination with the index argument to the bounds
4048 // check, which can be tagged, so that case must be handled here, too.
4049 if (instr->hydrogen()->key()->representation().IsSmi()) {
4050 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
4051 } else {
4052 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
4053 }
4054 }
4055 __ str(value, MemOperand(store_base, offset));
4056
4057 if (instr->hydrogen()->NeedsWriteBarrier()) {
4058 SmiCheck check_needed =
4059 instr->hydrogen()->value()->type().IsHeapObject()
4060 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4061 // Compute address of modified element and store it into key register.
4062 __ add(key, store_base, Operand(offset));
4063 __ RecordWrite(elements,
4064 key,
4065 value,
4066 GetLinkRegisterState(),
4067 kSaveFPRegs,
4068 EMIT_REMEMBERED_SET,
4069 check_needed,
4070 instr->hydrogen()->PointersToHereCheckForValue());
4071 }
4072}
4073
4074
4075void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4076 // By cases: external, fast double
4077 if (instr->is_fixed_typed_array()) {
4078 DoStoreKeyedExternalArray(instr);
4079 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4080 DoStoreKeyedFixedDoubleArray(instr);
4081 } else {
4082 DoStoreKeyedFixedArray(instr);
4083 }
4084}
4085
4086
4087void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4088 DCHECK(ToRegister(instr->context()).is(cp));
4089 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4090 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4091 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4092
4093 if (instr->hydrogen()->HasVectorAndSlot()) {
4094 EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
4095 }
4096
4097 Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
4098 isolate(), instr->language_mode(),
4099 instr->hydrogen()->initialization_state()).code();
4100 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
4101}
4102
4103
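// Grows the elements backing store when the key is at or beyond the current
// capacity. The deferred path calls GrowArrayElementsStub and deoptimizes if
// the elements have changed to dictionary mode.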
4104void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
4105 class DeferredMaybeGrowElements final : public LDeferredCode {
4106 public:
4107 DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
4108 : LDeferredCode(codegen), instr_(instr) {}
4109 void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
4110 LInstruction* instr() override { return instr_; }
4111
4112 private:
4113 LMaybeGrowElements* instr_;
4114 };
4115
4116 Register result = r0;
4117 DeferredMaybeGrowElements* deferred =
4118 new (zone()) DeferredMaybeGrowElements(this, instr);
4119 LOperand* key = instr->key();
4120 LOperand* current_capacity = instr->current_capacity();
4121
4122 DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
4123 DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
4124 DCHECK(key->IsConstantOperand() || key->IsRegister());
4125 DCHECK(current_capacity->IsConstantOperand() ||
4126 current_capacity->IsRegister());
4127
4128 if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
4129 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4130 int32_t constant_capacity =
4131 ToInteger32(LConstantOperand::cast(current_capacity));
4132 if (constant_key >= constant_capacity) {
4133 // Deferred case.
4134 __ jmp(deferred->entry());
4135 }
4136 } else if (key->IsConstantOperand()) {
4137 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4138 __ cmp(ToRegister(current_capacity), Operand(constant_key));
4139 __ b(le, deferred->entry());
4140 } else if (current_capacity->IsConstantOperand()) {
4141 int32_t constant_capacity =
4142 ToInteger32(LConstantOperand::cast(current_capacity));
4143 __ cmp(ToRegister(key), Operand(constant_capacity));
4144 __ b(ge, deferred->entry());
4145 } else {
4146 __ cmp(ToRegister(key), ToRegister(current_capacity));
4147 __ b(ge, deferred->entry());
4148 }
4149
4150 if (instr->elements()->IsRegister()) {
4151 __ Move(result, ToRegister(instr->elements()));
4152 } else {
4153 __ ldr(result, ToMemOperand(instr->elements()));
4154 }
4155
4156 __ bind(deferred->exit());
4157}
4158
4159
4160void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
4161 // TODO(3095996): Get rid of this. For now, we need to make the
4162 // result register contain a valid pointer because it is already
4163 // contained in the register pointer map.
4164 Register result = r0;
4165 __ mov(result, Operand::Zero());
4166
4167 // We have to call a stub.
4168 {
4169 PushSafepointRegistersScope scope(this);
4170 if (instr->object()->IsRegister()) {
4171 __ Move(result, ToRegister(instr->object()));
4172 } else {
4173 __ ldr(result, ToMemOperand(instr->object()));
4174 }
4175
4176 LOperand* key = instr->key();
4177 if (key->IsConstantOperand()) {
4178 LConstantOperand* constant_key = LConstantOperand::cast(key);
4179 int32_t int_key = ToInteger32(constant_key);
4180 if (Smi::IsValid(int_key)) {
4181 __ mov(r3, Operand(Smi::FromInt(int_key)));
4182 } else {
4183 // We should never get here at runtime because there is a smi check on
4184 // the key before this point.
4185 __ stop("expected smi");
4186 }
4187 } else {
4188 __ Move(r3, ToRegister(key));
4189 __ SmiTag(r3);
4190 }
4191
4192 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
4193 instr->hydrogen()->kind());
4194 __ CallStub(&stub);
4195 RecordSafepointWithLazyDeopt(
4196 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4197 __ StoreToSafepointRegisterSlot(result, result);
4198 }
4199
4200 // Deopt on smi, which means the elements array changed to dictionary mode.
4201 __ SmiTst(result);
4202 DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
4203}
4204
4205
4206void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4207 Register object_reg = ToRegister(instr->object());
4208 Register scratch = scratch0();
4209
4210 Handle<Map> from_map = instr->original_map();
4211 Handle<Map> to_map = instr->transitioned_map();
4212 ElementsKind from_kind = instr->from_kind();
4213 ElementsKind to_kind = instr->to_kind();
4214
4215 Label not_applicable;
4216 __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4217 __ cmp(scratch, Operand(from_map));
4218 __ b(ne, &not_applicable);
4219
4220 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4221 Register new_map_reg = ToRegister(instr->new_map_temp());
4222 __ mov(new_map_reg, Operand(to_map));
4223 __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4224 // Write barrier.
4225 __ RecordWriteForMap(object_reg,
4226 new_map_reg,
4227 scratch,
4228 GetLinkRegisterState(),
4229 kDontSaveFPRegs);
4230 } else {
4231 DCHECK(ToRegister(instr->context()).is(cp));
4232 DCHECK(object_reg.is(r0));
4233 PushSafepointRegistersScope scope(this);
4234 __ Move(r1, to_map);
4235 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4236 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4237 __ CallStub(&stub);
4238 RecordSafepointWithRegisters(
4239 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
4240 }
4241 __ bind(&not_applicable);
4242}
4243
4244
4245void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4246 Register object = ToRegister(instr->object());
4247 Register temp = ToRegister(instr->temp());
4248 Label no_memento_found;
4249 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4250 DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
4251 __ bind(&no_memento_found);
4252}
4253
4254
4255void LCodeGen::DoStringAdd(LStringAdd* instr) {
4256 DCHECK(ToRegister(instr->context()).is(cp));
4257 DCHECK(ToRegister(instr->left()).is(r1));
4258 DCHECK(ToRegister(instr->right()).is(r0));
4259 StringAddStub stub(isolate(),
4260 instr->hydrogen()->flags(),
4261 instr->hydrogen()->pretenure_flag());
4262 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4263}
4264
4265
4266void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4267 class DeferredStringCharCodeAt final : public LDeferredCode {
4268 public:
4269 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4270 : LDeferredCode(codegen), instr_(instr) { }
4271 void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
4272 LInstruction* instr() override { return instr_; }
4273
4274 private:
4275 LStringCharCodeAt* instr_;
4276 };
4277
4278 DeferredStringCharCodeAt* deferred =
4279 new(zone()) DeferredStringCharCodeAt(this, instr);
4280
4281 StringCharLoadGenerator::Generate(masm(),
4282 ToRegister(instr->string()),
4283 ToRegister(instr->index()),
4284 ToRegister(instr->result()),
4285 deferred->entry());
4286 __ bind(deferred->exit());
4287}
4288
4289
4290void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4291 Register string = ToRegister(instr->string());
4292 Register result = ToRegister(instr->result());
4293 Register scratch = scratch0();
4294
4295 // TODO(3095996): Get rid of this. For now, we need to make the
4296 // result register contain a valid pointer because it is already
4297 // contained in the register pointer map.
4298 __ mov(result, Operand::Zero());
4299
4300 PushSafepointRegistersScope scope(this);
4301 __ push(string);
4302 // Push the index as a smi. This is safe because of the checks in
4303 // DoStringCharCodeAt above.
4304 if (instr->index()->IsConstantOperand()) {
4305 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4306 __ mov(scratch, Operand(Smi::FromInt(const_index)));
4307 __ push(scratch);
4308 } else {
4309 Register index = ToRegister(instr->index());
4310 __ SmiTag(index);
4311 __ push(index);
4312 }
4313 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
4314 instr->context());
4315 __ AssertSmi(r0);
4316 __ SmiUntag(r0);
4317 __ StoreToSafepointRegisterSlot(r0, result);
4318}
4319
4320
4321void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4322 class DeferredStringCharFromCode final : public LDeferredCode {
4323 public:
4324 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4325 : LDeferredCode(codegen), instr_(instr) { }
4326 void Generate() override {
4327 codegen()->DoDeferredStringCharFromCode(instr_);
4328 }
4329 LInstruction* instr() override { return instr_; }
4330
4331 private:
4332 LStringCharFromCode* instr_;
4333 };
4334
4335 DeferredStringCharFromCode* deferred =
4336 new(zone()) DeferredStringCharFromCode(this, instr);
4337
4338 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4339 Register char_code = ToRegister(instr->char_code());
4340 Register result = ToRegister(instr->result());
4341 DCHECK(!char_code.is(result));
4342
4343 __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
4344 __ b(hi, deferred->entry());
4345 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4346 __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
4347 __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4348 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4349 __ cmp(result, ip);
4350 __ b(eq, deferred->entry());
4351 __ bind(deferred->exit());
4352}
4353
4354
4355void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4356 Register char_code = ToRegister(instr->char_code());
4357 Register result = ToRegister(instr->result());
4358
4359 // TODO(3095996): Get rid of this. For now, we need to make the
4360 // result register contain a valid pointer because it is already
4361 // contained in the register pointer map.
4362 __ mov(result, Operand::Zero());
4363
4364 PushSafepointRegistersScope scope(this);
4365 __ SmiTag(char_code);
4366 __ push(char_code);
4367 CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
4368 instr->context());
4369 __ StoreToSafepointRegisterSlot(r0, result);
4370}
4371
4372
4373void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4374 LOperand* input = instr->value();
4375 DCHECK(input->IsRegister() || input->IsStackSlot());
4376 LOperand* output = instr->result();
4377 DCHECK(output->IsDoubleRegister());
4378 SwVfpRegister single_scratch = double_scratch0().low();
4379 if (input->IsStackSlot()) {
4380 Register scratch = scratch0();
4381 __ ldr(scratch, ToMemOperand(input));
4382 __ vmov(single_scratch, scratch);
4383 } else {
4384 __ vmov(single_scratch, ToRegister(input));
4385 }
4386 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
4387}
4388
4389
4390void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4391 LOperand* input = instr->value();
4392 LOperand* output = instr->result();
4393
4394 SwVfpRegister flt_scratch = double_scratch0().low();
4395 __ vmov(flt_scratch, ToRegister(input));
4396 __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
4397}
4398
4399
4400void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4401 class DeferredNumberTagI final : public LDeferredCode {
4402 public:
4403 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4404 : LDeferredCode(codegen), instr_(instr) { }
4405 void Generate() override {
4406 codegen()->DoDeferredNumberTagIU(instr_,
4407 instr_->value(),
4408 instr_->temp1(),
4409 instr_->temp2(),
4410 SIGNED_INT32);
4411 }
4412 LInstruction* instr() override { return instr_; }
4413
4414 private:
4415 LNumberTagI* instr_;
4416 };
4417
4418 Register src = ToRegister(instr->value());
4419 Register dst = ToRegister(instr->result());
4420
4421 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4422 __ SmiTag(dst, src, SetCC);
4423 __ b(vs, deferred->entry());
4424 __ bind(deferred->exit());
4425}
4426
4427
4428void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4429 class DeferredNumberTagU final : public LDeferredCode {
4430 public:
4431 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4432 : LDeferredCode(codegen), instr_(instr) { }
4433 void Generate() override {
4434 codegen()->DoDeferredNumberTagIU(instr_,
4435 instr_->value(),
4436 instr_->temp1(),
4437 instr_->temp2(),
4438 UNSIGNED_INT32);
4439 }
4440 LInstruction* instr() override { return instr_; }
4441
4442 private:
4443 LNumberTagU* instr_;
4444 };
4445
4446 Register input = ToRegister(instr->value());
4447 Register result = ToRegister(instr->result());
4448
4449 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4450 __ cmp(input, Operand(Smi::kMaxValue));
4451 __ b(hi, deferred->entry());
4452 __ SmiTag(result, input);
4453 __ bind(deferred->exit());
4454}
4455
4456
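// Boxes an int32 or uint32 value that did not fit in a Smi into a freshly
// allocated HeapNumber, falling back to the runtime when new-space
// allocation fails.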
4457void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4458 LOperand* value,
4459 LOperand* temp1,
4460 LOperand* temp2,
4461 IntegerSignedness signedness) {
4462 Label done, slow;
4463 Register src = ToRegister(value);
4464 Register dst = ToRegister(instr->result());
4465 Register tmp1 = scratch0();
4466 Register tmp2 = ToRegister(temp1);
4467 Register tmp3 = ToRegister(temp2);
4468 LowDwVfpRegister dbl_scratch = double_scratch0();
4469
4470 if (signedness == SIGNED_INT32) {
4471 // There was overflow, so bits 30 and 31 of the original integer
4472 // disagree. Try to allocate a heap number in new space and store
4473 // the value in there. If that fails, call the runtime system.
4474 if (dst.is(src)) {
4475 __ SmiUntag(src, dst);
4476 __ eor(src, src, Operand(0x80000000));
4477 }
4478 __ vmov(dbl_scratch.low(), src);
4479 __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
4480 } else {
4481 __ vmov(dbl_scratch.low(), src);
4482 __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
4483 }
4484
4485 if (FLAG_inline_new) {
4486 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4487 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
4488 __ b(&done);
4489 }
4490
4491 // Slow case: Call the runtime system to do the number allocation.
4492 __ bind(&slow);
4493 {
4494 // TODO(3095996): Put a valid pointer value in the stack slot where the
4495 // result register is stored, as this register is in the pointer map, but
4496 // contains an integer value.
4497 __ mov(dst, Operand::Zero());
4498
4499 // Preserve the value of all registers.
4500 PushSafepointRegistersScope scope(this);
4501
4502 // NumberTagI and NumberTagD use the context from the frame, rather than
4503 // the environment's HContext or HInlinedContext value.
4504 // They only call Runtime::kAllocateHeapNumber.
4505 // The corresponding HChange instructions are added in a phase that does
4506 // not have easy access to the local context.
4507 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4508 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4509 RecordSafepointWithRegisters(
4510 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4511 __ StoreToSafepointRegisterSlot(r0, dst);
4512 }
4513
4514 // Done. Put the value in dbl_scratch into the value of the allocated heap
4515 // number.
4516 __ bind(&done);
4517 __ vstr(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
4518}
4519
4520
4521void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4522 class DeferredNumberTagD final : public LDeferredCode {
4523 public:
4524 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4525 : LDeferredCode(codegen), instr_(instr) { }
4526 void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
4527 LInstruction* instr() override { return instr_; }
4528
4529 private:
4530 LNumberTagD* instr_;
4531 };
4532
4533 DwVfpRegister input_reg = ToDoubleRegister(instr->value());
4534 Register scratch = scratch0();
4535 Register reg = ToRegister(instr->result());
4536 Register temp1 = ToRegister(instr->temp());
4537 Register temp2 = ToRegister(instr->temp2());
4538
4539 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4540 if (FLAG_inline_new) {
4541 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4542 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
4543 } else {
4544 __ jmp(deferred->entry());
4545 }
4546 __ bind(deferred->exit());
4547 __ vstr(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
4548}
4549
4550
4551void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4552 // TODO(3095996): Get rid of this. For now, we need to make the
4553 // result register contain a valid pointer because it is already
4554 // contained in the register pointer map.
4555 Register reg = ToRegister(instr->result());
4556 __ mov(reg, Operand::Zero());
4557
4558 PushSafepointRegistersScope scope(this);
4559 // NumberTagI and NumberTagD use the context from the frame, rather than
4560 // the environment's HContext or HInlinedContext value.
4561 // They only call Runtime::kAllocateHeapNumber.
4562 // The corresponding HChange instructions are added in a phase that does
4563 // not have easy access to the local context.
4564 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4565 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4566 RecordSafepointWithRegisters(
4567 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4568 __ StoreToSafepointRegisterSlot(r0, reg);
4569}
4570
4571
4572void LCodeGen::DoSmiTag(LSmiTag* instr) {
4573 HChange* hchange = instr->hydrogen();
4574 Register input = ToRegister(instr->value());
4575 Register output = ToRegister(instr->result());
4576 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4577 hchange->value()->CheckFlag(HValue::kUint32)) {
4578 __ tst(input, Operand(0xc0000000));
4579 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
4580 }
4581 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4582 !hchange->value()->CheckFlag(HValue::kUint32)) {
4583 __ SmiTag(output, input, SetCC);
4584 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
4585 } else {
4586 __ SmiTag(output, input);
4587 }
4588}
4589
4590
4591void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4592 Register input = ToRegister(instr->value());
4593 Register result = ToRegister(instr->result());
4594 if (instr->needs_check()) {
4595 STATIC_ASSERT(kHeapObjectTag == 1);
4596 // If the input is a HeapObject, SmiUntag will set the carry flag.
4597 __ SmiUntag(result, input, SetCC);
4598 DeoptimizeIf(cs, instr, Deoptimizer::kNotASmi);
4599 } else {
4600 __ SmiUntag(result, input);
4601 }
4602}
4603
4604
4605void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4606 DwVfpRegister result_reg,
4607 NumberUntagDMode mode) {
4608 bool can_convert_undefined_to_nan =
4609 instr->hydrogen()->can_convert_undefined_to_nan();
4610 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4611
4612 Register scratch = scratch0();
4613 SwVfpRegister flt_scratch = double_scratch0().low();
4614 DCHECK(!result_reg.is(double_scratch0()));
4615 Label convert, load_smi, done;
4616 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4617 // Smi check.
4618 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4619 // Heap number map check.
4620 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4621 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4622 __ cmp(scratch, Operand(ip));
4623 if (can_convert_undefined_to_nan) {
4624 __ b(ne, &convert);
4625 } else {
4626 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
4627 }
4628 // load heap number
4629 __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
4630 if (deoptimize_on_minus_zero) {
4631 __ VmovLow(scratch, result_reg);
4632 __ cmp(scratch, Operand::Zero());
4633 __ b(ne, &done);
4634 __ VmovHigh(scratch, result_reg);
4635 __ cmp(scratch, Operand(HeapNumber::kSignMask));
4636 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
4637 }
4638 __ jmp(&done);
4639 if (can_convert_undefined_to_nan) {
4640 __ bind(&convert);
4641 // Convert undefined (and hole) to NaN.
4642 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4643 __ cmp(input_reg, Operand(ip));
4644 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
4645 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4646 __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
4647 __ jmp(&done);
4648 }
4649 } else {
4650 __ SmiUntag(scratch, input_reg);
4651 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4652 }
4653 // Smi to double register conversion
4654 __ bind(&load_smi);
4655 // scratch: untagged value of input_reg
4656 __ vmov(flt_scratch, scratch);
4657 __ vcvt_f64_s32(result_reg, flt_scratch);
4658 __ bind(&done);
4659}
4660
4661
4662void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4663 Register input_reg = ToRegister(instr->value());
4664 Register scratch1 = scratch0();
4665 Register scratch2 = ToRegister(instr->temp());
4666 LowDwVfpRegister double_scratch = double_scratch0();
4667 DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());
4668
4669 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4670 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4671
4672 Label done;
4673
4674 // The input was optimistically untagged; revert it.
4675 // The carry flag is set when we reach this deferred code as we just executed
4676 // SmiUntag(heap_object, SetCC)
4677 STATIC_ASSERT(kHeapObjectTag == 1);
4678 __ adc(scratch2, input_reg, Operand(input_reg));
4679
4680 // Heap number map check.
4681 __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset));
4682 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4683 __ cmp(scratch1, Operand(ip));
4684
4685 if (instr->truncating()) {
4686 // Performs a truncating conversion of a floating point number as used by
4687 // the JS bitwise operations.
4688 Label no_heap_number, check_bools, check_false;
4689 __ b(ne, &no_heap_number);
4690 __ TruncateHeapNumberToI(input_reg, scratch2);
4691 __ b(&done);
4692
4693    // Check for oddballs. Undefined and False are converted to zero, and
4694    // True to one, for truncating conversions.
4695 __ bind(&no_heap_number);
4696 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4697 __ cmp(scratch2, Operand(ip));
4698 __ b(ne, &check_bools);
4699 __ mov(input_reg, Operand::Zero());
4700 __ b(&done);
4701
4702 __ bind(&check_bools);
4703 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
4704 __ cmp(scratch2, Operand(ip));
4705 __ b(ne, &check_false);
4706 __ mov(input_reg, Operand(1));
4707 __ b(&done);
4708
4709 __ bind(&check_false);
4710 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
4711 __ cmp(scratch2, Operand(ip));
4712 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
4713 __ mov(input_reg, Operand::Zero());
4714 } else {
4715 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
4716
4717 __ sub(ip, scratch2, Operand(kHeapObjectTag));
4718 __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
4719 __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
4720 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
4721
4722 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4723 __ cmp(input_reg, Operand::Zero());
4724 __ b(ne, &done);
4725 __ VmovHigh(scratch1, double_scratch2);
4726 __ tst(scratch1, Operand(HeapNumber::kSignMask));
4727 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
4728 }
4729 }
4730 __ bind(&done);
4731}
4732
4733
4734void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4735 class DeferredTaggedToI final : public LDeferredCode {
4736 public:
4737 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4738 : LDeferredCode(codegen), instr_(instr) { }
4739 void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
4740 LInstruction* instr() override { return instr_; }
4741
4742 private:
4743 LTaggedToI* instr_;
4744 };
4745
4746 LOperand* input = instr->value();
4747 DCHECK(input->IsRegister());
4748 DCHECK(input->Equals(instr->result()));
4749
4750 Register input_reg = ToRegister(input);
4751
4752 if (instr->hydrogen()->value()->representation().IsSmi()) {
4753 __ SmiUntag(input_reg);
4754 } else {
4755 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4756
4757 // Optimistically untag the input.
4758 // If the input is a HeapObject, SmiUntag will set the carry flag.
4759 __ SmiUntag(input_reg, SetCC);
4760 // Branch to deferred code if the input was tagged.
4761 // The deferred code will take care of restoring the tag.
4762 __ b(cs, deferred->entry());
4763 __ bind(deferred->exit());
4764 }
4765}
4766
4767
4768void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4769 LOperand* input = instr->value();
4770 DCHECK(input->IsRegister());
4771 LOperand* result = instr->result();
4772 DCHECK(result->IsDoubleRegister());
4773
4774 Register input_reg = ToRegister(input);
4775 DwVfpRegister result_reg = ToDoubleRegister(result);
4776
4777 HValue* value = instr->hydrogen()->value();
4778 NumberUntagDMode mode = value->representation().IsSmi()
4779 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4780
4781 EmitNumberUntagD(instr, input_reg, result_reg, mode);
4782}
4783
4784
4785void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4786 Register result_reg = ToRegister(instr->result());
4787 Register scratch1 = scratch0();
4788 DwVfpRegister double_input = ToDoubleRegister(instr->value());
4789 LowDwVfpRegister double_scratch = double_scratch0();
4790
4791 if (instr->truncating()) {
4792 __ TruncateDoubleToI(result_reg, double_input);
4793 } else {
4794 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
4795    // Deoptimize if the input wasn't an int32 (inside a double).
4796 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
4797 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4798 Label done;
4799 __ cmp(result_reg, Operand::Zero());
4800 __ b(ne, &done);
4801 __ VmovHigh(scratch1, double_input);
4802 __ tst(scratch1, Operand(HeapNumber::kSignMask));
4803 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
4804 __ bind(&done);
4805 }
4806 }
4807}
4808
4809
4810void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
4811 Register result_reg = ToRegister(instr->result());
4812 Register scratch1 = scratch0();
4813 DwVfpRegister double_input = ToDoubleRegister(instr->value());
4814 LowDwVfpRegister double_scratch = double_scratch0();
4815
4816 if (instr->truncating()) {
4817 __ TruncateDoubleToI(result_reg, double_input);
4818 } else {
4819 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
4820    // Deoptimize if the input wasn't an int32 (inside a double).
4821 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
4822 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4823 Label done;
4824 __ cmp(result_reg, Operand::Zero());
4825 __ b(ne, &done);
4826 __ VmovHigh(scratch1, double_input);
4827 __ tst(scratch1, Operand(HeapNumber::kSignMask));
4828 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
4829 __ bind(&done);
4830 }
4831 }
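  // Tagging doubles the value; the overflow flag is set if it does not fit
  // in a smi.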
4832 __ SmiTag(result_reg, SetCC);
4833 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
4834}
4835
4836
4837void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4838 LOperand* input = instr->value();
4839 __ SmiTst(ToRegister(input));
4840 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
4841}
4842
4843
4844void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4845 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4846 LOperand* input = instr->value();
4847 __ SmiTst(ToRegister(input));
4848 DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
4849 }
4850}
4851
4852
4853void LCodeGen::DoCheckArrayBufferNotNeutered(
4854 LCheckArrayBufferNotNeutered* instr) {
4855 Register view = ToRegister(instr->view());
4856 Register scratch = scratch0();
4857
4858 __ ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
4859 __ ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
4860 __ tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
4861 DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds);
4862}
4863
4864
4865void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4866 Register input = ToRegister(instr->value());
4867 Register scratch = scratch0();
4868
4869 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
4870 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4871
4872 if (instr->hydrogen()->is_interval_check()) {
4873 InstanceType first;
4874 InstanceType last;
4875 instr->hydrogen()->GetCheckInterval(&first, &last);
4876
4877 __ cmp(scratch, Operand(first));
4878
4879    // If there is only one type in the interval, check for equality.
4880 if (first == last) {
4881 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
4882 } else {
4883 DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
4884 // Omit check for the last type.
4885 if (last != LAST_TYPE) {
4886 __ cmp(scratch, Operand(last));
4887 DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
4888 }
4889 }
4890 } else {
4891 uint8_t mask;
4892 uint8_t tag;
4893 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4894
4895 if (base::bits::IsPowerOfTwo32(mask)) {
4896 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
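      // With a single-bit mask a tst suffices: deopt when the bit is set if
      // tag is zero, or when it is clear otherwise.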
4897 __ tst(scratch, Operand(mask));
4898 DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType);
4899 } else {
4900 __ and_(scratch, scratch, Operand(mask));
4901 __ cmp(scratch, Operand(tag));
4902 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
4903 }
4904 }
4905}
4906
4907
4908void LCodeGen::DoCheckValue(LCheckValue* instr) {
4909 Register reg = ToRegister(instr->value());
4910 Handle<HeapObject> object = instr->hydrogen()->object().handle();
4911 AllowDeferredHandleDereference smi_check;
4912 if (isolate()->heap()->InNewSpace(*object)) {
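    // The object may move in new space, so compare through a cell the GC
    // keeps up to date.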
4914 Handle<Cell> cell = isolate()->factory()->NewCell(object);
4915 __ mov(ip, Operand(cell));
4916 __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
4917 __ cmp(reg, ip);
4918 } else {
4919 __ cmp(reg, Operand(object));
4920 }
4921 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
4922}
4923
4924
4925void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
4926 {
4927 PushSafepointRegistersScope scope(this);
4928 __ push(object);
4929 __ mov(cp, Operand::Zero());
4930 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
4931 RecordSafepointWithRegisters(
4932 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
4933 __ StoreToSafepointRegisterSlot(r0, scratch0());
4934 }
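  // TryMigrateInstance returns a Smi to signal that the migration failed.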
4935 __ tst(scratch0(), Operand(kSmiTagMask));
4936 DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed);
4937}
4938
4939
4940void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4941 class DeferredCheckMaps final : public LDeferredCode {
4942 public:
4943 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
4944 : LDeferredCode(codegen), instr_(instr), object_(object) {
4945 SetExit(check_maps());
4946 }
4947 void Generate() override {
4948 codegen()->DoDeferredInstanceMigration(instr_, object_);
4949 }
4950 Label* check_maps() { return &check_maps_; }
4951 LInstruction* instr() override { return instr_; }
4952
4953 private:
4954 LCheckMaps* instr_;
4955 Label check_maps_;
4956 Register object_;
4957 };
4958
4959 if (instr->hydrogen()->IsStabilityCheck()) {
4960 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
4961 for (int i = 0; i < maps->size(); ++i) {
4962 AddStabilityDependency(maps->at(i).handle());
4963 }
4964 return;
4965 }
4966
4967 Register map_reg = scratch0();
4968
4969 LOperand* input = instr->value();
4970 DCHECK(input->IsRegister());
4971 Register reg = ToRegister(input);
4972
4973 __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
4974
4975 DeferredCheckMaps* deferred = NULL;
4976 if (instr->hydrogen()->HasMigrationTarget()) {
4977 deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
4978 __ bind(deferred->check_maps());
4979 }
4980
4981 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
4982 Label success;
4983 for (int i = 0; i < maps->size() - 1; i++) {
4984 Handle<Map> map = maps->at(i).handle();
4985 __ CompareMap(map_reg, map, &success);
4986 __ b(eq, &success);
4987 }
4988
4989 Handle<Map> map = maps->at(maps->size() - 1).handle();
4990 __ CompareMap(map_reg, map, &success);
4991 if (instr->hydrogen()->HasMigrationTarget()) {
4992 __ b(ne, deferred->entry());
4993 } else {
4994 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
4995 }
4996
4997 __ bind(&success);
4998}
4999
5000
5001void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5002 DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
5003 Register result_reg = ToRegister(instr->result());
5004 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
5005}
5006
5007
5008void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5009 Register unclamped_reg = ToRegister(instr->unclamped());
5010 Register result_reg = ToRegister(instr->result());
5011 __ ClampUint8(result_reg, unclamped_reg);
5012}
5013
5014
5015void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5016 Register scratch = scratch0();
5017 Register input_reg = ToRegister(instr->unclamped());
5018 Register result_reg = ToRegister(instr->result());
5019 DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
5020 Label is_smi, done, heap_number;
5021
5022 // Both smi and heap number cases are handled.
5023 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
5024
5025 // Check for heap number
5026 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5027 __ cmp(scratch, Operand(factory()->heap_number_map()));
5028 __ b(eq, &heap_number);
5029
5030 // Check for undefined. Undefined is converted to zero for clamping
5031 // conversions.
5032 __ cmp(input_reg, Operand(factory()->undefined_value()));
5033 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
5034 __ mov(result_reg, Operand::Zero());
5035 __ jmp(&done);
5036
5037 // Heap number
5038 __ bind(&heap_number);
5039 __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
5040 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
5041 __ jmp(&done);
5042
5043 // smi
5044 __ bind(&is_smi);
5045 __ ClampUint8(result_reg, result_reg);
5046
5047 __ bind(&done);
5048}
5049
5050
5051void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5052 DwVfpRegister value_reg = ToDoubleRegister(instr->value());
5053 Register result_reg = ToRegister(instr->result());
5054 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5055 __ VmovHigh(result_reg, value_reg);
5056 } else {
5057 __ VmovLow(result_reg, value_reg);
5058 }
5059}
5060
5061
5062void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5063 Register hi_reg = ToRegister(instr->hi());
5064 Register lo_reg = ToRegister(instr->lo());
5065 DwVfpRegister result_reg = ToDoubleRegister(instr->result());
5066 __ VmovHigh(result_reg, hi_reg);
5067 __ VmovLow(result_reg, lo_reg);
5068}
5069
5070
5071void LCodeGen::DoAllocate(LAllocate* instr) {
5072 class DeferredAllocate final : public LDeferredCode {
5073 public:
5074 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5075 : LDeferredCode(codegen), instr_(instr) { }
5076 void Generate() override { codegen()->DoDeferredAllocate(instr_); }
5077 LInstruction* instr() override { return instr_; }
5078
5079 private:
5080 LAllocate* instr_;
5081 };
5082
5083 DeferredAllocate* deferred =
5084 new(zone()) DeferredAllocate(this, instr);
5085
5086 Register result = ToRegister(instr->result());
5087 Register scratch = ToRegister(instr->temp1());
5088 Register scratch2 = ToRegister(instr->temp2());
5089
5090 // Allocate memory for the object.
Ben Murdochc5610432016-08-08 18:44:38 +01005091 AllocationFlags flags = NO_ALLOCATION_FLAGS;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005092 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5093 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5094 }
5095 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5096 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5097 flags = static_cast<AllocationFlags>(flags | PRETENURE);
5098 }
5099
Ben Murdochc5610432016-08-08 18:44:38 +01005100 if (instr->hydrogen()->IsAllocationFoldingDominator()) {
5101 flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
5102 }
5103 DCHECK(!instr->hydrogen()->IsAllocationFolded());
5104
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005105 if (instr->size()->IsConstantOperand()) {
5106 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5107 CHECK(size <= Page::kMaxRegularHeapObjectSize);
5108 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5109 } else {
5110 Register size = ToRegister(instr->size());
5111 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5112 }
5113
5114 __ bind(deferred->exit());
5115
5116 if (instr->hydrogen()->MustPrefillWithFiller()) {
5117 STATIC_ASSERT(kHeapObjectTag == 1);
5118 if (instr->size()->IsConstantOperand()) {
5119 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5120 __ mov(scratch, Operand(size - kHeapObjectTag));
5121 } else {
5122 __ sub(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
5123 }
5124 __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
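    // Walk scratch down from the end of the object, storing the filler map
    // into every word.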
5125 Label loop;
5126 __ bind(&loop);
5127 __ sub(scratch, scratch, Operand(kPointerSize), SetCC);
5128 __ str(scratch2, MemOperand(result, scratch));
5129 __ b(ge, &loop);
5130 }
5131}
5132
5133
5134void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5135 Register result = ToRegister(instr->result());
5136
5137 // TODO(3095996): Get rid of this. For now, we need to make the
5138 // result register contain a valid pointer because it is already
5139 // contained in the register pointer map.
5140 __ mov(result, Operand(Smi::FromInt(0)));
5141
5142 PushSafepointRegistersScope scope(this);
5143 if (instr->size()->IsRegister()) {
5144 Register size = ToRegister(instr->size());
5145 DCHECK(!size.is(result));
5146 __ SmiTag(size);
5147 __ push(size);
5148 } else {
5149 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5150 if (size >= 0 && size <= Smi::kMaxValue) {
5151 __ Push(Smi::FromInt(size));
5152 } else {
5153 // We should never get here at runtime => abort
5154 __ stop("invalid allocation size");
5155 return;
5156 }
5157 }
5158
5159 int flags = AllocateDoubleAlignFlag::encode(
5160 instr->hydrogen()->MustAllocateDoubleAligned());
5161 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5162 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5163 flags = AllocateTargetSpace::update(flags, OLD_SPACE);
5164 } else {
5165 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5166 }
5167 __ Push(Smi::FromInt(flags));
5168
5169 CallRuntimeFromDeferred(
5170 Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5171 __ StoreToSafepointRegisterSlot(r0, result);
Ben Murdochc5610432016-08-08 18:44:38 +01005172
5173 if (instr->hydrogen()->IsAllocationFoldingDominator()) {
5174 AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
5175 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5176 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5177 allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
5178 }
5179    // If the allocation folding dominator allocation triggered a GC, the
5180    // allocation happened in the runtime. We have to reset the top pointer
5181    // to virtually undo the allocation.
5182 ExternalReference allocation_top =
5183 AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
5184 Register top_address = scratch0();
5185 __ sub(r0, r0, Operand(kHeapObjectTag));
5186 __ mov(top_address, Operand(allocation_top));
5187 __ str(r0, MemOperand(top_address));
5188 __ add(r0, r0, Operand(kHeapObjectTag));
5189 }
5190}
5191
5192void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
5193 DCHECK(instr->hydrogen()->IsAllocationFolded());
5194 DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
5195 Register result = ToRegister(instr->result());
5196 Register scratch1 = ToRegister(instr->temp1());
5197 Register scratch2 = ToRegister(instr->temp2());
5198
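  // Folded allocations take memory from the region their allocation folding
  // dominator already reserved, so no limit check is needed here.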
5199 AllocationFlags flags = ALLOCATION_FOLDED;
5200 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5201 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5202 }
5203 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5204 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5205 flags = static_cast<AllocationFlags>(flags | PRETENURE);
5206 }
5207 if (instr->size()->IsConstantOperand()) {
5208 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5209 CHECK(size <= Page::kMaxRegularHeapObjectSize);
5210 __ FastAllocate(size, result, scratch1, scratch2, flags);
5211 } else {
5212 Register size = ToRegister(instr->size());
5213 __ FastAllocate(size, result, scratch1, scratch2, flags);
5214 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005215}
5216
5217
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005218void LCodeGen::DoTypeof(LTypeof* instr) {
5219 DCHECK(ToRegister(instr->value()).is(r3));
5220 DCHECK(ToRegister(instr->result()).is(r0));
5221 Label end, do_call;
5222 Register value_register = ToRegister(instr->value());
5223 __ JumpIfNotSmi(value_register, &do_call);
5224 __ mov(r0, Operand(isolate()->factory()->number_string()));
5225 __ jmp(&end);
5226 __ bind(&do_call);
5227 TypeofStub stub(isolate());
5228 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5229 __ bind(&end);
5230}
5231
5232
5233void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5234 Register input = ToRegister(instr->value());
5235
5236 Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
5237 instr->FalseLabel(chunk_),
5238 input,
5239 instr->type_literal());
5240 if (final_branch_condition != kNoCondition) {
5241 EmitBranch(instr, final_branch_condition);
5242 }
5243}
5244
5245
5246Condition LCodeGen::EmitTypeofIs(Label* true_label,
5247 Label* false_label,
5248 Register input,
5249 Handle<String> type_name) {
5250 Condition final_branch_condition = kNoCondition;
5251 Register scratch = scratch0();
5252 Factory* factory = isolate()->factory();
5253 if (String::Equals(type_name, factory->number_string())) {
5254 __ JumpIfSmi(input, true_label);
5255 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5256 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
5257 final_branch_condition = eq;
5258
5259 } else if (String::Equals(type_name, factory->string_string())) {
5260 __ JumpIfSmi(input, false_label);
5261 __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
5262 final_branch_condition = lt;
5263
5264 } else if (String::Equals(type_name, factory->symbol_string())) {
5265 __ JumpIfSmi(input, false_label);
5266 __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
5267 final_branch_condition = eq;
5268
5269 } else if (String::Equals(type_name, factory->boolean_string())) {
5270 __ CompareRoot(input, Heap::kTrueValueRootIndex);
5271 __ b(eq, true_label);
5272 __ CompareRoot(input, Heap::kFalseValueRootIndex);
5273 final_branch_condition = eq;
5274
5275 } else if (String::Equals(type_name, factory->undefined_string())) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01005276 __ CompareRoot(input, Heap::kNullValueRootIndex);
5277 __ b(eq, false_label);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005278 __ JumpIfSmi(input, false_label);
5279 // Check for undetectable objects => true.
5280 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5281 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5282 __ tst(scratch, Operand(1 << Map::kIsUndetectable));
5283 final_branch_condition = ne;
5284
5285 } else if (String::Equals(type_name, factory->function_string())) {
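    // "function" requires a callable map that is not undetectable.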
5286 __ JumpIfSmi(input, false_label);
5287 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5288 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5289 __ and_(scratch, scratch,
5290 Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5291 __ cmp(scratch, Operand(1 << Map::kIsCallable));
5292 final_branch_condition = eq;
5293
5294 } else if (String::Equals(type_name, factory->object_string())) {
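    // typeof null is "object"; anything else must be a non-callable,
    // non-undetectable JS receiver.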
5295 __ JumpIfSmi(input, false_label);
5296 __ CompareRoot(input, Heap::kNullValueRootIndex);
5297 __ b(eq, true_label);
5298 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
5299 __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
5300 __ b(lt, false_label);
5301 // Check for callable or undetectable objects => false.
5302 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5303 __ tst(scratch,
5304 Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5305 final_branch_condition = eq;
5306
5307// clang-format off
5308#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
5309 } else if (String::Equals(type_name, factory->type##_string())) { \
5310 __ JumpIfSmi(input, false_label); \
5311 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
5312 __ CompareRoot(scratch, Heap::k##Type##MapRootIndex); \
5313 final_branch_condition = eq;
5314 SIMD128_TYPES(SIMD128_TYPE)
5315#undef SIMD128_TYPE
5316 // clang-format on
5317
5318 } else {
5319 __ b(false_label);
5320 }
5321
5322 return final_branch_condition;
5323}
5324
5325
5326void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5327 if (info()->ShouldEnsureSpaceForLazyDeopt()) {
5328 // Ensure that we have enough space after the previous lazy-bailout
5329 // instruction for patching the code here.
5330 int current_pc = masm()->pc_offset();
5331 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5332 // Block literal pool emission for duration of padding.
5333 Assembler::BlockConstPoolScope block_const_pool(masm());
5334 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5335 DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
5336 while (padding_size > 0) {
5337 __ nop();
5338 padding_size -= Assembler::kInstrSize;
5339 }
5340 }
5341 }
5342 last_lazy_deopt_pc_ = masm()->pc_offset();
5343}
5344
5345
5346void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5347 last_lazy_deopt_pc_ = masm()->pc_offset();
5348 DCHECK(instr->HasEnvironment());
5349 LEnvironment* env = instr->environment();
5350 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5351 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5352}
5353
5354
5355void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5356 Deoptimizer::BailoutType type = instr->hydrogen()->type();
5357 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5358 // needed return address), even though the implementation of LAZY and EAGER is
5359 // now identical. When LAZY is eventually completely folded into EAGER, remove
5360 // the special case below.
5361 if (info()->IsStub() && type == Deoptimizer::EAGER) {
5362 type = Deoptimizer::LAZY;
5363 }
5364
5365 DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
5366}
5367
5368
5369void LCodeGen::DoDummy(LDummy* instr) {
5370 // Nothing to see here, move on!
5371}
5372
5373
5374void LCodeGen::DoDummyUse(LDummyUse* instr) {
5375 // Nothing to see here, move on!
5376}
5377
5378
5379void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5380 PushSafepointRegistersScope scope(this);
5381 LoadContextFromDeferred(instr->context());
5382 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5383 RecordSafepointWithLazyDeopt(
5384 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5385 DCHECK(instr->HasEnvironment());
5386 LEnvironment* env = instr->environment();
5387 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5388}
5389
5390
5391void LCodeGen::DoStackCheck(LStackCheck* instr) {
5392 class DeferredStackCheck final : public LDeferredCode {
5393 public:
5394 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5395 : LDeferredCode(codegen), instr_(instr) { }
5396 void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
5397 LInstruction* instr() override { return instr_; }
5398
5399 private:
5400 LStackCheck* instr_;
5401 };
5402
5403 DCHECK(instr->HasEnvironment());
5404 LEnvironment* env = instr->environment();
5405 // There is no LLazyBailout instruction for stack-checks. We have to
5406 // prepare for lazy deoptimization explicitly here.
5407 if (instr->hydrogen()->is_function_entry()) {
5408 // Perform stack overflow check.
5409 Label done;
5410 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5411 __ cmp(sp, Operand(ip));
5412 __ b(hs, &done);
5413 Handle<Code> stack_check = isolate()->builtins()->StackCheck();
5414 PredictableCodeSizeScope predictable(masm());
5415 predictable.ExpectSize(CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
5416 DCHECK(instr->context()->IsRegister());
5417 DCHECK(ToRegister(instr->context()).is(cp));
5418 CallCode(stack_check, RelocInfo::CODE_TARGET, instr);
5419 __ bind(&done);
5420 } else {
5421 DCHECK(instr->hydrogen()->is_backwards_branch());
5422 // Perform stack overflow check if this goto needs it before jumping.
5423 DeferredStackCheck* deferred_stack_check =
5424 new(zone()) DeferredStackCheck(this, instr);
5425 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5426 __ cmp(sp, Operand(ip));
5427 __ b(lo, deferred_stack_check->entry());
5428 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5429 __ bind(instr->done_label());
5430 deferred_stack_check->SetExit(instr->done_label());
5431 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5432 // Don't record a deoptimization index for the safepoint here.
5433 // This will be done explicitly when emitting call and the safepoint in
5434 // the deferred code.
5435 }
5436}
5437
5438
5439void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5440 // This is a pseudo-instruction that ensures that the environment here is
5441 // properly registered for deoptimization and records the assembler's PC
5442 // offset.
5443 LEnvironment* environment = instr->environment();
5444
5445 // If the environment were already registered, we would have no way of
5446 // backpatching it with the spill slot operands.
5447 DCHECK(!environment->HasBeenRegistered());
5448 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5449
5450 GenerateOsrPrologue();
5451}
5452
5453
5454void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005455 Label use_cache, call_runtime;
Ben Murdoch097c5b22016-05-18 11:27:45 +01005456 __ CheckEnumCache(&call_runtime);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005457
5458 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
5459 __ b(&use_cache);
5460
5461 // Get the set of properties to enumerate.
5462 __ bind(&call_runtime);
5463 __ push(r0);
Ben Murdoch097c5b22016-05-18 11:27:45 +01005464 CallRuntime(Runtime::kForInEnumerate, instr);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005465 __ bind(&use_cache);
5466}
5467
5468
5469void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5470 Register map = ToRegister(instr->map());
5471 Register result = ToRegister(instr->result());
5472 Label load_cache, done;
5473 __ EnumLength(result, map);
5474 __ cmp(result, Operand(Smi::FromInt(0)));
5475 __ b(ne, &load_cache);
5476 __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
5477 __ jmp(&done);
5478
5479 __ bind(&load_cache);
5480 __ LoadInstanceDescriptors(map, result);
5481 __ ldr(result,
5482 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5483 __ ldr(result,
5484 FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5485 __ cmp(result, Operand::Zero());
5486 DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
5487
5488 __ bind(&done);
5489}
5490
5491
5492void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5493 Register object = ToRegister(instr->value());
5494 Register map = ToRegister(instr->map());
5495 __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5496 __ cmp(map, scratch0());
5497 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
5498}
5499
5500
5501void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5502 Register result,
5503 Register object,
5504 Register index) {
5505 PushSafepointRegistersScope scope(this);
5506 __ Push(object);
5507 __ Push(index);
5508 __ mov(cp, Operand::Zero());
5509 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5510 RecordSafepointWithRegisters(
5511 instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5512 __ StoreToSafepointRegisterSlot(r0, result);
5513}
5514
5515
5516void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5517 class DeferredLoadMutableDouble final : public LDeferredCode {
5518 public:
5519 DeferredLoadMutableDouble(LCodeGen* codegen,
5520 LLoadFieldByIndex* instr,
5521 Register result,
5522 Register object,
5523 Register index)
5524 : LDeferredCode(codegen),
5525 instr_(instr),
5526 result_(result),
5527 object_(object),
5528 index_(index) {
5529 }
5530 void Generate() override {
5531 codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
5532 }
5533 LInstruction* instr() override { return instr_; }
5534
5535 private:
5536 LLoadFieldByIndex* instr_;
5537 Register result_;
5538 Register object_;
5539 Register index_;
5540 };
5541
5542 Register object = ToRegister(instr->object());
5543 Register index = ToRegister(instr->index());
5544 Register result = ToRegister(instr->result());
5545 Register scratch = scratch0();
5546
5547 DeferredLoadMutableDouble* deferred;
5548 deferred = new(zone()) DeferredLoadMutableDouble(
5549 this, instr, result, object, index);
5550
5551 Label out_of_object, done;
5552
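  // The low bit of the index value marks a mutable heap number (double)
  // field, which is loaded through the deferred runtime call. Shifting it
  // out below leaves the smi-tagged field index.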
5553 __ tst(index, Operand(Smi::FromInt(1)));
5554 __ b(ne, deferred->entry());
5555 __ mov(index, Operand(index, ASR, 1));
5556
5557 __ cmp(index, Operand::Zero());
5558 __ b(lt, &out_of_object);
5559
5560 __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
5561 __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5562
5563 __ b(&done);
5564
5565 __ bind(&out_of_object);
5566 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5567  // The index is the negated out-of-object property index plus 1.
5568 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
5569 __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
5570 __ ldr(result, FieldMemOperand(scratch,
5571 FixedArray::kHeaderSize - kPointerSize));
5572 __ bind(deferred->exit());
5573 __ bind(&done);
5574}
5575
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005576#undef __
5577
5578} // namespace internal
5579} // namespace v8